From c41093053e8c23a6a69f79246a25cb2340af3d01 Mon Sep 17 00:00:00 2001
From: Nalin Dahyabhai
Date: Thu, 7 Jul 2016 15:01:33 -0400
Subject: [PATCH] Remove some code we no longer depend on

Signed-off-by: Nalin Dahyabhai
---
 api/README.md | 5 -
 api/client/bundlefile/bundlefile.go | 71 -
 api/client/bundlefile/bundlefile_test.go | 79 -
 api/client/cli.go | 267 -
 api/client/client.go | 5 -
 api/client/commands.go | 9 -
 api/client/container/attach.go | 130 -
 api/client/container/commit.go | 93 -
 api/client/container/cp.go | 303 -
 api/client/container/create.go | 218 -
 api/client/container/diff.go | 61 -
 api/client/container/export.go | 59 -
 api/client/container/kill.go | 53 -
 api/client/container/logs.go | 88 -
 api/client/container/pause.go | 51 -
 api/client/container/port.go | 80 -
 api/client/container/ps.go | 125 -
 api/client/container/rename.go | 53 -
 api/client/container/restart.go | 55 -
 api/client/container/rm.go | 76 -
 api/client/container/run.go | 329 -
 api/client/container/start.go | 153 -
 api/client/container/stats.go | 223 -
 api/client/container/stats_helpers.go | 234 -
 api/client/container/stats_unit_test.go | 45 -
 api/client/container/stop.go | 57 -
 api/client/container/top.go | 59 -
 api/client/container/unpause.go | 51 -
 api/client/container/update.go | 150 -
 api/client/container/utils.go | 22 -
 api/client/container/wait.go | 52 -
 api/client/credentials.go | 44 -
 api/client/exec.go | 160 -
 api/client/exec_test.go | 122 -
 api/client/formatter/custom.go | 243 -
 api/client/formatter/custom_test.go | 192 -
 api/client/formatter/formatter.go | 307 -
 api/client/formatter/formatter_test.go | 537 -
 api/client/hijack.go | 95 -
 api/client/idresolver/idresolver.go | 70 -
 api/client/image/build.go | 432 -
 api/client/image/history.go | 99 -
 api/client/image/images.go | 103 -
 api/client/image/import.go | 86 -
 api/client/image/load.go | 67 -
 api/client/image/pull.go | 85 -
 api/client/image/push.go | 62 -
 api/client/image/remove.go | 70 -
 api/client/image/save.go | 57 -
 api/client/image/search.go | 135 -
 api/client/image/tag.go | 41 -
 api/client/inspect.go | 95 -
 api/client/inspect/inspector.go | 195 -
 api/client/inspect/inspector_test.go | 221 -
 api/client/network/cmd.go | 31 -
 api/client/network/connect.go | 64 -
 api/client/network/create.go | 222 -
 api/client/network/disconnect.go | 41 -
 api/client/network/inspect.go | 45 -
 api/client/network/list.go | 100 -
 api/client/network/remove.go | 43 -
 api/client/node/accept.go | 31 -
 api/client/node/cmd.go | 50 -
 api/client/node/demote.go | 31 -
 api/client/node/inspect.go | 141 -
 api/client/node/list.go | 113 -
 api/client/node/opts.go | 50 -
 api/client/node/promote.go | 31 -
 api/client/node/remove.go | 36 -
 api/client/node/tasks.go | 71 -
 api/client/node/update.go | 83 -
 api/client/plugin/cmd.go | 12 -
 api/client/plugin/cmd_experimental.go | 36 -
 api/client/plugin/disable.go | 41 -
 api/client/plugin/enable.go | 41 -
 api/client/plugin/inspect.go | 52 -
 api/client/plugin/install.go | 100 -
 api/client/plugin/list.go | 44 -
 api/client/plugin/push.go | 55 -
 api/client/plugin/remove.go | 55 -
 api/client/plugin/set.go | 42 -
 api/client/registry.go | 188 -
 api/client/registry/login.go | 81 -
 api/client/registry/logout.go | 52 -
 api/client/service/cmd.go | 32 -
 api/client/service/create.go | 71 -
 api/client/service/inspect.go | 171 -
 api/client/service/list.go | 124 -
 api/client/service/opts.go | 523 -
 api/client/service/opts_test.go | 163 -
 api/client/service/remove.go | 47 -
 api/client/service/scale.go | 88 -
 api/client/service/tasks.go | 78 -
 api/client/service/update.go | 388 -
 api/client/service/update_test.go | 133 -
 api/client/stack/cmd.go | 38 -
 api/client/stack/cmd_stub.go | 18 -
 api/client/stack/common.go | 50 -
 api/client/stack/config.go | 41 -
 api/client/stack/deploy.go | 208 -
 api/client/stack/opts.go | 45 -
 api/client/stack/remove.go | 75 -
 api/client/stack/tasks.go | 70 -
 api/client/swarm/cmd.go | 30 -
 api/client/swarm/init.go | 95 -
 api/client/swarm/inspect.go | 47 -
 api/client/swarm/join.go | 65 -
 api/client/swarm/leave.go | 44 -
 api/client/swarm/opts.go | 262 -
 api/client/swarm/opts_test.go | 136 -
 api/client/swarm/secret.go | 19 -
 api/client/swarm/update.go | 95 -
 api/client/system/events.go | 115 -
 api/client/system/events_utils.go | 66 -
 api/client/system/info.go | 203 -
 api/client/system/version.go | 110 -
 api/client/task/print.go | 79 -
 api/client/trust.go | 598 -
 api/client/trust_test.go | 56 -
 api/client/utils.go | 190 -
 api/client/volume/cmd.go | 29 -
 api/client/volume/create.go | 62 -
 api/client/volume/inspect.go | 46 -
 api/client/volume/list.go | 86 -
 api/client/volume/remove.go | 43 -
 api/common.go | 169 -
 api/common_test.go | 341 -
 api/fixtures/keyfile | 7 -
 api/server/httputils/decoder.go | 16 -
 api/server/httputils/errors.go | 93 -
 api/server/httputils/form.go | 73 -
 api/server/httputils/form_test.go | 105 -
 api/server/httputils/httputils.go | 106 -
 api/server/middleware.go | 24 -
 api/server/middleware/cors.go | 37 -
 api/server/middleware/debug.go | 76 -
 api/server/middleware/middleware.go | 13 -
 api/server/middleware/user_agent.go | 47 -
 api/server/middleware/version.go | 59 -
 api/server/middleware/version_test.go | 63 -
 api/server/profiler.go | 41 -
 api/server/router/build/backend.go | 20 -
 api/server/router/build/build.go | 29 -
 api/server/router/build/build_routes.go | 194 -
 api/server/router/container/backend.go | 72 -
 api/server/router/container/container.go | 76 -
 .../router/container/container_routes.go | 527 -
 api/server/router/container/copy.go | 119 -
 api/server/router/container/exec.go | 134 -
 api/server/router/container/inspect.go | 21 -
 api/server/router/image/backend.go | 43 -
 api/server/router/image/image.go | 49 -
 api/server/router/image/image_routes.go | 319 -
 api/server/router/local.go | 96 -
 api/server/router/network/backend.go | 20 -
 api/server/router/network/filter.go | 96 -
 api/server/router/network/network.go | 42 -
 api/server/router/network/network_routes.go | 285 -
 api/server/router/plugin/backend.go | 21 -
 api/server/router/plugin/plugin.go | 23 -
 .../router/plugin/plugin_experimental.go | 20 -
 api/server/router/plugin/plugin_regular.go | 9 -
 api/server/router/plugin/plugin_routes.go | 103 -
 api/server/router/router.go | 19 -
 api/server/router/swarm/backend.go | 26 -
 api/server/router/swarm/cluster.go | 44 -
 api/server/router/swarm/cluster_routes.go | 235 -
 api/server/router/system/backend.go | 20 -
 api/server/router/system/system.go | 38 -
 api/server/router/system/system_routes.go | 154 -
 api/server/router/volume/backend.go | 15 -
 api/server/router/volume/volume.go | 35 -
 api/server/router/volume/volume_routes.go | 66 -
 api/server/router_swapper.go | 30 -
 api/server/server.go | 207 -
 api/server/server_test.go | 46 -
 api/types/backend/backend.go | 85 -
 builder/builder.go | 155 -
 builder/context.go | 260 -
 builder/context_test.go | 307 -
 builder/context_unix.go | 11 -
 builder/context_windows.go | 17 -
 builder/dockerfile/bflag.go | 176 -
 builder/dockerfile/bflag_test.go | 187 -
 builder/dockerfile/builder.go | 322 -
 builder/dockerfile/builder_unix.go | 5 -
 builder/dockerfile/builder_windows.go | 3 -
 builder/dockerfile/command/command.go | 46 -
 builder/dockerfile/dispatchers.go | 760 -
 builder/dockerfile/dispatchers_test.go | 208 -
 builder/dockerfile/dispatchers_unix.go | 27 -
 builder/dockerfile/dispatchers_windows.go | 65 -
 .../dockerfile/dispatchers_windows_test.go | 34 -
 builder/dockerfile/envVarTest | 112 -
 builder/dockerfile/evaluator.go | 203 -
 builder/dockerfile/evaluator_test.go | 195 -
 builder/dockerfile/evaluator_unix.go | 9 -
 builder/dockerfile/evaluator_windows.go | 13 -
 builder/dockerfile/internals.go | 669 -
 builder/dockerfile/internals_test.go | 124 -
 builder/dockerfile/internals_unix.go | 26 -
 builder/dockerfile/internals_windows.go | 56 -
 builder/dockerfile/internals_windows_test.go | 51 -
 builder/dockerfile/parser/dumper/main.go | 32 -
 builder/dockerfile/parser/json_test.go | 55 -
 builder/dockerfile/parser/line_parsers.go | 361 -
 builder/dockerfile/parser/parser.go | 213 -
 builder/dockerfile/parser/parser_test.go | 166 -
 .../parser/testfile-line/Dockerfile | 35 -
 .../env_no_value/Dockerfile | 3 -
 .../shykes-nested-json/Dockerfile | 1 -
 .../testfiles/ADD-COPY-with-JSON/Dockerfile | 11 -
 .../testfiles/ADD-COPY-with-JSON/result | 10 -
 .../testfiles/brimstone-consuldock/Dockerfile | 26 -
 .../testfiles/brimstone-consuldock/result | 5 -
 .../brimstone-docker-consul/Dockerfile | 52 -
 .../testfiles/brimstone-docker-consul/result | 9 -
 .../testfiles/continueIndent/Dockerfile | 36 -
 .../parser/testfiles/continueIndent/result | 10 -
 .../testfiles/cpuguy83-nagios/Dockerfile | 54 -
 .../parser/testfiles/cpuguy83-nagios/result | 40 -
 .../parser/testfiles/docker/Dockerfile | 103 -
 .../dockerfile/parser/testfiles/docker/result | 24 -
 .../parser/testfiles/env/Dockerfile | 23 -
 .../dockerfile/parser/testfiles/env/result | 16 -
 .../testfiles/escape-after-comment/Dockerfile | 9 -
 .../testfiles/escape-after-comment/result | 3 -
 .../testfiles/escape-nonewline/Dockerfile | 7 -
 .../parser/testfiles/escape-nonewline/result | 3 -
 .../parser/testfiles/escape/Dockerfile | 6 -
 .../dockerfile/parser/testfiles/escape/result | 3 -
 .../parser/testfiles/escapes/Dockerfile | 14 -
 .../parser/testfiles/escapes/result | 6 -
 .../parser/testfiles/flags/Dockerfile | 10 -
 .../dockerfile/parser/testfiles/flags/result | 10 -
 .../parser/testfiles/health/Dockerfile | 10 -
 .../dockerfile/parser/testfiles/health/result | 9 -
 .../parser/testfiles/influxdb/Dockerfile | 15 -
 .../parser/testfiles/influxdb/result | 11 -
 .../Dockerfile | 1 -
 .../result | 1 -
 .../Dockerfile | 1 -
 .../result | 1 -
 .../Dockerfile | 1 -
 .../jeztah-invalid-json-single-quotes/result | 1 -
 .../Dockerfile | 1 -
 .../result | 1 -
 .../Dockerfile | 1 -
 .../result | 1 -
 .../parser/testfiles/json/Dockerfile | 8 -
 .../dockerfile/parser/testfiles/json/result | 8 -
 .../kartar-entrypoint-oddities/Dockerfile | 7 -
 .../kartar-entrypoint-oddities/result | 7 -
 .../lk4d4-the-edge-case-generator/Dockerfile | 48 -
 .../lk4d4-the-edge-case-generator/result | 29 -
 .../parser/testfiles/mail/Dockerfile | 16 -
 .../dockerfile/parser/testfiles/mail/result | 14 -
 .../testfiles/multiple-volumes/Dockerfile | 3 -
 .../parser/testfiles/multiple-volumes/result | 2 -
 .../parser/testfiles/mumble/Dockerfile | 7 -
 .../dockerfile/parser/testfiles/mumble/result | 4 -
 .../parser/testfiles/nginx/Dockerfile | 14 -
 .../dockerfile/parser/testfiles/nginx/result | 11 -
 .../parser/testfiles/tf2/Dockerfile | 23 -
 .../dockerfile/parser/testfiles/tf2/result | 20 -
 .../parser/testfiles/weechat/Dockerfile | 9 -
 .../parser/testfiles/weechat/result | 6 -
 .../parser/testfiles/znc/Dockerfile | 7 -
 .../dockerfile/parser/testfiles/znc/result | 5 -
 builder/dockerfile/parser/utils.go | 176 -
 builder/dockerfile/shell_parser.go | 314 -
 builder/dockerfile/shell_parser_test.go | 143 -
 builder/dockerfile/support.go | 19 -
 builder/dockerfile/support_test.go | 65 -
 builder/dockerfile/utils_test.go | 50 -
 builder/dockerfile/wordsTest | 25 -
 builder/dockerignore.go | 47 -
 builder/dockerignore/dockerignore.go | 49 -
 builder/dockerignore/dockerignore_test.go | 55 -
 builder/dockerignore_test.go | 95 -
 builder/git.go | 28 -
 builder/remote.go | 152 -
 builder/remote_test.go | 208 -
 builder/tarsum.go | 158 -
 builder/tarsum_test.go | 265 -
 builder/utils_test.go | 87 -
 cli/cli.go | 196 -
 cli/cobraadaptor/adaptor.go | 154 -
 cli/error.go | 20 -
 cli/flagerrors.go | 21 -
 cli/flags/client.go | 12 -
 cli/flags/common.go | 123 -
 cli/required.go | 96 -
 cli/usage.go | 22 -
 cliconfig/config.go | 120 -
 cliconfig/config_test.go | 545 -
 cliconfig/configfile/file.go | 177 -
 cliconfig/configfile/file_test.go | 27 -
 cliconfig/credentials/credentials.go | 17 -
 cliconfig/credentials/default_store.go | 22 -
 cliconfig/credentials/default_store_darwin.go | 3 -
 cliconfig/credentials/default_store_linux.go | 3 -
 .../credentials/default_store_unsupported.go | 5 -
 .../credentials/default_store_windows.go | 3 -
 cliconfig/credentials/file_store.go | 67 -
 cliconfig/credentials/file_store_test.go | 139 -
 cliconfig/credentials/native_store.go | 126 -
 cliconfig/credentials/native_store_test.go | 356 -
 cmd/docker/daemon.go | 18 -
 cmd/docker/daemon_none.go | 16 -
 cmd/docker/daemon_none_test.go | 20 -
 cmd/docker/daemon_unix.go | 56 -
 cmd/docker/docker.go | 118 -
 cmd/docker/docker_test.go | 23 -
 cmd/docker/docker_windows.go | 18 -
 cmd/docker/usage.go | 22 -
 cmd/docker/usage_test.go | 15 -
 cmd/dockerd/README.md | 3 -
 cmd/dockerd/daemon.go | 449 -
 cmd/dockerd/daemon_freebsd.go | 5 -
 cmd/dockerd/daemon_linux.go | 11 -
 cmd/dockerd/daemon_no_plugin_support.go | 13 -
 cmd/dockerd/daemon_plugin_support.go | 14 -
 cmd/dockerd/daemon_solaris.go | 74 -
 cmd/dockerd/daemon_test.go | 294 -
 cmd/dockerd/daemon_unix.go | 131 -
 cmd/dockerd/daemon_unix_test.go | 212 -
 cmd/dockerd/daemon_windows.go | 82 -
 cmd/dockerd/docker.go | 82 -
 cmd/dockerd/docker_windows.go | 18 -
 cmd/dockerd/hack/malformed_host_override.go | 121 -
 .../hack/malformed_host_override_test.go | 124 -
 cmd/dockerd/routes.go | 9 -
 cmd/dockerd/routes_experimental.go | 13 -
 cmd/dockerd/service_unsupported.go | 7 -
 cmd/dockerd/service_windows.go | 369 -
 container/archive.go | 76 -
 container/container.go | 974 -
 container/container_solaris.go | 95 -
 container/container_unit_test.go | 36 -
 container/container_unix.go | 419 -
 container/container_windows.go | 109 -
 container/health.go | 49 -
 container/history.go | 30 -
 container/memory_store.go | 92 -
 container/memory_store_test.go | 106 -
 container/monitor.go | 46 -
 container/mounts_unix.go | 12 -
 container/mounts_windows.go | 8 -
 container/state.go | 310 -
 container/state_solaris.go | 7 -
 container/state_test.go | 87 -
 container/state_unix.go | 10 -
 container/state_windows.go | 7 -
 container/store.go | 28 -
 daemon/apparmor_default.go | 30 -
 daemon/apparmor_default_unsupported.go | 6 -
 daemon/archive.go | 436 -
 daemon/archive_unix.go | 58 -
 daemon/archive_windows.go | 18 -
 daemon/attach.go | 147 -
 daemon/auth.go | 13 -
 daemon/caps/utils_unix.go | 131 -
 daemon/changes.go | 15 -
 daemon/cluster/cluster.go | 1236 -
 daemon/cluster/convert/container.go | 116 -
 daemon/cluster/convert/network.go | 199 -
 daemon/cluster/convert/node.go | 95 -
 daemon/cluster/convert/service.go | 253 -
 daemon/cluster/convert/swarm.go | 166 -
 daemon/cluster/convert/task.go | 53 -
 daemon/cluster/executor/backend.go | 41 -
 daemon/cluster/executor/container/adapter.go | 273 -
 .../cluster/executor/container/container.go | 504 -
 .../cluster/executor/container/controller.go | 349 -
 daemon/cluster/executor/container/errors.go | 15 -
 daemon/cluster/executor/container/executor.go | 139 -
 .../cluster/executor/container/health_test.go | 102 -
 daemon/cluster/filters.go | 98 -
 daemon/cluster/helpers.go | 108 -
 daemon/cluster/provider/network.go | 37 -
 daemon/commit.go | 265 -
 daemon/config.go | 444 -
 daemon/config_experimental.go | 8 -
 daemon/config_solaris.go | 43 -
 daemon/config_stub.go | 8 -
 daemon/config_test.go | 278 -
 daemon/config_unix.go | 135 -
 daemon/config_windows.go | 63 -
 daemon/container.go | 256 -
 daemon/container_operations.go | 768 -
 daemon/container_operations_solaris.go | 50 -
 daemon/container_operations_unix.go | 385 -
 daemon/container_operations_windows.go | 57 -
 daemon/create.go | 260 -
 daemon/create_unix.go | 76 -
 daemon/create_windows.go | 80 -
 daemon/daemon.go | 1055 -
 daemon/daemon_experimental.go | 16 -
 daemon/daemon_linux.go | 80 -
 daemon/daemon_linux_test.go | 104 -
 daemon/daemon_solaris.go | 167 -
 daemon/daemon_stub.go | 12 -
 daemon/daemon_test.go | 532 -
 daemon/daemon_unix.go | 1157 -
 daemon/daemon_unix_test.go | 199 -
 daemon/daemon_unsupported.go | 5 -
 daemon/daemon_windows.go | 437 -
 daemon/debugtrap_unix.go | 21 -
 daemon/debugtrap_unsupported.go | 7 -
 daemon/debugtrap_windows.go | 30 -
 daemon/delete.go | 157 -
 daemon/delete_test.go | 42 -
 daemon/discovery.go | 203 -
 daemon/discovery_test.go | 152 -
 daemon/errors.go | 57 -
 daemon/events.go | 117 -
 daemon/events/events.go | 154 -
 daemon/events/events_test.go | 275 -
 daemon/events/filter.go | 87 -
 daemon/events/testutils/testutils.go | 76 -
 daemon/events_test.go | 94 -
 daemon/exec.go | 268 -
 daemon/exec/exec.go | 93 -
 daemon/exec_linux.go | 26 -
 daemon/exec_solaris.go | 11 -
 daemon/exec_windows.go | 13 -
 daemon/export.go | 55 -
 daemon/health.go | 315 -
 daemon/health_test.go | 112 -
 daemon/image.go | 124 -
 daemon/image_delete.go | 404 -
 daemon/image_exporter.go | 25 -
 daemon/image_history.go | 82 -
 daemon/image_inspect.go | 81 -
 daemon/image_pull.go | 106 -
 daemon/image_push.go | 58 -
 daemon/image_tag.go | 37 -
 daemon/images.go | 193 -
 daemon/import.go | 135 -
 daemon/info.go | 186 -
 daemon/inspect.go | 250 -
 daemon/inspect_solaris.go | 40 -
 daemon/inspect_unix.go | 91 -
 daemon/inspect_windows.go | 40 -
 daemon/keys.go | 59 -
 daemon/keys_unsupported.go | 8 -
 daemon/kill.go | 157 -
 daemon/links.go | 128 -
 daemon/links/links.go | 141 -
 daemon/links/links_test.go | 213 -
 daemon/links_test.go | 98 -
 daemon/list.go | 596 -
 daemon/list_unix.go | 11 -
 daemon/list_windows.go | 20 -
 daemon/logdrivers_linux.go | 14 -
 daemon/logdrivers_windows.go | 10 -
 daemon/logger/awslogs/cloudwatchlogs.go | 375 -
 daemon/logger/awslogs/cloudwatchlogs_test.go | 627 -
 .../logger/awslogs/cwlogsiface_mock_test.go | 77 -
 daemon/logger/context.go | 113 -
 daemon/logger/copier.go | 81 -
 daemon/logger/copier_test.go | 118 -
 daemon/logger/etwlogs/etwlogs_windows.go | 183 -
 daemon/logger/factory.go | 104 -
 daemon/logger/fluentd/fluentd.go | 200 -
 daemon/logger/gcplogs/gcplogging.go | 191 -
 daemon/logger/gelf/gelf.go | 209 -
 daemon/logger/gelf/gelf_unsupported.go | 3 -
 daemon/logger/journald/journald.go | 95 -
 .../logger/journald/journald_unsupported.go | 6 -
 daemon/logger/journald/read.go | 392 -
 daemon/logger/journald/read_native.go | 6 -
 daemon/logger/journald/read_native_compat.go | 6 -
 daemon/logger/journald/read_unsupported.go | 7 -
 daemon/logger/jsonfilelog/jsonfilelog.go | 147 -
 daemon/logger/jsonfilelog/jsonfilelog_test.go | 248 -
 daemon/logger/jsonfilelog/read.go | 239 -
 daemon/logger/logger.go | 113 -
 daemon/logger/loggerutils/log_tag.go | 28 -
 daemon/logger/loggerutils/log_tag_test.go | 47 -
 daemon/logger/loggerutils/rotatefilewriter.go | 124 -
 daemon/logger/splunk/splunk.go | 266 -
 daemon/logger/syslog/syslog.go | 258 -
 daemon/logger/syslog/syslog_test.go | 51 -
 daemon/logger/syslog/syslog_unsupported.go | 3 -
 daemon/logs.go | 166 -
 daemon/logs_test.go | 15 -
 daemon/monitor.go | 165 -
 daemon/monitor_linux.go | 19 -
 daemon/monitor_solaris.go | 18 -
 daemon/monitor_windows.go | 37 -
 daemon/mounts.go | 48 -
 daemon/names.go | 115 -
 daemon/network.go | 381 -
 daemon/network/settings.go | 24 -
 daemon/oci_linux.go | 711 -
 daemon/oci_solaris.go | 12 -
 daemon/oci_windows.go | 200 -
 daemon/pause.go | 49 -
 daemon/rename.go | 98 -
 daemon/resize.go | 40 -
 daemon/restart.go | 48 -
 daemon/search.go | 94 -
 daemon/search_test.go | 357 -
 daemon/seccomp_disabled.go | 19 -
 daemon/seccomp_linux.go | 48 -
 daemon/seccomp_unsupported.go | 5 -
 daemon/selinux_linux.go | 17 -
 daemon/selinux_unsupported.go | 13 -
 daemon/start.go | 194 -
 daemon/start_linux.go | 26 -
 daemon/start_windows.go | 10 -
 daemon/stats.go | 194 -
 daemon/stats_collector_solaris.go | 34 -
 daemon/stats_collector_unix.go | 189 -
 daemon/stats_collector_windows.go | 35 -
 daemon/stop.go | 67 -
 daemon/top_unix.go | 126 -
 daemon/top_unix_test.go | 76 -
 daemon/top_windows.go | 32 -
 daemon/unpause.go | 43 -
 daemon/update.go | 90 -
 daemon/update_linux.go | 25 -
 daemon/update_solaris.go | 11 -
 daemon/update_windows.go | 13 -
 daemon/volumes.go | 185 -
 daemon/volumes_unit_test.go | 39 -
 daemon/volumes_unix.go | 86 -
 daemon/volumes_windows.go | 51 -
 daemon/wait.go | 32 -
 distribution/errors.go | 113 -
 .../fixtures/validate_manifest/bad_manifest | 38 -
 .../validate_manifest/extra_data_manifest | 46 -
 .../fixtures/validate_manifest/good_manifest | 38 -
 distribution/metadata/metadata.go | 75 -
 distribution/metadata/v1_id_service.go | 44 -
 distribution/metadata/v1_id_service_test.go | 83 -
 distribution/metadata/v2_metadata_service.go | 137 -
 .../metadata/v2_metadata_service_test.go | 115 -
 distribution/pull.go | 225 -
 distribution/pull_v1.go | 366 -
 distribution/pull_v2.go | 845 -
 distribution/pull_v2_test.go | 183 -
 distribution/pull_v2_unix.go | 19 -
 distribution/pull_v2_windows.go | 69 -
 distribution/push.go | 219 -
 distribution/push_v1.go | 454 -
 distribution/push_v2.go | 452 -
 distribution/registry.go | 122 -
 distribution/registry_unit_test.go | 133 -
 distribution/xfer/download.go | 452 -
 distribution/xfer/download_test.go | 341 -
 distribution/xfer/transfer.go | 401 -
 distribution/xfer/transfer_test.go | 410 -
 distribution/xfer/upload.go | 168 -
 distribution/xfer/upload_test.go | 134 -
 dockerversion/useragent.go | 74 -
 errors/errors.go | 47 -
 experimental/README.md | 82 -
 experimental/docker-stacks-and-bundles.md | 182 -
 experimental/images/ipvlan-l3.gliffy | 1 -
 experimental/images/ipvlan-l3.png | Bin 18260 -> 0 bytes
 experimental/images/ipvlan-l3.svg | 1 -
 experimental/images/ipvlan_l2_simple.gliffy | 1 -
 experimental/images/ipvlan_l2_simple.png | Bin 20145 -> 0 bytes
 experimental/images/ipvlan_l2_simple.svg | 1 -
 .../images/macvlan-bridge-ipvlan-l2.gliffy | 1 -
 .../images/macvlan-bridge-ipvlan-l2.png | Bin 14527 -> 0 bytes
 .../images/macvlan-bridge-ipvlan-l2.svg | 1 -
 .../images/macvlan_bridge_simple.gliffy | 1 -
 experimental/images/macvlan_bridge_simple.png | Bin 22392 -> 0 bytes
 experimental/images/macvlan_bridge_simple.svg | 1 -
 .../images/multi_tenant_8021q_vlans.gliffy | 1 -
 .../images/multi_tenant_8021q_vlans.png | Bin 17879 -> 0 bytes
 .../images/multi_tenant_8021q_vlans.svg | 1 -
 experimental/images/vlans-deeper-look.gliffy | 1 -
 experimental/images/vlans-deeper-look.png | Bin 38837 -> 0 bytes
 experimental/images/vlans-deeper-look.svg | 1 -
 experimental/plugins_graphdriver.md | 334 -
 experimental/vlan-networks.md | 721 -
 hack/make/binary | 10 -
 hack/make/binary-client | 12 -
 hack/make/binary-daemon | 16 -
 hack/make/dynbinary | 10 -
 hack/make/dynbinary-client | 12 -
 hack/make/dynbinary-daemon | 16 -
 hack/make/install-binary-client | 10 -
 hack/make/install-binary-daemon | 11 -
 image/fs.go | 175 -
 image/fs_test.go | 384 -
 image/image.go | 140 -
 image/image_test.go | 59 -
 image/rootfs.go | 16 -
 image/rootfs_unix.go | 18 -
 image/rootfs_windows.go | 48 -
 image/spec/v1.1.md | 640 -
 image/spec/v1.md | 573 -
 image/store.go | 295 -
 image/store_test.go | 300 -
 image/tarexport/load.go | 389 -
 image/tarexport/save.go | 349 -
 image/tarexport/tarexport.go | 47 -
 image/v1/imagev1.go | 156 -
 image/v1/imagev1_test.go | 55 -
 layer/empty.go | 48 -
 layer/empty_test.go | 46 -
 layer/filestore.go | 354 -
 layer/filestore_test.go | 104 -
 layer/layer.go | 270 -
 layer/layer_store.go | 659 -
 layer/layer_store_windows.go | 11 -
 layer/layer_test.go | 768 -
 layer/layer_unix.go | 9 -
 layer/layer_unix_test.go | 71 -
 layer/layer_windows.go | 98 -
 layer/migration.go | 256 -
 layer/migration_test.go | 435 -
 layer/mount_test.go | 230 -
 layer/mounted_layer.go | 103 -
 layer/ro_layer.go | 172 -
 layer/ro_layer_windows.go | 9 -
 libcontainerd/client.go | 46 -
 libcontainerd/client_linux.go | 525 -
 libcontainerd/client_solaris.go | 56 -
 libcontainerd/client_windows.go | 442 -
 libcontainerd/container.go | 40 -
 libcontainerd/container_linux.go | 235 -
 libcontainerd/container_solaris.go | 5 -
 libcontainerd/container_windows.go | 312 -
 libcontainerd/pausemonitor_linux.go | 31 -
 libcontainerd/process.go | 18 -
 libcontainerd/process_linux.go | 110 -
 libcontainerd/process_solaris.go | 6 -
 libcontainerd/process_windows.go | 88 -
 libcontainerd/queue_linux.go | 29 -
 libcontainerd/remote.go | 20 -
 libcontainerd/remote_linux.go | 541 -
 libcontainerd/remote_solaris.go | 34 -
 libcontainerd/remote_windows.go | 36 -
 libcontainerd/types.go | 60 -
 libcontainerd/types_linux.go | 55 -
 libcontainerd/types_solaris.go | 38 -
 libcontainerd/types_windows.go | 39 -
 libcontainerd/utils_linux.go | 52 -
 libcontainerd/utils_windows.go | 37 -
 libcontainerd/windowsoci/oci_windows.go | 179 -
 libcontainerd/windowsoci/unsupported.go | 3 -
 migrate/v1/migratev1.go | 504 -
 migrate/v1/migratev1_test.go | 435 -
 oci/defaults_linux.go | 177 -
 oci/defaults_solaris.go | 11 -
 oci/defaults_windows.go | 23 -
 opts/hosts.go | 151 -
 opts/hosts_test.go | 148 -
 opts/hosts_unix.go | 8 -
 opts/hosts_windows.go | 6 -
 opts/ip.go | 42 -
 opts/ip_test.go | 54 -
 pkg/aaparser/aaparser.go | 92 -
 pkg/aaparser/aaparser_test.go | 73 -
 pkg/authorization/api.go | 54 -
 pkg/authorization/authz.go | 179 -
 pkg/authorization/authz_unix_test.go | 282 -
 pkg/authorization/middleware.go | 60 -
 pkg/authorization/plugin.go | 92 -
 pkg/authorization/response.go | 201 -
 pkg/broadcaster/unbuffered.go | 49 -
 pkg/broadcaster/unbuffered_test.go | 162 -
 pkg/discovery/README.md | 41 -
 pkg/discovery/backends.go | 107 -
 pkg/discovery/discovery.go | 35 -
 pkg/discovery/discovery_test.go | 137 -
 pkg/discovery/entry.go | 94 -
 pkg/discovery/file/file.go | 109 -
 pkg/discovery/file/file_test.go | 114 -
 pkg/discovery/generator.go | 35 -
 pkg/discovery/generator_test.go | 53 -
 pkg/discovery/kv/kv.go | 192 -
 pkg/discovery/kv/kv_test.go | 324 -
 pkg/discovery/memory/memory.go | 93 -
 pkg/discovery/memory/memory_test.go | 48 -
 pkg/discovery/nodes/nodes.go | 54 -
 pkg/discovery/nodes/nodes_test.go | 51 -
 pkg/filenotify/filenotify.go | 40 -
 pkg/filenotify/fsnotify.go | 18 -
 pkg/filenotify/poller.go | 204 -
 pkg/filenotify/poller_test.go | 119 -
 pkg/gitutils/gitutils.go | 100 -
 pkg/gitutils/gitutils_test.go | 220 -
 pkg/graphdb/conn_sqlite3.go | 15 -
 pkg/graphdb/conn_sqlite3_unix.go | 7 -
 pkg/graphdb/conn_sqlite3_windows.go | 7 -
 pkg/graphdb/conn_unsupported.go | 8 -
 pkg/graphdb/graphdb.go | 551 -
 pkg/graphdb/graphdb_test.go | 721 -
 pkg/graphdb/sort.go | 27 -
 pkg/graphdb/sort_test.go | 29 -
 pkg/graphdb/utils.go | 32 -
 pkg/httputils/httputils.go | 56 -
 pkg/httputils/httputils_test.go | 115 -
 pkg/httputils/mimetype.go | 30 -
 pkg/httputils/mimetype_test.go | 13 -
 pkg/httputils/resumablerequestreader.go | 95 -
 pkg/httputils/resumablerequestreader_test.go | 307 -
 pkg/jsonlog/jsonlog.go | 42 -
 pkg/jsonlog/jsonlog_marshalling.go | 178 -
 pkg/jsonlog/jsonlog_marshalling_test.go | 34 -
 pkg/jsonlog/jsonlogbytes.go | 122 -
 pkg/jsonlog/jsonlogbytes_test.go | 39 -
 pkg/jsonlog/time_marshalling.go | 27 -
 pkg/jsonlog/time_marshalling_test.go | 47 -
 pkg/listeners/listeners_solaris.go | 31 -
 pkg/listeners/listeners_unix.go | 94 -
 pkg/listeners/listeners_windows.go | 54 -
 pkg/locker/README.md | 65 -
 pkg/locker/locker.go | 112 -
 pkg/locker/locker_test.go | 124 -
 .../cmd/names-generator/main.go | 11 -
 pkg/namesgenerator/names-generator.go | 552 -
 pkg/namesgenerator/names-generator_test.go | 27 -
 pkg/platform/architecture_linux.go | 16 -
 pkg/platform/architecture_unix.go | 20 -
 pkg/platform/architecture_windows.go | 52 -
 pkg/platform/platform.go | 23 -
 pkg/platform/utsname_int8.go | 18 -
 pkg/platform/utsname_uint8.go | 18 -
 pkg/progress/progress.go | 73 -
 pkg/progress/progressreader.go | 59 -
 pkg/progress/progressreader_test.go | 75 -
 pkg/pubsub/publisher.go | 111 -
 pkg/pubsub/publisher_test.go | 142 -
 pkg/registrar/registrar.go | 127 -
 pkg/registrar/registrar_test.go | 119 -
 pkg/signal/README.md | 1 -
 pkg/signal/signal.go | 54 -
 pkg/signal/signal_darwin.go | 41 -
 pkg/signal/signal_freebsd.go | 43 -
 pkg/signal/signal_linux.go | 80 -
 pkg/signal/signal_solaris.go | 42 -
 pkg/signal/signal_unix.go | 21 -
 pkg/signal/signal_unsupported.go | 10 -
 pkg/signal/signal_windows.go | 28 -
 pkg/signal/trap.go | 81 -
 pkg/stdcopy/stdcopy.go | 185 -
 pkg/stdcopy/stdcopy_test.go | 260 -
 pkg/streamformatter/streamformatter.go | 172 -
 pkg/streamformatter/streamformatter_test.go | 108 -
 pkg/symlink/LICENSE.APACHE | 191 -
 pkg/symlink/LICENSE.BSD | 27 -
 pkg/symlink/README.md | 6 -
 pkg/symlink/fs.go | 143 -
 pkg/symlink/fs_unix.go | 11 -
 pkg/symlink/fs_unix_test.go | 407 -
 pkg/symlink/fs_windows.go | 155 -
 pkg/tailfile/tailfile.go | 66 -
 pkg/tailfile/tailfile_test.go | 148 -
 pkg/tarsum/builder_context.go | 21 -
 pkg/tarsum/builder_context_test.go | 63 -
 pkg/tarsum/fileinfosums.go | 126 -
 pkg/tarsum/fileinfosums_test.go | 62 -
 pkg/tarsum/tarsum.go | 295 -
 pkg/tarsum/tarsum_spec.md | 230 -
 pkg/tarsum/tarsum_test.go | 656 -
 .../json | 1 -
 .../layer.tar | Bin 9216 -> 0 bytes
 .../json | 1 -
 .../layer.tar | Bin 1536 -> 0 bytes
 pkg/tarsum/testdata/collision/collision-0.tar | Bin 10240 -> 0 bytes
 pkg/tarsum/testdata/collision/collision-1.tar | Bin 10240 -> 0 bytes
 pkg/tarsum/testdata/collision/collision-2.tar | Bin 10240 -> 0 bytes
 pkg/tarsum/testdata/collision/collision-3.tar | Bin 10240 -> 0 bytes
 pkg/tarsum/testdata/xattr/json | 1 -
 pkg/tarsum/testdata/xattr/layer.tar | Bin 2560 -> 0 bytes
 pkg/tarsum/versioning.go | 150 -
 pkg/tarsum/versioning_test.go | 98 -
 pkg/tarsum/writercloser.go | 22 -
 pkg/term/ascii.go | 66 -
 pkg/term/ascii_test.go | 43 -
 pkg/term/tc_linux_cgo.go | 50 -
 pkg/term/tc_other.go | 20 -
 pkg/term/tc_solaris_cgo.go | 63 -
 pkg/term/term.go | 109 -
 pkg/term/term_solaris.go | 41 -
 pkg/term/term_unix.go | 29 -
 pkg/term/term_windows.go | 303 -
 pkg/term/termios_darwin.go | 69 -
 pkg/term/termios_freebsd.go | 69 -
 pkg/term/termios_linux.go | 47 -
 pkg/term/termios_openbsd.go | 69 -
 pkg/term/windows/ansi_reader.go | 258 -
 pkg/term/windows/ansi_writer.go | 61 -
 pkg/term/windows/console.go | 97 -
 pkg/term/windows/windows.go | 33 -
 pkg/term/windows/windows_test.go | 3 -
 pkg/tlsconfig/config.go | 133 -
 pkg/truncindex/truncindex.go | 137 -
 pkg/truncindex/truncindex_test.go | 429 -
 pkg/urlutil/urlutil.go | 50 -
 pkg/urlutil/urlutil_test.go | 70 -
 pkg/useragent/README.md | 1 -
 pkg/useragent/useragent.go | 55 -
 pkg/useragent/useragent_test.go | 31 -
 plugin/backend.go | 140 -
 plugin/distribution/pull.go | 212 -
 plugin/distribution/push.go | 135 -
 plugin/distribution/types.go | 19 -
 plugin/interface.go | 9 -
 plugin/legacy.go | 23 -
 profiles/apparmor/apparmor.go | 115 -
 profiles/apparmor/template.go | 46 -
 profiles/seccomp/default.json | 1593 -
 profiles/seccomp/fixtures/example.json | 27 -
 profiles/seccomp/generate.go | 35 -
 profiles/seccomp/seccomp.go | 74 -
 profiles/seccomp/seccomp_default.go | 1879 -
 profiles/seccomp/seccomp_test.go | 28 -
 profiles/seccomp/seccomp_unsupported.go | 13 -
 reference/reference.go | 211 -
 reference/reference_test.go | 275 -
 reference/store.go | 287 -
 reference/store_test.go | 356 -
 registry/auth.go | 300 -
 registry/auth_test.go | 120 -
 registry/config.go | 274 -
 registry/config_test.go | 49 -
 registry/config_unix.go | 16 -
 registry/config_windows.go | 18 -
 registry/endpoint_test.go | 78 -
 registry/endpoint_v1.go | 198 -
 registry/reference.go | 68 -
 registry/registry.go | 190 -
 registry/registry_mock_test.go | 476 -
 registry/registry_test.go | 873 -
 registry/service.go | 260 -
 registry/service_v1.go | 53 -
 registry/service_v2.go | 79 -
 registry/session.go | 783 -
 registry/types.go | 70 -
 restartmanager/restartmanager.go | 128 -
 restartmanager/restartmanager_test.go | 34 -
 runconfig/compare.go | 61 -
 runconfig/compare_test.go | 126 -
 runconfig/config.go | 90 -
 runconfig/config_test.go | 134 -
 runconfig/config_unix.go | 59 -
 runconfig/config_windows.go | 19 -
 runconfig/errors.go | 40 -
 .../fixtures/unix/container_config_1_14.json | 30 -
 .../fixtures/unix/container_config_1_17.json | 50 -
 .../fixtures/unix/container_config_1_19.json | 58 -
 .../unix/container_hostconfig_1_14.json | 18 -
 .../unix/container_hostconfig_1_19.json | 30 -
 .../windows/container_config_1_19.json | 58 -
 runconfig/hostconfig.go | 35 -
 runconfig/hostconfig_solaris.go | 47 -
 runconfig/hostconfig_test.go | 222 -
 runconfig/hostconfig_unix.go | 106 -
 runconfig/hostconfig_windows.go | 51 -
 runconfig/opts/envfile.go | 67 -
 runconfig/opts/envfile_test.go | 142 -
 runconfig/opts/fixtures/valid.env | 1 -
 runconfig/opts/fixtures/valid.label | 1 -
 runconfig/opts/opts.go | 70 -
 runconfig/opts/opts_test.go | 108 -
 runconfig/opts/parse.go | 949 -
 runconfig/opts/parse_test.go | 870 -
 runconfig/opts/runtime.go | 74 -
 runconfig/opts/throttledevice.go | 113 -
 runconfig/opts/ulimit.go | 57 -
 runconfig/opts/ulimit_test.go | 42 -
 runconfig/opts/weightdevice.go | 89 -
 runconfig/streams.go | 109 -
 utils/debug.go | 26 -
 utils/debug_test.go | 43 -
 utils/experimental.go | 9 -
 utils/names.go | 12 -
 utils/process_unix.go | 22 -
 utils/process_windows.go | 20 -
 utils/stubs.go | 9 -
 utils/templates/templates.go | 33 -
 utils/templates/templates_test.go | 38 -
 utils/utils.go | 87 -
 utils/utils_test.go | 21 -
 .../src/bitbucket.org/ww/goautoneg/Makefile | 13 -
 .../src/bitbucket.org/ww/goautoneg/README.txt | 67 -
 .../src/bitbucket.org/ww/goautoneg/autoneg.go | 162 -
 .../src/github.com/Azure/go-ansiterm/LICENSE | 21 -
 .../github.com/Azure/go-ansiterm/README.md | 9 -
 .../github.com/Azure/go-ansiterm/constants.go | 188 -
 .../github.com/Azure/go-ansiterm/context.go | 7 -
 .../Azure/go-ansiterm/csi_entry_state.go | 49 -
 .../Azure/go-ansiterm/csi_param_state.go | 38 -
 .../go-ansiterm/escape_intermediate_state.go | 36 -
 .../Azure/go-ansiterm/escape_state.go | 47 -
 .../Azure/go-ansiterm/event_handler.go | 90 -
 .../Azure/go-ansiterm/ground_state.go | 24 -
 .../Azure/go-ansiterm/osc_string_state.go | 31 -
 .../github.com/Azure/go-ansiterm/parser.go | 136 -
 .../go-ansiterm/parser_action_helpers.go | 103 -
 .../Azure/go-ansiterm/parser_actions.go | 122 -
 .../github.com/Azure/go-ansiterm/states.go | 71 -
 .../github.com/Azure/go-ansiterm/utilities.go | 21 -
 .../Azure/go-ansiterm/winterm/ansi.go | 182 -
 .../Azure/go-ansiterm/winterm/api.go | 322 -
 .../go-ansiterm/winterm/attr_translation.go | 100 -
 .../go-ansiterm/winterm/cursor_helpers.go | 101 -
 .../go-ansiterm/winterm/erase_helpers.go | 84 -
 .../go-ansiterm/winterm/scroll_helper.go | 118 -
 .../Azure/go-ansiterm/winterm/utilities.go | 9 -
 .../go-ansiterm/winterm/win_event_handler.go | 726 -
 .../src/github.com/BurntSushi/toml/.gitignore | 5 -
 .../github.com/BurntSushi/toml/.travis.yml | 12 -
 .../src/github.com/BurntSushi/toml/COMPATIBLE | 3 -
 vendor/src/github.com/BurntSushi/toml/COPYING | 14 -
 .../src/github.com/BurntSushi/toml/Makefile | 19 -
 .../src/github.com/BurntSushi/toml/README.md | 220 -
 .../toml/cmd/toml-test-decoder/COPYING | 14 -
 .../toml/cmd/toml-test-encoder/COPYING | 14 -
 .../BurntSushi/toml/cmd/tomlv/COPYING | 14 -
 .../src/github.com/BurntSushi/toml/decode.go | 492 -
 .../github.com/BurntSushi/toml/decode_meta.go | 122 -
 vendor/src/github.com/BurntSushi/toml/doc.go | 27 -
 .../src/github.com/BurntSushi/toml/encode.go | 496 -
 .../BurntSushi/toml/encoding_types.go | 19 -
 .../BurntSushi/toml/encoding_types_1.1.go | 18 -
 vendor/src/github.com/BurntSushi/toml/lex.go | 874 -
 .../src/github.com/BurntSushi/toml/parse.go | 498 -
 .../github.com/BurntSushi/toml/session.vim | 1 -
 .../github.com/BurntSushi/toml/type_check.go | 91 -
 .../github.com/BurntSushi/toml/type_fields.go | 241 -
 .../src/github.com/Graylog2/go-gelf/LICENSE | 21 -
 .../Graylog2/go-gelf/gelf/reader.go | 140 -
 .../Graylog2/go-gelf/gelf/writer.go | 418 -
 .../src/github.com/RackSec/srslog/.gitignore | 1 -
 .../src/github.com/RackSec/srslog/.travis.yml | 18 -
 .../RackSec/srslog/CODE_OF_CONDUCT.md | 50 -
 vendor/src/github.com/RackSec/srslog/LICENSE | 27 -
 .../src/github.com/RackSec/srslog/README.md | 131 -
 .../github.com/RackSec/srslog/constants.go | 68 -
 .../src/github.com/RackSec/srslog/dialer.go | 87 -
 .../github.com/RackSec/srslog/formatter.go | 48 -
 .../src/github.com/RackSec/srslog/framer.go | 24 -
 .../src/github.com/RackSec/srslog/net_conn.go | 30 -
 .../src/github.com/RackSec/srslog/srslog.go | 100 -
 .../github.com/RackSec/srslog/srslog_unix.go | 54 -
 .../src/github.com/RackSec/srslog/writer.go | 164 -
 vendor/src/github.com/agl/ed25519/LICENSE | 27 -
 vendor/src/github.com/agl/ed25519/ed25519.go | 125 -
 .../agl/ed25519/edwards25519/const.go | 1411 -
 .../agl/ed25519/edwards25519/edwards25519.go | 2127 -
 .../github.com/armon/go-metrics/.gitignore | 22 -
 .../src/github.com/armon/go-metrics/LICENSE | 20 -
 .../src/github.com/armon/go-metrics/README.md | 68 -
 .../github.com/armon/go-metrics/const_unix.go | 12 -
 .../armon/go-metrics/const_windows.go | 13 -
 .../src/github.com/armon/go-metrics/inmem.go | 239 -
 .../armon/go-metrics/inmem_signal.go | 100 -
 .../github.com/armon/go-metrics/metrics.go | 115 -
 .../src/github.com/armon/go-metrics/sink.go | 52 -
 .../src/github.com/armon/go-metrics/start.go | 95 -
 .../src/github.com/armon/go-metrics/statsd.go | 154 -
 .../github.com/armon/go-metrics/statsite.go | 142 -
 .../src/github.com/armon/go-radix/.gitignore | 22 -
 .../src/github.com/armon/go-radix/.travis.yml | 3 -
 vendor/src/github.com/armon/go-radix/LICENSE | 20 -
 .../src/github.com/armon/go-radix/README.md | 36 -
 vendor/src/github.com/armon/go-radix/radix.go | 467 -
 .../src/github.com/aws/aws-sdk-go/LICENSE.txt | 202 -
 .../aws/aws-sdk-go/aws/awserr/error.go | 145 -
 .../aws/aws-sdk-go/aws/awserr/types.go | 194 -
 .../aws/aws-sdk-go/aws/awsutil/copy.go | 100 -
 .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 -
 .../aws/aws-sdk-go/aws/awsutil/path_value.go | 222 -
 .../aws/aws-sdk-go/aws/awsutil/prettify.go | 107 -
 .../aws-sdk-go/aws/awsutil/string_value.go | 89 -
 .../aws/aws-sdk-go/aws/client/client.go | 120 -
 .../aws-sdk-go/aws/client/default_retryer.go | 90 -
 .../aws/client/metadata/client_info.go | 12 -
 .../github.com/aws/aws-sdk-go/aws/config.go | 358 -
 .../aws/aws-sdk-go/aws/convert_types.go | 369 -
 .../aws-sdk-go/aws/corehandlers/handlers.go | 152 -
 .../aws/corehandlers/param_validator.go | 17 -
 .../aws/credentials/chain_provider.go | 100 -
 .../aws-sdk-go/aws/credentials/credentials.go | 223 -
 .../ec2rolecreds/ec2_role_provider.go | 178 -
 .../aws/credentials/env_provider.go | 77 -
 .../aws-sdk-go/aws/credentials/example.ini | 12 -
 .../shared_credentials_provider.go | 151 -
 .../aws/credentials/static_provider.go | 48 -
 .../aws/aws-sdk-go/aws/defaults/defaults.go | 98 -
 .../aws/aws-sdk-go/aws/ec2metadata/api.go | 140 -
 .../aws/aws-sdk-go/aws/ec2metadata/service.go | 124 -
 .../github.com/aws/aws-sdk-go/aws/errors.go | 17 -
 .../github.com/aws/aws-sdk-go/aws/logger.go | 112 -
 .../aws/aws-sdk-go/aws/request/handlers.go | 187 -
 .../aws-sdk-go/aws/request/http_request.go | 33 -
 .../aws/request/http_request_1_4.go | 31 -
 .../aws-sdk-go/aws/request/offset_reader.go | 49 -
 .../aws/aws-sdk-go/aws/request/request.go | 329 -
 .../aws/request/request_pagination.go | 104 -
 .../aws/aws-sdk-go/aws/request/retryer.go | 101 -
 .../aws/aws-sdk-go/aws/request/validation.go | 234 -
 .../aws/aws-sdk-go/aws/session/session.go | 120 -
 .../github.com/aws/aws-sdk-go/aws/types.go | 106 -
 .../github.com/aws/aws-sdk-go/aws/version.go | 8 -
 .../vendor/golang.org/x/tools/LICENSE | 27 -
 .../aws-sdk-go/private/endpoints/endpoints.go | 65 -
 .../private/endpoints/endpoints.json | 75 -
 .../private/endpoints/endpoints_map.go | 88 -
 .../private/protocol/idempotency.go | 75 -
 .../private/protocol/json/jsonutil/build.go | 254 -
 .../protocol/json/jsonutil/unmarshal.go | 213 -
 .../private/protocol/jsonrpc/jsonrpc.go | 111 -
 .../aws-sdk-go/private/protocol/rest/build.go | 256 -
 .../private/protocol/rest/payload.go | 45 -
 .../private/protocol/rest/unmarshal.go | 198 -
 .../aws-sdk-go/private/protocol/unmarshal.go | 21 -
 .../private/signer/v4/header_rules.go | 82 -
 .../aws/aws-sdk-go/private/signer/v4/v4.go | 465 -
 .../aws-sdk-go/service/cloudwatchlogs/api.go | 3141 -
 .../service/cloudwatchlogs/service.go | 116 -
 .../beorn7/perks/quantile/exampledata.txt | 2388 -
 .../beorn7/perks/quantile/stream.go | 292 -
 vendor/src/github.com/boltdb/bolt/.gitignore | 4 -
 vendor/src/github.com/boltdb/bolt/LICENSE | 20 -
 vendor/src/github.com/boltdb/bolt/Makefile | 18 -
 vendor/src/github.com/boltdb/bolt/README.md | 850 -
 .../src/github.com/boltdb/bolt/appveyor.yml | 18 -
 vendor/src/github.com/boltdb/bolt/bolt_386.go | 7 -
 .../src/github.com/boltdb/bolt/bolt_amd64.go | 7 -
 vendor/src/github.com/boltdb/bolt/bolt_arm.go | 7 -
 .../src/github.com/boltdb/bolt/bolt_arm64.go | 9 -
 .../src/github.com/boltdb/bolt/bolt_linux.go | 10 -
 .../github.com/boltdb/bolt/bolt_openbsd.go | 27 -
 vendor/src/github.com/boltdb/bolt/bolt_ppc.go | 9 -
 .../src/github.com/boltdb/bolt/bolt_ppc64.go | 9 -
 .../github.com/boltdb/bolt/bolt_ppc64le.go | 9 -
 .../src/github.com/boltdb/bolt/bolt_s390x.go | 9 -
 .../src/github.com/boltdb/bolt/bolt_unix.go | 89 -
 .../boltdb/bolt/bolt_unix_solaris.go | 90 -
 .../github.com/boltdb/bolt/bolt_windows.go | 144 -
 .../github.com/boltdb/bolt/boltsync_unix.go | 8 -
 vendor/src/github.com/boltdb/bolt/bucket.go | 748 -
 vendor/src/github.com/boltdb/bolt/cursor.go | 400 -
 vendor/src/github.com/boltdb/bolt/db.go | 1036 -
 vendor/src/github.com/boltdb/bolt/doc.go | 44 -
 vendor/src/github.com/boltdb/bolt/errors.go | 71 -
 vendor/src/github.com/boltdb/bolt/freelist.go | 242 -
 vendor/src/github.com/boltdb/bolt/node.go | 599 -
 vendor/src/github.com/boltdb/bolt/page.go | 172 -
 vendor/src/github.com/boltdb/bolt/tx.go | 682 -
 .../src/github.com/cloudflare/cfssl/LICENSE | 24 -
 .../github.com/cloudflare/cfssl/api/api.go | 231 -
 .../github.com/cloudflare/cfssl/auth/auth.go | 94 -
 .../cloudflare/cfssl/certdb/README.md | 71 -
 .../cloudflare/cfssl/certdb/certdb.go | 40 -
 .../cloudflare/cfssl/config/config.go | 563 -
 .../cloudflare/cfssl/crypto/pkcs7/pkcs7.go | 188 -
 .../github.com/cloudflare/cfssl/csr/csr.go | 431 -
 .../github.com/cloudflare/cfssl/errors/doc.go | 46 -
 .../cloudflare/cfssl/errors/error.go | 420 -
 .../cloudflare/cfssl/errors/http.go | 47 -
 .../cfssl/helpers/derhelpers/derhelpers.go | 42 -
 .../cloudflare/cfssl/helpers/helpers.go | 479 -
 .../github.com/cloudflare/cfssl/info/info.go | 15 -
 .../cloudflare/cfssl/initca/initca.go | 223 -
 .../github.com/cloudflare/cfssl/log/log.go | 170 -
 .../cloudflare/cfssl/ocsp/config/config.go | 13 -
 .../cloudflare/cfssl/signer/local/local.go | 469 -
 .../cloudflare/cfssl/signer/signer.go | 410 -
 .../cloudflare/cfssl/whitelist/LICENSE | 13 -
 vendor/src/github.com/coreos/etcd/LICENSE | 202 -
 .../github.com/coreos/etcd/client/README.md | 110 -
 .../coreos/etcd/client/auth_role.go | 237 -
 .../coreos/etcd/client/auth_user.go | 324 -
 .../coreos/etcd/client/cancelreq.go | 20 -
 .../coreos/etcd/client/cancelreq_go14.go | 17 -
 .../github.com/coreos/etcd/client/client.go | 598 -
 .../coreos/etcd/client/cluster_error.go | 33 -
 .../src/github.com/coreos/etcd/client/curl.go | 70 -
 .../github.com/coreos/etcd/client/discover.go | 21 -
 .../src/github.com/coreos/etcd/client/doc.go | 71 -
 .../coreos/etcd/client/keys.generated.go | 1000 -
 .../src/github.com/coreos/etcd/client/keys.go | 663 -
 .../github.com/coreos/etcd/client/members.go | 304 -
 .../src/github.com/coreos/etcd/client/srv.go | 65 -
 .../src/github.com/coreos/etcd/client/util.go | 23 -
 .../src/github.com/coreos/etcd/pkg/crc/crc.go | 43 -
 .../coreos/etcd/pkg/fileutil/fileutil.go | 75 -
 .../coreos/etcd/pkg/fileutil/lock.go | 29 -
 .../coreos/etcd/pkg/fileutil/lock_plan9.go | 79 -
 .../coreos/etcd/pkg/fileutil/lock_solaris.go | 87 -
 .../coreos/etcd/pkg/fileutil/lock_unix.go | 65 -
 .../coreos/etcd/pkg/fileutil/lock_windows.go | 60 -
 .../pkg/fileutil/perallocate_unsupported.go | 28 -
 .../coreos/etcd/pkg/fileutil/preallocate.go | 42 -
 .../coreos/etcd/pkg/fileutil/purge.go | 80 -
 .../coreos/etcd/pkg/fileutil/sync.go | 26 -
 .../coreos/etcd/pkg/fileutil/sync_linux.go | 29 -
 .../github.com/coreos/etcd/pkg/idutil/id.go | 78 -
 .../coreos/etcd/pkg/pathutil/path.go | 31 -
 .../coreos/etcd/pkg/pbutil/pbutil.go | 60 -
 .../github.com/coreos/etcd/pkg/types/doc.go | 17 -
 .../github.com/coreos/etcd/pkg/types/id.go | 41 -
 .../github.com/coreos/etcd/pkg/types/set.go | 178 -
 .../github.com/coreos/etcd/pkg/types/slice.go | 22 -
 .../github.com/coreos/etcd/pkg/types/urls.go | 74 -
 .../coreos/etcd/pkg/types/urlsmap.go | 93 -
 .../src/github.com/coreos/etcd/raft/design.md | 57 -
 vendor/src/github.com/coreos/etcd/raft/doc.go | 293 -
 vendor/src/github.com/coreos/etcd/raft/log.go | 361 -
 .../coreos/etcd/raft/log_unstable.go | 139 -
 .../src/github.com/coreos/etcd/raft/logger.go | 126 -
 .../src/github.com/coreos/etcd/raft/node.go | 488 -
 .../github.com/coreos/etcd/raft/progress.go | 245 -
 .../src/github.com/coreos/etcd/raft/raft.go | 898 -
 .../coreos/etcd/raft/raftpb/raft.pb.go | 1768 -
 .../coreos/etcd/raft/raftpb/raft.proto | 86 -
 .../github.com/coreos/etcd/raft/rawnode.go | 228 -
 .../src/github.com/coreos/etcd/raft/status.go | 76 -
 .../github.com/coreos/etcd/raft/storage.go | 252 -
 .../src/github.com/coreos/etcd/raft/util.go | 116 -
 vendor/src/github.com/coreos/etcd/snap/db.go | 74 -
 .../github.com/coreos/etcd/snap/message.go | 59 -
 .../github.com/coreos/etcd/snap/metrics.go | 41 -
 .../coreos/etcd/snap/snappb/snap.pb.go | 332 -
 .../coreos/etcd/snap/snappb/snap.proto | 14 -
 .../coreos/etcd/snap/snapshotter.go | 189 -
 .../src/github.com/coreos/etcd/wal/decoder.go | 103 -
 vendor/src/github.com/coreos/etcd/wal/doc.go | 68 -
 .../src/github.com/coreos/etcd/wal/encoder.go | 89 -
 .../src/github.com/coreos/etcd/wal/metrics.go | 38 -
 .../coreos/etcd/wal/multi_readcloser.go | 45 -
 .../src/github.com/coreos/etcd/wal/repair.go | 106 -
 vendor/src/github.com/coreos/etcd/wal/util.go | 93 -
 vendor/src/github.com/coreos/etcd/wal/wal.go | 562 -
 .../coreos/etcd/wal/walpb/record.go | 29 -
 .../coreos/etcd/wal/walpb/record.pb.go | 495 -
 .../coreos/etcd/wal/walpb/record.proto | 20 -
 .../src/github.com/coreos/go-systemd/LICENSE | 191 -
 .../coreos/go-systemd/activation/files.go | 52 -
 .../coreos/go-systemd/activation/listeners.go | 62 -
 .../go-systemd/activation/packetconns.go | 37 -
 .../coreos/go-systemd/daemon/sdnotify.go | 31 -
 .../coreos/go-systemd/journal/journal.go | 178 -
 vendor/src/github.com/coreos/pkg/LICENSE | 202 -
 .../github.com/coreos/pkg/capnslog/README.md | 39 -
 .../coreos/pkg/capnslog/formatters.go | 106 -
 .../coreos/pkg/capnslog/glog_formatter.go | 96 -
 .../github.com/coreos/pkg/capnslog/init.go | 49 -
 .../coreos/pkg/capnslog/init_windows.go | 25 -
 .../coreos/pkg/capnslog/journald_formatter.go | 68 -
 .../coreos/pkg/capnslog/log_hijack.go | 39 -
 .../github.com/coreos/pkg/capnslog/logmap.go | 240 -
 .../coreos/pkg/capnslog/pkg_logger.go | 158 -
 .../coreos/pkg/capnslog/syslog_formatter.go | 65 -
 .../github.com/deckarep/golang-set/.gitignore | 22 -
 .../deckarep/golang-set/.travis.yml | 9 -
 .../github.com/deckarep/golang-set/LICENSE | 22 -
 .../github.com/deckarep/golang-set/README.md | 94 -
 .../src/github.com/deckarep/golang-set/set.go | 168 -
 .../deckarep/golang-set/threadsafe.go | 204 -
 .../deckarep/golang-set/threadunsafe.go | 246 -
 .../github.com/docker/containerd/LICENSE.code | 191 -
 .../github.com/docker/containerd/LICENSE.docs | 425 -
 .../containerd/api/grpc/types/api.pb.go | 1468 -
 .../containerd/api/grpc/types/api.proto | 313 -
 .../github.com/docker/distribution/.gitignore | 37 -
 .../github.com/docker/distribution/.mailmap | 18 -
 .../github.com/docker/distribution/AUTHORS | 147 -
 .../docker/distribution/BUILDING.md | 119 -
 .../docker/distribution/CHANGELOG.md | 35 -
 .../docker/distribution/CONTRIBUTING.md | 140 -
 .../github.com/docker/distribution/Dockerfile | 18 -
 .../github.com/docker/distribution/LICENSE | 202 -
 .../docker/distribution/MAINTAINERS | 58 -
 .../github.com/docker/distribution/Makefile | 106 -
 .../github.com/docker/distribution/README.md | 131 -
 .../github.com/docker/distribution/ROADMAP.md | 267 -
 .../github.com/docker/distribution/blobs.go | 245 -
 .../github.com/docker/distribution/circle.yml | 89 -
 .../docker/distribution/context/context.go | 85 -
 .../docker/distribution/context/doc.go | 89 -
 .../docker/distribution/context/http.go | 364 -
 .../docker/distribution/context/logger.go | 116 -
 .../docker/distribution/context/trace.go | 104 -
 .../docker/distribution/context/util.go | 24 -
 .../docker/distribution/context/version.go | 16 -
 .../docker/distribution/coverpkg.sh | 7 -
 .../docker/distribution/digest/digest.go | 139 -
 .../docker/distribution/digest/digester.go | 155 -
 .../docker/distribution/digest/doc.go | 42 -
 .../docker/distribution/digest/set.go | 245 -
 .../docker/distribution/digest/verifiers.go | 44 -
 .../src/github.com/docker/distribution/doc.go | 7 -
 .../github.com/docker/distribution/errors.go | 115 -
 .../docker/distribution/manifest/doc.go | 1 -
 .../manifest/manifestlist/manifestlist.go | 155 -
 .../manifest/schema1/config_builder.go | 283 -
 .../distribution/manifest/schema1/manifest.go | 184 -
 .../manifest/schema1/reference_builder.go | 98 -
 .../distribution/manifest/schema1/sign.go | 68 -
 .../distribution/manifest/schema1/verify.go | 32 -
 .../distribution/manifest/schema2/builder.go | 80 -
 .../distribution/manifest/schema2/manifest.go | 128 -
 .../docker/distribution/manifest/versioned.go | 12 -
 .../docker/distribution/manifests.go | 117 -
 .../distribution/reference/reference.go | 334 -
 .../docker/distribution/reference/regexp.go | 124 -
 .../docker/distribution/registry.go | 97 -
 .../registry/api/errcode/errors.go | 267 -
 .../registry/api/errcode/handler.go | 44 -
 .../registry/api/errcode/register.go | 138 -
 .../registry/api/v2/descriptors.go | 1569 -
 .../distribution/registry/api/v2/doc.go | 9 -
 .../distribution/registry/api/v2/errors.go | 136 -
 .../distribution/registry/api/v2/routes.go | 49 -
 .../distribution/registry/api/v2/urls.go | 251 -
 .../registry/client/auth/api_version.go | 58 -
 .../registry/client/auth/authchallenge.go | 220 -
 .../registry/client/auth/session.go | 497 -
 .../registry/client/blob_writer.go | 162 -
 .../distribution/registry/client/errors.go | 107 -
 .../registry/client/repository.go | 863 -
 .../registry/client/transport/http_reader.go | 250 -
 .../registry/client/transport/transport.go | 147 -
 .../registry/storage/cache/cache.go | 35 -
 .../cache/cachedblobdescriptorstore.go | 101 -
 .../registry/storage/cache/memory/memory.go | 170 -
 .../github.com/docker/distribution/tags.go | 27 -
 .../docker/distribution/uuid/uuid.go | 126 -
 .../docker/docker-credential-helpers/LICENSE | 20 -
 .../client/client.go | 70 -
 .../client/command.go | 37 -
 .../credentials/credentials.go | 129 -
 .../credentials/error.go | 37 -
 .../credentials/helper.go | 12 -
 .../github.com/docker/go-connections/LICENSE | 191 -
 .../docker/go-connections/nat/nat.go | 243 -
 .../docker/go-connections/nat/parse.go | 56 -
 .../docker/go-connections/nat/sort.go | 96 -
 .../docker/go-connections/sockets/README.md | 0
 .../go-connections/sockets/inmem_socket.go | 81 -
 .../docker/go-connections/sockets/proxy.go | 51 -
 .../docker/go-connections/sockets/sockets.go | 42 -
 .../go-connections/sockets/sockets_unix.go | 15 -
 .../go-connections/sockets/sockets_windows.go | 13 -
 .../go-connections/sockets/tcp_socket.go | 22 -
 .../go-connections/sockets/unix_socket.go | 80 -
 .../docker/go-connections/tlsconfig/config.go | 122 -
 .../tlsconfig/config_client_ciphers.go | 17 -
 .../tlsconfig/config_legacy_client_ciphers.go | 15 -
 .../github.com/docker/go-events/.gitignore | 24 -
 .../src/github.com/docker/go-events/LICENSE | 201 -
 .../src/github.com/docker/go-events/README.md | 112 -
 .../github.com/docker/go-events/broadcast.go | 158 -
 .../github.com/docker/go-events/channel.go | 47 -
 .../src/github.com/docker/go-events/errors.go | 10 -
 .../src/github.com/docker/go-events/event.go | 15 -
 .../src/github.com/docker/go-events/filter.go | 52 -
 .../src/github.com/docker/go-events/queue.go | 104 -
 .../src/github.com/docker/go-events/retry.go | 249 -
 vendor/src/github.com/docker/go/LICENSE | 27 -
 .../docker/go/canonical/json/decode.go | 1094 -
 .../docker/go/canonical/json/encode.go | 1245 -
 .../docker/go/canonical/json/fold.go | 143 -
 .../docker/go/canonical/json/indent.go | 137 -
 .../docker/go/canonical/json/scanner.go | 630 -
 .../docker/go/canonical/json/stream.go | 487 -
 .../docker/go/canonical/json/tags.go | 44 -
 .../src/github.com/docker/libkv/.travis.yml | 31 -
 .../src/github.com/docker/libkv/LICENSE.code | 191 -
 .../src/github.com/docker/libkv/LICENSE.docs | 425 -
 .../src/github.com/docker/libkv/MAINTAINERS | 46 -
 vendor/src/github.com/docker/libkv/README.md | 107 -
 vendor/src/github.com/docker/libkv/libkv.go | 40 -
 .../docker/libkv/store/boltdb/boltdb.go | 469 -
 .../docker/libkv/store/consul/consul.go | 558 -
 .../docker/libkv/store/etcd/etcd.go | 606 -
 .../github.com/docker/libkv/store/helpers.go | 47 -
 .../github.com/docker/libkv/store/store.go | 132 -
 .../docker/libkv/store/zookeeper/zookeeper.go | 429 -
 .../docker/libnetwork/.dockerignore | 1 -
 .../github.com/docker/libnetwork/.gitignore | 39 -
 .../github.com/docker/libnetwork/CHANGELOG.md | 199 -
 .../docker/libnetwork/Dockerfile.build | 7 -
 .../src/github.com/docker/libnetwork/LICENSE | 202 -
 .../github.com/docker/libnetwork/MAINTAINERS | 64 -
 .../src/github.com/docker/libnetwork/Makefile | 108 -
 .../github.com/docker/libnetwork/README.md | 89 -
 .../github.com/docker/libnetwork/ROADMAP.md | 20 -
 .../src/github.com/docker/libnetwork/agent.go | 689 -
 .../github.com/docker/libnetwork/agent.pb.go | 948 -
 .../github.com/docker/libnetwork/agent.proto | 69 -
 .../docker/libnetwork/bitseq/sequence.go | 678 -
 .../docker/libnetwork/bitseq/store.go | 141 -
 .../github.com/docker/libnetwork/circle.yml | 17 -
 .../docker/libnetwork/cluster/provider.go | 10 -
 .../docker/libnetwork/cmd/proxy/main.go | 67 -
 .../docker/libnetwork/cmd/proxy/proxy.go | 37 -
 .../docker/libnetwork/cmd/proxy/stub_proxy.go | 31 -
 .../docker/libnetwork/cmd/proxy/tcp_proxy.go | 96 -
 .../docker/libnetwork/cmd/proxy/udp_proxy.go | 169 -
 .../docker/libnetwork/config/config.go | 256 -
 .../docker/libnetwork/config/libnetwork.toml | 12 -
 .../docker/libnetwork/controller.go | 1030 -
 .../docker/libnetwork/datastore/cache.go | 177 -
 .../docker/libnetwork/datastore/datastore.go | 625 -
 .../docker/libnetwork/datastore/mock_store.go | 129 -
 .../docker/libnetwork/default_gateway.go | 174 -
 .../libnetwork/default_gateway_freebsd.go | 7 -
 .../libnetwork/default_gateway_linux.go | 26 -
 .../libnetwork/default_gateway_solaris.go | 7 -
 .../libnetwork/default_gateway_windows.go | 7 -
 .../libnetwork/discoverapi/discoverapi.go | 59 -
 .../docker/libnetwork/driverapi/driverapi.go | 171 -
 .../docker/libnetwork/driverapi/errors.go | 56 -
 .../docker/libnetwork/driverapi/ipamdata.go | 103 -
 .../libnetwork/drivers/bridge/bridge.go | 1446 -
 .../libnetwork/drivers/bridge/bridge_store.go | 378 -
 .../libnetwork/drivers/bridge/errors.go | 341 -
 .../libnetwork/drivers/bridge/interface.go | 85 -
 .../libnetwork/drivers/bridge/labels.go | 18 -
 .../docker/libnetwork/drivers/bridge/link.go | 85 -
 .../bridge/netlink_deprecated_linux.go | 131 -
 .../netlink_deprecated_linux_armppc64.go | 7 -
 .../bridge/netlink_deprecated_linux_notarm.go | 7 -
 .../bridge/netlink_deprecated_unsupported.go | 18 -
 .../libnetwork/drivers/bridge/port_mapping.go | 128 -
 .../libnetwork/drivers/bridge/resolvconf.go | 67 -
 .../docker/libnetwork/drivers/bridge/setup.go | 26 -
 .../bridge/setup_bridgenetfiltering.go | 162 -
 .../libnetwork/drivers/bridge/setup_device.go | 68 -
 .../drivers/bridge/setup_firewalld.go | 20 -
 .../drivers/bridge/setup_ip_forwarding.go | 29 -
 .../drivers/bridge/setup_ip_tables.go | 348 -
 .../libnetwork/drivers/bridge/setup_ipv4.go | 62 -
 .../libnetwork/drivers/bridge/setup_ipv6.go | 119 -
 .../libnetwork/drivers/bridge/setup_verify.go | 53 -
 .../docker/libnetwork/drivers/host/host.go | 97 -
 .../libnetwork/drivers/ipvlan/ipvlan.go | 106 -
 .../drivers/ipvlan/ipvlan_endpoint.go | 87 -
 .../drivers/ipvlan/ipvlan_joinleave.go | 199 -
 .../drivers/ipvlan/ipvlan_network.go | 240 -
 .../libnetwork/drivers/ipvlan/ipvlan_setup.go | 205 -
 .../libnetwork/drivers/ipvlan/ipvlan_state.go | 115 -
 .../libnetwork/drivers/ipvlan/ipvlan_store.go | 349 -
 .../libnetwork/drivers/macvlan/macvlan.go | 108 -
 .../drivers/macvlan/macvlan_endpoint.go | 94 -
 .../drivers/macvlan/macvlan_joinleave.go | 144 -
 .../drivers/macvlan/macvlan_network.go | 248 -
 .../drivers/macvlan/macvlan_setup.go | 209 -
 .../drivers/macvlan/macvlan_state.go | 113 -
 .../drivers/macvlan/macvlan_store.go | 351 -
 .../docker/libnetwork/drivers/null/null.go | 97 -
 .../libnetwork/drivers/overlay/encryption.go | 562 -
 .../libnetwork/drivers/overlay/filter.go | 127 -
 .../libnetwork/drivers/overlay/joinleave.go | 224 -
 .../libnetwork/drivers/overlay/ov_endpoint.go | 259 -
 .../libnetwork/drivers/overlay/ov_network.go | 942 -
 .../libnetwork/drivers/overlay/ov_serf.go | 233 -
 .../libnetwork/drivers/overlay/ov_utils.go | 155 -
 .../libnetwork/drivers/overlay/overlay.go | 349 -
 .../libnetwork/drivers/overlay/overlay.pb.go | 468 -
 .../libnetwork/drivers/overlay/overlay.proto | 27 -
 .../drivers/overlay/ovmanager/ovmanager.go | 248 -
 .../libnetwork/drivers/overlay/peerdb.go | 341 -
 .../libnetwork/drivers/remote/api/api.go | 188 -
 .../libnetwork/drivers/remote/driver.go | 368 -
 .../libnetwork/drivers/windows/labels.go | 18 -
 .../libnetwork/drivers/windows/windows.go | 595 -
 .../libnetwork/drivers_experimental_linux.go | 11 -
 .../docker/libnetwork/drivers_freebsd.go | 13 -
 .../docker/libnetwork/drivers_ipam.go | 23 -
 .../docker/libnetwork/drivers_linux.go | 24 -
 .../docker/libnetwork/drivers_solaris.go | 5 -
 .../docker/libnetwork/drivers_stub_linux.go | 7 -
 .../docker/libnetwork/drivers_windows.go | 16 -
 .../libnetwork/drvregistry/drvregistry.go | 219 -
 .../github.com/docker/libnetwork/endpoint.go | 1119 -
 .../docker/libnetwork/endpoint_cnt.go | 169 -
 .../docker/libnetwork/endpoint_info.go | 469 -
 .../src/github.com/docker/libnetwork/error.go | 185 -
 .../docker/libnetwork/etchosts/etchosts.go | 208 -
 .../libnetwork/hostdiscovery/hostdiscovery.go | 121 -
 .../hostdiscovery/hostdiscovery_api.go | 22 -
 .../libnetwork/hostdiscovery/libnetwork.toml | 6 -
 .../github.com/docker/libnetwork/idm/idm.go | 60 -
 .../docker/libnetwork/ipam/allocator.go | 589 -
 .../docker/libnetwork/ipam/store.go | 136 -
 .../docker/libnetwork/ipam/structures.go | 362 -
 .../docker/libnetwork/ipam/utils.go | 81 -
 .../docker/libnetwork/ipamapi/contract.go | 89 -
 .../libnetwork/ipams/builtin/builtin_unix.go | 43 -
 .../ipams/builtin/builtin_windows.go | 16 -
 .../docker/libnetwork/ipams/null/null.go | 71 -
 .../docker/libnetwork/ipams/remote/api/api.go | 94 -
 .../docker/libnetwork/ipams/remote/remote.go | 137 -
 .../ipams/windowsipam/windowsipam.go | 100 -
 .../docker/libnetwork/ipamutils/utils.go | 50 -
 .../docker/libnetwork/iptables/firewalld.go | 168 -
 .../docker/libnetwork/iptables/iptables.go | 431 -
 .../docker/libnetwork/ipvs/constants.go | 130 -
 .../github.com/docker/libnetwork/ipvs/ipvs.go | 113 -
 .../docker/libnetwork/ipvs/netlink.go | 234 -
 .../src/github.com/docker/libnetwork/machines | 111 -
 .../docker/libnetwork/netlabel/labels.go | 123 -
 .../docker/libnetwork/netutils/utils.go | 194 -
 .../libnetwork/netutils/utils_freebsd.go | 21 -
 .../docker/libnetwork/netutils/utils_linux.go | 125 -
 .../libnetwork/netutils/utils_solaris.go | 32 -
 .../libnetwork/netutils/utils_windows.go | 21 -
 .../github.com/docker/libnetwork/network.go | 1518 -
 .../docker/libnetwork/networkdb/broadcast.go | 111 -
 .../docker/libnetwork/networkdb/cluster.go | 535 -
 .../docker/libnetwork/networkdb/delegate.go | 332 -
 .../libnetwork/networkdb/event_delegate.go | 24 -
 .../docker/libnetwork/networkdb/message.go | 102 -
 .../docker/libnetwork/networkdb/networkdb.go | 451 -
 .../libnetwork/networkdb/networkdb.pb.go | 2266 -
 .../libnetwork/networkdb/networkdb.proto | 156 -
 .../docker/libnetwork/networkdb/watch.go | 98 -
 .../docker/libnetwork/ns/init_linux.go | 86 -
 .../docker/libnetwork/options/options.go | 88 -
 .../libnetwork/osl/interface_freebsd.go | 4 -
 .../docker/libnetwork/osl/interface_linux.go | 440 -
 .../libnetwork/osl/interface_solaris.go | 4 -
 .../libnetwork/osl/interface_windows.go | 4 -
 .../docker/libnetwork/osl/namespace_linux.go | 489 -
 .../libnetwork/osl/namespace_unsupported.go | 12 -
 .../libnetwork/osl/namespace_windows.go | 39 -
 .../docker/libnetwork/osl/neigh_freebsd.go | 4 -
 .../docker/libnetwork/osl/neigh_linux.go | 146 -
 .../docker/libnetwork/osl/neigh_solaris.go | 4 -
 .../docker/libnetwork/osl/neigh_windows.go | 4 -
 .../docker/libnetwork/osl/options_linux.go | 79 -
 .../docker/libnetwork/osl/route_linux.go | 203 -
 .../docker/libnetwork/osl/sandbox.go | 171 -
 .../docker/libnetwork/osl/sandbox_freebsd.go | 40 -
 .../libnetwork/osl/sandbox_unsupported.go | 22 -
 .../libnetwork/portallocator/portallocator.go | 270 -
 .../docker/libnetwork/portmapper/mapper.go | 234 -
 .../libnetwork/portmapper/mock_proxy.go | 18 -
 .../docker/libnetwork/portmapper/proxy.go | 148 -
 .../docker/libnetwork/resolvconf/README.md | 1 -
 .../libnetwork/resolvconf/dns/resolvconf.go | 17 -
 .../libnetwork/resolvconf/resolvconf.go | 247 -
 .../github.com/docker/libnetwork/resolver.go | 623 -
 .../docker/libnetwork/resolver_unix.go | 101 -
 .../docker/libnetwork/resolver_windows.go | 7 -
 .../github.com/docker/libnetwork/sandbox.go | 1163 -
 .../docker/libnetwork/sandbox_dns_unix.go | 350 -
 .../docker/libnetwork/sandbox_dns_windows.go | 35 -
 .../docker/libnetwork/sandbox_externalkey.go | 12 -
 .../libnetwork/sandbox_externalkey_solaris.go | 45 -
 .../libnetwork/sandbox_externalkey_unix.go | 177 -
 .../libnetwork/sandbox_externalkey_windows.go | 45 -
 .../docker/libnetwork/sandbox_store.go | 278 -
 .../github.com/docker/libnetwork/service.go | 61 -
 .../docker/libnetwork/service_linux.go | 730 -
 .../docker/libnetwork/service_unsupported.go | 19 -
 .../src/github.com/docker/libnetwork/store.go | 475 -
 .../docker/libnetwork/types/types.go | 636 -
 .../github.com/docker/libnetwork/wrapmake.sh | 11 -
 .../docker/libtrust/CONTRIBUTING.md | 13 -
 vendor/src/github.com/docker/libtrust/LICENSE | 191 -
 .../github.com/docker/libtrust/MAINTAINERS | 3 -
 .../src/github.com/docker/libtrust/README.md | 18 -
 .../docker/libtrust/certificates.go | 175 -
 vendor/src/github.com/docker/libtrust/doc.go | 9 -
 .../src/github.com/docker/libtrust/ec_key.go | 428 -
 .../src/github.com/docker/libtrust/filter.go | 50 -
 vendor/src/github.com/docker/libtrust/hash.go | 56 -
 .../github.com/docker/libtrust/jsonsign.go | 657 -
 vendor/src/github.com/docker/libtrust/key.go | 253 -
 .../github.com/docker/libtrust/key_files.go | 255 -
 .../github.com/docker/libtrust/key_manager.go | 175 -
 .../src/github.com/docker/libtrust/rsa_key.go | 427 -
 vendor/src/github.com/docker/libtrust/util.go | 363 -
 .../src/github.com/docker/notary/.gitignore | 11 -
 .../src/github.com/docker/notary/CHANGELOG.md | 26 -
 .../github.com/docker/notary/CONTRIBUTING.md | 85 -
 .../src/github.com/docker/notary/CONTRIBUTORS | 4 -
 .../src/github.com/docker/notary/Dockerfile | 35 -
 vendor/src/github.com/docker/notary/LICENSE | 201 -
 .../src/github.com/docker/notary/MAINTAINERS | 58 -
 vendor/src/github.com/docker/notary/Makefile | 214 -
 .../github.com/docker/notary/NOTARY_VERSION | 1 -
 vendor/src/github.com/docker/notary/README.md | 99 -
 .../src/github.com/docker/notary/ROADMAP.md | 7 -
 .../src/github.com/docker/notary/circle.yml | 87 -
 .../docker/notary/client/changelist/change.go | 101 -
 .../notary/client/changelist/changelist.go | 59 -
 .../client/changelist/file_changelist.go | 176 -
 .../notary/client/changelist/interface.go | 70 -
 .../github.com/docker/notary/client/client.go | 927 -
 .../docker/notary/client/delegations.go | 294 -
 .../docker/notary/client/helpers.go | 237 -
 .../github.com/docker/notary/client/repo.go | 29 -
 .../docker/notary/client/repo_pkcs11.go | 34 -
 .../src/github.com/docker/notary/codecov.yml | 18 -
 vendor/src/github.com/docker/notary/const.go | 68 -
 .../src/github.com/docker/notary/coverpkg.sh | 10 -
 .../notary/cryptoservice/certificate.go | 41 -
 .../notary/cryptoservice/crypto_service.go | 155 -
 .../notary/cryptoservice/import_export.go | 313 -
 .../docker/notary/development.rethink.yml | 113 -
 .../github.com/docker/notary/development.yml | 36 -
 .../docker/notary/docker-compose.rethink.yml | 102 -
 .../docker/notary/docker-compose.yml | 34 -
 .../docker/notary/passphrase/passphrase.go | 201 -
 .../docker/notary/server.Dockerfile | 25 -
 .../docker/notary/signer.Dockerfile | 28 -
 .../docker/notary/trustmanager/filestore.go | 150 -
 .../notary/trustmanager/keyfilestore.go | 497 -
 .../docker/notary/trustmanager/keystore.go | 59 -
 .../docker/notary/trustmanager/memorystore.go | 67 -
 .../docker/notary/trustmanager/store.go | 42 -
 .../docker/notary/trustmanager/x509utils.go | 524 -
 .../notary/trustmanager/yubikey/non_pkcs11.go | 9 -
 .../trustmanager/yubikey/pkcs11_darwin.go | 9 -
 .../trustmanager/yubikey/pkcs11_interface.go | 40 -
 .../trustmanager/yubikey/pkcs11_linux.go | 10 -
 .../trustmanager/yubikey/yubikeystore.go | 920 -
 .../docker/notary/trustpinning/certs.go | 268 -
 .../docker/notary/trustpinning/trustpin.go | 114 -
 .../src/github.com/docker/notary/tuf/LICENSE | 30 -
 .../github.com/docker/notary/tuf/README.md | 36 -
 .../github.com/docker/notary/tuf/builder.go | 673 -
 .../docker/notary/tuf/client/client.go | 229 -
 .../docker/notary/tuf/client/errors.go | 14 -
 .../docker/notary/tuf/data/errors.go | 44 -
 .../github.com/docker/notary/tuf/data/keys.go | 528 -
 .../docker/notary/tuf/data/roles.go | 313 -
 .../github.com/docker/notary/tuf/data/root.go | 171 -
 .../docker/notary/tuf/data/serializer.go | 36 -
 .../docker/notary/tuf/data/snapshot.go | 169 -
 .../docker/notary/tuf/data/targets.go | 198 -
 .../docker/notary/tuf/data/timestamp.go | 136 -
 .../docker/notary/tuf/data/types.go | 276 -
 .../docker/notary/tuf/signed/ed25519.go | 107 -
 .../docker/notary/tuf/signed/errors.go | 91 -
 .../docker/notary/tuf/signed/interface.go | 49 -
 .../docker/notary/tuf/signed/sign.go | 113 -
 .../docker/notary/tuf/signed/verifiers.go | 283 -
 .../docker/notary/tuf/signed/verify.go | 107 -
 .../docker/notary/tuf/store/errors.go | 13 -
 .../docker/notary/tuf/store/filestore.go | 102 -
 .../docker/notary/tuf/store/httpstore.go | 297 -
 .../docker/notary/tuf/store/interfaces.go | 31 -
 .../docker/notary/tuf/store/memorystore.go | 107 -
 .../docker/notary/tuf/store/offlinestore.go | 53 -
 .../src/github.com/docker/notary/tuf/tuf.go | 1069 -
 .../docker/notary/tuf/utils/role_sort.go | 31 -
 .../docker/notary/tuf/utils/stack.go | 85 -
 .../docker/notary/tuf/utils/util.go | 109 -
 .../docker/notary/tuf/utils/utils.go | 152 -
 .../docker/notary/tuf/validation/errors.go | 126 -
 vendor/src/github.com/docker/swarmkit/LICENSE | 201 -
 .../github.com/docker/swarmkit/agent/agent.go | 359 -
 .../docker/swarmkit/agent/config.go | 55 -
 .../docker/swarmkit/agent/errors.go | 24 -
 .../docker/swarmkit/agent/exec/controller.go | 292 -
 .../agent/exec/controller_test.mock.go | 143 -
 .../docker/swarmkit/agent/exec/errors.go | 84 -
 .../docker/swarmkit/agent/exec/executor.go | 23 -
 .../docker/swarmkit/agent/helpers.go | 13 -
 .../github.com/docker/swarmkit/agent/node.go | 759 -
 .../docker/swarmkit/agent/reporter.go | 124 -
 .../docker/swarmkit/agent/session.go | 291 -
 .../docker/swarmkit/agent/storage.go | 216 -
 .../github.com/docker/swarmkit/agent/task.go | 243 -
 .../docker/swarmkit/agent/worker.go | 260 -
 .../github.com/docker/swarmkit/api/README.md | 8 -
 .../github.com/docker/swarmkit/api/ca.pb.go | 1650 -
 .../github.com/docker/swarmkit/api/ca.proto | 53 -
 .../docker/swarmkit/api/control.pb.go | 10075 -
 .../docker/swarmkit/api/control.proto | 273 -
 .../docker/swarmkit/api/dispatcher.pb.go | 2440 -
 .../docker/swarmkit/api/dispatcher.proto | 154 -
 .../swarmkit/api/duration/duration.pb.go | 456 -
 .../swarmkit/api/duration/duration.proto | 100 -
 .../docker/swarmkit/api/duration/gen.go | 3 -
 .../src/github.com/docker/swarmkit/api/gen.go | 3 -
 .../docker/swarmkit/api/health.pb.go | 714 -
 .../docker/swarmkit/api/health.proto | 34 -
 .../docker/swarmkit/api/objects.pb.go | 3593 -
 .../docker/swarmkit/api/objects.proto | 220 -
 .../github.com/docker/swarmkit/api/raft.pb.go | 3212 -
 .../github.com/docker/swarmkit/api/raft.proto | 128 -
 .../docker/swarmkit/api/snapshot.pb.go | 1115 -
 .../docker/swarmkit/api/snapshot.proto | 40 -
 .../docker/swarmkit/api/specs.pb.go | 4221 -
 .../docker/swarmkit/api/specs.proto | 257 -
 .../docker/swarmkit/api/timestamp/gen.go | 3 -
 .../swarmkit/api/timestamp/timestamp.pb.go | 469 -
 .../swarmkit/api/timestamp/timestamp.proto | 121 -
 .../docker/swarmkit/api/types.pb.go | 10648 --
 .../docker/swarmkit/api/types.proto | 627 -
 .../src/github.com/docker/swarmkit/ca/auth.go | 213 -
 .../docker/swarmkit/ca/certificates.go | 765 -
 .../github.com/docker/swarmkit/ca/config.go | 546 -
 .../github.com/docker/swarmkit/ca/external.go | 141 -
 .../github.com/docker/swarmkit/ca/forward.go | 67 -
 .../github.com/docker/swarmkit/ca/server.go | 704 -
 .../docker/swarmkit/ca/transport.go | 202 -
 .../docker/swarmkit/identity/doc.go | 17 -
 .../docker/swarmkit/identity/randomid.go | 50 -
 .../docker/swarmkit/ioutils/ioutils.go | 40 -
 .../github.com/docker/swarmkit/log/context.go | 37 -
 .../github.com/docker/swarmkit/log/grpc.go | 8 -
 .../swarmkit/manager/allocator/allocator.go | 221 -
 .../docker/swarmkit/manager/allocator/doc.go | 18 -
 .../swarmkit/manager/allocator/network.go | 829 -
 .../networkallocator/networkallocator.go | 659 -
 .../networkallocator/portallocator.go | 244 -
 .../swarmkit/manager/controlapi/cluster.go | 198 -
 .../swarmkit/manager/controlapi/common.go | 86 -
 .../swarmkit/manager/controlapi/network.go | 254 -
 .../swarmkit/manager/controlapi/node.go | 288 -
 .../swarmkit/manager/controlapi/server.go | 27 -
 .../swarmkit/manager/controlapi/service.go | 351 -
 .../swarmkit/manager/controlapi/task.go | 136 -
 .../swarmkit/manager/dispatcher/dispatcher.go | 768 -
 .../manager/dispatcher/heartbeat/heartbeat.go | 39 -
 .../swarmkit/manager/dispatcher/nodes.go | 179 -
 .../swarmkit/manager/dispatcher/period.go | 28 -
 .../github.com/docker/swarmkit/manager/doc.go | 1 -
 .../docker/swarmkit/manager/health/health.go | 58 -
 .../swarmkit/manager/keymanager/keymanager.go | 232 -
 .../docker/swarmkit/manager/manager.go | 694 -
 .../swarmkit/manager/orchestrator/global.go | 424 -
 .../manager/orchestrator/replicated.go | 188 -
 .../swarmkit/manager/orchestrator/restart.go | 383 -
 .../swarmkit/manager/orchestrator/services.go | 247 -
 .../manager/orchestrator/task_reaper.go | 203 -
 .../swarmkit/manager/orchestrator/tasks.go | 233 -
 .../swarmkit/manager/orchestrator/updater.go | 233 -
 .../swarmkit/manager/raftpicker/cluster.go | 12 -
 .../swarmkit/manager/raftpicker/raftpicker.go | 133 -
 .../swarmkit/manager/scheduler/constraint.go | 97 -
 .../docker/swarmkit/manager/scheduler/expr.go | 96 -
 .../swarmkit/manager/scheduler/filter.go | 131 -
 .../manager/scheduler/indexed_node_heap.go | 153 -
 .../swarmkit/manager/scheduler/nodeinfo.go | 61 -
 .../swarmkit/manager/scheduler/pipeline.go | 54 -
 .../swarmkit/manager/scheduler/scheduler.go | 445 -
 .../docker/swarmkit/manager/state/doc.go | 32 -
 .../docker/swarmkit/manager/state/proposer.go | 17 -
 .../manager/state/raft/membership/cluster.go | 216 -
 .../swarmkit/manager/state/raft/raft.go | 1342 -
 .../swarmkit/manager/state/raft/storage.go | 389 -
 .../swarmkit/manager/state/raft/util.go | 83 -
 .../swarmkit/manager/state/raft/wait.go | 70 -
 .../swarmkit/manager/state/store/apply.go | 48 -
 .../docker/swarmkit/manager/state/store/by.go | 113 -
 .../swarmkit/manager/state/store/clusters.go | 227 -
 .../manager/state/store/combinators.go | 14 -
 .../swarmkit/manager/state/store/memory.go | 730 -
 .../swarmkit/manager/state/store/networks.go | 221 -
 .../swarmkit/manager/state/store/nodes.go | 254 -
 .../swarmkit/manager/state/store/object.go | 29 -
 .../swarmkit/manager/state/store/services.go | 221 -
 .../swarmkit/manager/state/store/tasks.go | 296 -
 .../docker/swarmkit/manager/state/watch.go | 488 -
 .../swarmkit/manager/state/watch/watch.go | 48 -
 .../docker/swarmkit/picker/picker.go | 333 -
 .../docker/swarmkit/protobuf/plugin/gen.go | 3 -
 .../swarmkit/protobuf/plugin/helpers.go | 11 -
 .../swarmkit/protobuf/plugin/plugin.pb.go | 464 -
 .../swarmkit/protobuf/plugin/plugin.proto | 25 -
 .../docker/swarmkit/protobuf/ptypes/doc.go | 9 -
 .../swarmkit/protobuf/ptypes/duration.go | 102 -
 .../swarmkit/protobuf/ptypes/timestamp.go | 135 -
 .../fluent/fluent-logger-golang/LICENSE | 13 -
 .../fluent-logger-golang/fluent/fluent.go | 269 -
 .../fluent-logger-golang/fluent/proto.go | 24 -
 .../fluent-logger-golang/fluent/proto_gen.go | 372 -
 .../fluent-logger-golang/fluent/version.go | 3 -
 vendor/src/github.com/go-ini/ini/.gitignore | 4 -
 vendor/src/github.com/go-ini/ini/LICENSE | 191 -
 vendor/src/github.com/go-ini/ini/README.md | 560 -
 vendor/src/github.com/go-ini/ini/README_ZH.md | 547 -
 vendor/src/github.com/go-ini/ini/ini.go | 1226 -
 vendor/src/github.com/go-ini/ini/struct.go | 350 -
 .../github.com/godbus/dbus/CONTRIBUTING.md | 50 -
 vendor/src/github.com/godbus/dbus/LICENSE | 25 -
 vendor/src/github.com/godbus/dbus/MAINTAINERS | 2 -
 .../github.com/godbus/dbus/README.markdown | 41 -
 vendor/src/github.com/godbus/dbus/auth.go | 253 -
 .../github.com/godbus/dbus/auth_external.go | 26 -
 .../src/github.com/godbus/dbus/auth_sha1.go | 102 -
 vendor/src/github.com/godbus/dbus/call.go | 36 -
 vendor/src/github.com/godbus/dbus/conn.go | 634 -
 .../src/github.com/godbus/dbus/conn_darwin.go | 21 -
 .../src/github.com/godbus/dbus/conn_other.go | 31 -
 vendor/src/github.com/godbus/dbus/dbus.go | 258 -
 vendor/src/github.com/godbus/dbus/decoder.go | 228 -
 vendor/src/github.com/godbus/dbus/doc.go | 63 -
 vendor/src/github.com/godbus/dbus/encoder.go | 208 -
 vendor/src/github.com/godbus/dbus/export.go | 468 -
 vendor/src/github.com/godbus/dbus/homedir.go | 28 -
 .../github.com/godbus/dbus/homedir_dynamic.go | 15 -
 .../github.com/godbus/dbus/homedir_static.go | 45 -
 vendor/src/github.com/godbus/dbus/message.go | 353 -
 vendor/src/github.com/godbus/dbus/object.go | 136 -
 vendor/src/github.com/godbus/dbus/sig.go | 257 -
 .../godbus/dbus/transport_darwin.go | 6 -
 .../godbus/dbus/transport_generic.go | 35 -
 .../github.com/godbus/dbus/transport_tcp.go | 43 -
 .../github.com/godbus/dbus/transport_unix.go | 196 -
 .../dbus/transport_unixcred_dragonfly.go | 95 -
 .../godbus/dbus/transport_unixcred_linux.go | 25 -
 vendor/src/github.com/godbus/dbus/variant.go | 139 -
 .../github.com/godbus/dbus/variant_lexer.go | 284 -
 .../github.com/godbus/dbus/variant_parser.go | 817 -
 vendor/src/github.com/gogo/protobuf/LICENSE | 36 -
 .../gogo/protobuf/gogoproto/Makefile | 36 -
 .../github.com/gogo/protobuf/gogoproto/doc.go | 168 -
 .../gogo/protobuf/gogoproto/gogo.pb.go | 661 -
 .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 -
 .../gogo/protobuf/gogoproto/gogo.proto | 120 -
 .../gogo/protobuf/gogoproto/helper.go | 308 -
 .../github.com/gogo/protobuf/proto/Makefile | 43 -
 .../github.com/gogo/protobuf/proto/clone.go | 228 -
 .../github.com/gogo/protobuf/proto/decode.go | 872 -
 .../gogo/protobuf/proto/decode_gogo.go | 169 -
 .../github.com/gogo/protobuf/proto/encode.go | 1325 -
 .../gogo/protobuf/proto/encode_gogo.go | 354 -
 .../github.com/gogo/protobuf/proto/equal.go | 276 -
 .../gogo/protobuf/proto/extensions.go | 518 -
 .../gogo/protobuf/proto/extensions_gogo.go | 236 -
 .../src/github.com/gogo/protobuf/proto/lib.go | 894 -
 .../gogo/protobuf/proto/lib_gogo.go | 40 -
 .../gogo/protobuf/proto/message_set.go | 280 -
 .../gogo/protobuf/proto/pointer_reflect.go | 479 -
 .../gogo/protobuf/proto/pointer_unsafe.go | 266 -
 .../protobuf/proto/pointer_unsafe_gogo.go | 108 -
 .../gogo/protobuf/proto/properties.go | 923 -
 .../gogo/protobuf/proto/properties_gogo.go | 64 -
 .../gogo/protobuf/proto/skip_gogo.go | 117 -
 .../github.com/gogo/protobuf/proto/text.go | 793 -
 .../gogo/protobuf/proto/text_gogo.go | 55 -
 .../gogo/protobuf/proto/text_parser.go | 849 -
 .../protoc-gen-gogo/descriptor/Makefile | 33 -
 .../descriptor/descriptor.pb.go | 2017 -
 .../protoc-gen-gogo/descriptor/gostring.go | 635 -
 .../protoc-gen-gogo/descriptor/helper.go | 355 -
 .../gogo/protobuf/sortkeys/sortkeys.go | 99 -
 vendor/src/github.com/golang/mock/LICENSE | 202 -
 .../src/github.com/golang/mock/gomock/call.go | 268 -
 .../github.com/golang/mock/gomock/callset.go | 76 -
 .../golang/mock/gomock/controller.go | 167 -
 .../github.com/golang/mock/gomock/matchers.go | 97 -
 vendor/src/github.com/golang/protobuf/LICENSE | 31 -
 .../github.com/golang/protobuf/proto/Makefile | 43 -
 .../github.com/golang/protobuf/proto/clone.go | 223 -
 .../golang/protobuf/proto/decode.go | 867 -
 .../golang/protobuf/proto/encode.go | 1325 -
 .../github.com/golang/protobuf/proto/equal.go | 276 -
 .../golang/protobuf/proto/extensions.go | 399 -
 .../github.com/golang/protobuf/proto/lib.go | 894 -
 .../golang/protobuf/proto/message_set.go | 280 -
 .../golang/protobuf/proto/pointer_reflect.go | 479 -
 .../golang/protobuf/proto/pointer_unsafe.go | 266 -
 .../golang/protobuf/proto/properties.go | 846 -
 .../github.com/golang/protobuf/proto/text.go | 762 -
 .../golang/protobuf/proto/text_parser.go | 806 -
 .../google/certificate-transparency/LICENSE | 202 -
 .../certificate-transparency/go/README.md | 25 -
 .../certificate-transparency/go/asn1/asn1.go | 956 -
 .../go/asn1/common.go | 163 -
 .../go/asn1/marshal.go | 581 -
 .../go/client/logclient.go | 357 -
 .../go/serialization.go | 512 -
 .../certificate-transparency/go/signatures.go | 131 -
 .../certificate-transparency/go/types.go | 363 -
 .../go/x509/cert_pool.go | 116 -
 .../go/x509/pem_decrypt.go | 233 -
 .../certificate-transparency/go/x509/pkcs1.go | 124 -
 .../certificate-transparency/go/x509/pkcs8.go | 56 -
 .../go/x509/pkix/pkix.go | 173 -
 .../certificate-transparency/go/x509/root.go | 17 -
 .../go/x509/root_darwin.go | 83 -
 .../go/x509/root_plan9.go | 33 -
 .../go/x509/root_stub.go | 14 -
 .../go/x509/root_unix.go | 37 -
 .../go/x509/root_windows.go | 229 -
 .../certificate-transparency/go/x509/sec1.go | 85 -
 .../go/x509/verify.go | 476 -
 .../certificate-transparency/go/x509/x509.go | 1622 -
 .../github.com/gorilla/context/.travis.yml | 7 -
 vendor/src/github.com/gorilla/context/LICENSE | 27 -
 .../src/github.com/gorilla/context/README.md | 7 -
 .../src/github.com/gorilla/context/context.go | 143 -
 vendor/src/github.com/gorilla/context/doc.go | 82 -
 vendor/src/github.com/gorilla/mux/.travis.yml | 7 -
 vendor/src/github.com/gorilla/mux/LICENSE | 27 -
 vendor/src/github.com/gorilla/mux/README.md | 7 -
 vendor/src/github.com/gorilla/mux/doc.go | 199 -
 vendor/src/github.com/gorilla/mux/mux.go | 353 -
 vendor/src/github.com/gorilla/mux/regexp.go | 276 -
 vendor/src/github.com/gorilla/mux/route.go | 524 -
 .../src/github.com/hashicorp/consul/LICENSE | 354 -
 .../github.com/hashicorp/consul/api/README.md | 39 -
 .../github.com/hashicorp/consul/api/acl.go | 140 -
 .../github.com/hashicorp/consul/api/agent.go | 334 -
 .../github.com/hashicorp/consul/api/api.go | 442 -
 .../hashicorp/consul/api/catalog.go | 182 -
 .../github.com/hashicorp/consul/api/event.go | 104 -
 .../github.com/hashicorp/consul/api/health.go | 136 -
 .../src/github.com/hashicorp/consul/api/kv.go | 236 -
 .../github.com/hashicorp/consul/api/lock.go | 326 -
 .../github.com/hashicorp/consul/api/raw.go | 24 -
 .../hashicorp/consul/api/semaphore.go | 477 -
 .../hashicorp/consul/api/session.go | 201 -
 .../github.com/hashicorp/consul/api/status.go | 43 -
 .../hashicorp/consul/website/LICENSE.md | 10 -
 .../hashicorp/go-immutable-radix/.gitignore | 24 -
 .../hashicorp/go-immutable-radix/.travis.yml | 3 -
 .../hashicorp/go-immutable-radix/LICENSE | 363 -
 .../hashicorp/go-immutable-radix/README.md | 41 -
 .../hashicorp/go-immutable-radix/edges.go | 21 -
 .../hashicorp/go-immutable-radix/iradix.go | 333 -
 .../hashicorp/go-immutable-radix/iter.go | 81 -
 .../hashicorp/go-immutable-radix/node.go | 289 -
 .../github.com/hashicorp/go-memdb/.gitignore | 24 -
 .../src/github.com/hashicorp/go-memdb/LICENSE | 363 -
 .../github.com/hashicorp/go-memdb/README.md | 93 -
 .../github.com/hashicorp/go-memdb/index.go | 330 -
 .../github.com/hashicorp/go-memdb/memdb.go | 89 -
 .../github.com/hashicorp/go-memdb/schema.go | 76 -
 .../src/github.com/hashicorp/go-memdb/txn.go | 475 -
 .../github.com/hashicorp/go-msgpack/LICENSE | 25 -
 .../hashicorp/go-msgpack/codec/0doc.go | 143 -
 .../hashicorp/go-msgpack/codec/README.md | 174 -
 .../hashicorp/go-msgpack/codec/binc.go | 786 -
 .../hashicorp/go-msgpack/codec/decode.go | 1048 -
 .../hashicorp/go-msgpack/codec/encode.go | 1001 -
 .../hashicorp/go-msgpack/codec/helper.go | 589 -
 .../go-msgpack/codec/helper_internal.go | 127 -
 .../hashicorp/go-msgpack/codec/msgpack.go | 816 -
 .../go-msgpack/codec/msgpack_test.py | 110 -
 .../hashicorp/go-msgpack/codec/rpc.go | 152 -
 .../hashicorp/go-msgpack/codec/simple.go | 461 -
 .../hashicorp/go-msgpack/codec/time.go | 193 -
 .../hashicorp/go-multierror/LICENSE | 353 -
 .../hashicorp/go-multierror/README.md | 91 -
 .../hashicorp/go-multierror/append.go | 30 -
 .../hashicorp/go-multierror/format.go | 23 -
 .../hashicorp/go-multierror/multierror.go | 51 -
 .../github.com/hashicorp/golang-lru/LICENSE | 362 -
 .../hashicorp/golang-lru/simplelru/lru.go | 160 -
 .../hashicorp/memberlist/.gitignore | 25 -
 .../github.com/hashicorp/memberlist/LICENSE | 354 -
 .../github.com/hashicorp/memberlist/Makefile | 14 -
 .../github.com/hashicorp/memberlist/README.md | 144 -
 .../hashicorp/memberlist/alive_delegate.go | 14 -
 .../hashicorp/memberlist/broadcast.go | 100 -
 .../github.com/hashicorp/memberlist/config.go | 232 -
 .../hashicorp/memberlist/conflict_delegate.go | 10 -
 .../hashicorp/memberlist/delegate.go | 37 -
 .../hashicorp/memberlist/event_delegate.go | 61 -
 .../hashicorp/memberlist/keyring.go | 149 -
 .../hashicorp/memberlist/logging.go | 22 -
 .../hashicorp/memberlist/memberlist.go | 658 -
 .../hashicorp/memberlist/merge_delegate.go | 14 -
 .../github.com/hashicorp/memberlist/net.go | 1039 -
 .../hashicorp/memberlist/ping_delegate.go | 14 -
 .../github.com/hashicorp/memberlist/queue.go | 167 -
 .../hashicorp/memberlist/security.go | 198 -
 .../github.com/hashicorp/memberlist/state.go | 1012 -
 .../github.com/hashicorp/memberlist/todo.md | 6 -
 .../github.com/hashicorp/memberlist/util.go | 392 -
 vendor/src/github.com/hashicorp/serf/LICENSE | 354 -
 .../hashicorp/serf/coordinate/client.go | 180 -
 .../hashicorp/serf/coordinate/config.go | 70 -
 .../hashicorp/serf/coordinate/coordinate.go | 183 -
 .../hashicorp/serf/coordinate/phantom.go | 187 -
 .../hashicorp/serf/serf/broadcast.go | 27 -
 .../hashicorp/serf/serf/coalesce.go | 80 -
 .../hashicorp/serf/serf/coalesce_member.go | 68 -
 .../hashicorp/serf/serf/coalesce_user.go | 52 -
 .../github.com/hashicorp/serf/serf/config.go | 251 -
 .../hashicorp/serf/serf/conflict_delegate.go | 13 -
 .../hashicorp/serf/serf/delegate.go | 254 -
 .../github.com/hashicorp/serf/serf/event.go | 168 -
 .../hashicorp/serf/serf/event_delegate.go | 21 -
 .../hashicorp/serf/serf/internal_query.go | 312 -
 .../hashicorp/serf/serf/keymanager.go | 166 -
 .../github.com/hashicorp/serf/serf/lamport.go | 45 -
 .../hashicorp/serf/serf/merge_delegate.go | 44 -
 .../hashicorp/serf/serf/messages.go | 147 -
 .../hashicorp/serf/serf/ping_delegate.go | 89 -
 .../github.com/hashicorp/serf/serf/query.go | 210 -
 .../github.com/hashicorp/serf/serf/serf.go | 1692 -
 .../hashicorp/serf/serf/snapshot.go | 560 -
 .../hashicorp/serf/website/source/LICENSE | 10 -
 .../src/github.com/imdario/mergo/.travis.yml | 2 -
 vendor/src/github.com/imdario/mergo/LICENSE | 28 -
 vendor/src/github.com/imdario/mergo/README.md | 122 -
 vendor/src/github.com/imdario/mergo/doc.go | 44 -
 vendor/src/github.com/imdario/mergo/map.go | 154 -
 vendor/src/github.com/imdario/mergo/merge.go | 120 -
 vendor/src/github.com/imdario/mergo/mergo.go | 90 -
 .../inconshreveable/mousetrap/LICENSE | 13 -
 .../inconshreveable/mousetrap/README.md | 23 -
 .../inconshreveable/mousetrap/trap_others.go | 15 -
 .../inconshreveable/mousetrap/trap_windows.go | 98 -
 .../mousetrap/trap_windows_1.4.go | 46 -
 .../jmespath/go-jmespath/.gitignore | 4 -
 .../jmespath/go-jmespath/.travis.yml | 9 -
 .../github.com/jmespath/go-jmespath/LICENSE | 13 -
 .../github.com/jmespath/go-jmespath/Makefile | 44 -
 .../github.com/jmespath/go-jmespath/README.md | 7 -
 .../github.com/jmespath/go-jmespath/api.go | 49 -
 .../go-jmespath/astnodetype_string.go | 16 -
 .../jmespath/go-jmespath/functions.go | 842 -
 .../jmespath/go-jmespath/interpreter.go | 418 -
 .../github.com/jmespath/go-jmespath/lexer.go | 420 -
 .../github.com/jmespath/go-jmespath/parser.go | 603 -
 .../jmespath/go-jmespath/toktype_string.go | 16 -
 .../github.com/jmespath/go-jmespath/util.go | 185 -
 vendor/src/github.com/kr/pty/.gitignore | 4 -
 vendor/src/github.com/kr/pty/License | 23 -
 vendor/src/github.com/kr/pty/README.md | 36 -
 vendor/src/github.com/kr/pty/doc.go | 16 -
 vendor/src/github.com/kr/pty/ioctl.go | 11 -
 vendor/src/github.com/kr/pty/ioctl_bsd.go | 39 -
 vendor/src/github.com/kr/pty/mktypes.bash | 19 -
 vendor/src/github.com/kr/pty/pty_darwin.go | 60 -
 vendor/src/github.com/kr/pty/pty_freebsd.go | 73 -
 vendor/src/github.com/kr/pty/pty_linux.go | 46 -
 .../src/github.com/kr/pty/pty_unsupported.go | 11 -
 vendor/src/github.com/kr/pty/run.go | 28 -
 vendor/src/github.com/kr/pty/types.go | 10 -
 vendor/src/github.com/kr/pty/types_freebsd.go | 15 -
 vendor/src/github.com/kr/pty/util.go | 35 -
 vendor/src/github.com/kr/pty/ztypes_386.go | 9 -
 vendor/src/github.com/kr/pty/ztypes_amd64.go | 9 -
 vendor/src/github.com/kr/pty/ztypes_arm.go | 9 -
 vendor/src/github.com/kr/pty/ztypes_arm64.go | 11 -
 .../github.com/kr/pty/ztypes_freebsd_386.go | 13 -
 .../github.com/kr/pty/ztypes_freebsd_amd64.go | 14 -
 .../github.com/kr/pty/ztypes_freebsd_arm.go | 13 -
 vendor/src/github.com/kr/pty/ztypes_ppc64.go | 11 -
 .../src/github.com/kr/pty/ztypes_ppc64le.go | 11 -
 vendor/src/github.com/kr/pty/ztypes_s390x.go | 11 -
 .../mattn/go-shellwords/.travis.yml | 9 -
 .../github.com/mattn/go-shellwords/README.md | 47 -
 .../mattn/go-shellwords/shellwords.go | 134 -
 .../mattn/go-shellwords/util_posix.go | 19 -
 .../mattn/go-shellwords/util_windows.go | 17 -
 .../github.com/mattn/go-sqlite3/.gitignore | 3 -
 .../github.com/mattn/go-sqlite3/.travis.yml | 9 -
 .../src/github.com/mattn/go-sqlite3/LICENSE | 21 -
 .../src/github.com/mattn/go-sqlite3/README.md | 70 -
 .../src/github.com/mattn/go-sqlite3/backup.go | 70 -
 .../mattn/go-sqlite3/code/sqlite3-binding.c | 147782 ---------------
 .../mattn/go-sqlite3/code/sqlite3-binding.h | 7478 -
 .../mattn/go-sqlite3/code/sqlite3ext.h | 487 -
 vendor/src/github.com/mattn/go-sqlite3/doc.go | 95 -
 .../src/github.com/mattn/go-sqlite3/error.go | 128 -
 .../mattn/go-sqlite3/sqlite3-binding.c | 4 -
 .../mattn/go-sqlite3/sqlite3-binding.h | 5 -
 .../github.com/mattn/go-sqlite3/sqlite3.go | 660 -
 .../mattn/go-sqlite3/sqlite3_icu.go | 13 -
 .../mattn/go-sqlite3/sqlite3_libsqlite3.go | 13 -
 .../go-sqlite3/sqlite3_load_extension.go | 39 -
 .../go-sqlite3/sqlite3_omit_load_extension.go | 19 -
 .../mattn/go-sqlite3/sqlite3_other.go | 13 -
 .../mattn/go-sqlite3/sqlite3_windows.go | 14 -
 .../golang_protobuf_extensions/LICENSE | 201 -
 .../pbutil/decode.go | 75 -
 .../golang_protobuf_extensions/pbutil/doc.go | 16 -
 .../pbutil/encode.go | 46 -
 vendor/src/github.com/miekg/dns/.gitignore | 4 -
 vendor/src/github.com/miekg/dns/.travis.yml | 7 -
 vendor/src/github.com/miekg/dns/AUTHORS | 1 -
 vendor/src/github.com/miekg/dns/CONTRIBUTORS | 9 -
 vendor/src/github.com/miekg/dns/COPYRIGHT | 9 -
 vendor/src/github.com/miekg/dns/LICENSE | 32 -
 vendor/src/github.com/miekg/dns/README.md | 153 -
 vendor/src/github.com/miekg/dns/client.go | 385 -
 .../src/github.com/miekg/dns/clientconfig.go | 99 -
 vendor/src/github.com/miekg/dns/defaults.go | 278 -
 vendor/src/github.com/miekg/dns/dns.go | 100 -
 vendor/src/github.com/miekg/dns/dnssec.go | 664 -
 .../src/github.com/miekg/dns/dnssec_keygen.go | 156 -
 .../github.com/miekg/dns/dnssec_keyscan.go | 249 -
 .../github.com/miekg/dns/dnssec_privkey.go | 85 -
 vendor/src/github.com/miekg/dns/doc.go | 251 -
 vendor/src/github.com/miekg/dns/edns.go | 505 -
 vendor/src/github.com/miekg/dns/format.go | 96 -
 vendor/src/github.com/miekg/dns/labels.go | 162 -
 vendor/src/github.com/miekg/dns/msg.go | 1945 -
 vendor/src/github.com/miekg/dns/nsecx.go | 112 -
 vendor/src/github.com/miekg/dns/privaterr.go | 117 -
 vendor/src/github.com/miekg/dns/rawmsg.go | 95 -
 vendor/src/github.com/miekg/dns/sanitize.go | 84 -
 vendor/src/github.com/miekg/dns/scanner.go | 43 -
 vendor/src/github.com/miekg/dns/server.go | 690 -
 vendor/src/github.com/miekg/dns/sig0.go | 216 -
 .../github.com/miekg/dns/singleinflight.go | 57 -
 vendor/src/github.com/miekg/dns/tlsa.go | 86 -
 vendor/src/github.com/miekg/dns/tsig.go | 320 -
 vendor/src/github.com/miekg/dns/types.go | 1328 -
 .../github.com/miekg/dns/types_generate.go | 266 -
 vendor/src/github.com/miekg/dns/udp.go | 58 -
 vendor/src/github.com/miekg/dns/udp_linux.go | 73 -
 vendor/src/github.com/miekg/dns/udp_other.go | 17 -
 .../src/github.com/miekg/dns/udp_windows.go | 34 -
 vendor/src/github.com/miekg/dns/update.go | 94 -
 vendor/src/github.com/miekg/dns/xfr.go | 244 -
 vendor/src/github.com/miekg/dns/zgenerate.go | 158 -
 vendor/src/github.com/miekg/dns/zscan.go | 974 -
 vendor/src/github.com/miekg/dns/zscan_rr.go | 2270 -
 vendor/src/github.com/miekg/dns/ztypes.go | 842 -
 vendor/src/github.com/miekg/pkcs11/.gitignore | 1 -
 .../src/github.com/miekg/pkcs11/.travis.yml | 14 -
 vendor/src/github.com/miekg/pkcs11/LICENSE | 27 -
 vendor/src/github.com/miekg/pkcs11/README.md | 64 -
 vendor/src/github.com/miekg/pkcs11/const.go | 565 -
 vendor/src/github.com/miekg/pkcs11/error.go | 98 -
 vendor/src/github.com/miekg/pkcs11/hsm.db | Bin 10240 -> 0 bytes
 vendor/src/github.com/miekg/pkcs11/pkcs11.go | 1575 -
 vendor/src/github.com/miekg/pkcs11/pkcs11.h | 299 -
 vendor/src/github.com/miekg/pkcs11/pkcs11f.h | 910 -
 vendor/src/github.com/miekg/pkcs11/pkcs11t.h | 1885 -
 .../src/github.com/miekg/pkcs11/softhsm.conf | 1 -
 vendor/src/github.com/miekg/pkcs11/types.go | 267 -
 .../mreiferson/go-httpclient/.gitignore | 1 -
 .../mreiferson/go-httpclient/.travis.yml | 11 -
 .../mreiferson/go-httpclient/LICENSE | 17 -
 .../mreiferson/go-httpclient/README.md | 41 -
 .../mreiferson/go-httpclient/httpclient.go | 237 -
 vendor/src/github.com/philhofer/fwd/README.md | 311 -
 vendor/src/github.com/philhofer/fwd/reader.go | 358 -
 vendor/src/github.com/philhofer/fwd/writer.go | 224 -
 .../philhofer/fwd/writer_appengine.go | 5 -
 .../github.com/philhofer/fwd/writer_unsafe.go | 18 -
 .../github.com/pivotal-golang/clock/LICENSE | 202 -
 .../github.com/pivotal-golang/clock/README.md | 1 -
 .../github.com/pivotal-golang/clock/clock.go | 42 -
 .../github.com/pivotal-golang/clock/ticker.go | 20 -
 .../github.com/pivotal-golang/clock/timer.go | 25 -
 vendor/src/github.com/pkg/errors/.gitignore | 24 -
 vendor/src/github.com/pkg/errors/.travis.yml | 10 -
 vendor/src/github.com/pkg/errors/LICENSE | 24 -
 vendor/src/github.com/pkg/errors/README.md | 50 -
 vendor/src/github.com/pkg/errors/appveyor.yml | 32 -
 vendor/src/github.com/pkg/errors/errors.go | 211 -
 vendor/src/github.com/pkg/errors/stack.go | 165 -
 .../prometheus/client_golang/LICENSE | 201 -
 .../client_golang/prometheus/.gitignore | 1 -
 .../client_golang/prometheus/README.md | 53 -
 .../client_golang/prometheus/collector.go | 75 -
 .../client_golang/prometheus/counter.go | 175 -
 .../client_golang/prometheus/desc.go | 201 -
 .../client_golang/prometheus/doc.go | 109 -
 .../client_golang/prometheus/expvar.go | 119 -
 .../client_golang/prometheus/gauge.go | 147 -
 .../client_golang/prometheus/go_collector.go | 263 -
 .../client_golang/prometheus/histogram.go | 450 -
 .../client_golang/prometheus/http.go | 361 -
 .../client_golang/prometheus/metric.go | 166 -
 .../prometheus/process_collector.go | 142 -
 .../client_golang/prometheus/push.go | 65 -
 .../client_golang/prometheus/registry.go | 726 -
 .../client_golang/prometheus/summary.go | 540 -
 .../client_golang/prometheus/untyped.go | 145 -
 .../client_golang/prometheus/value.go | 234 -
 .../client_golang/prometheus/vec.go | 247 -
 .../prometheus/client_model/LICENSE | 201 -
 .../prometheus/client_model/go/metrics.pb.go | 364 -
 .../prometheus/client_model/ruby/LICENSE | 201 -
 .../src/github.com/prometheus/common/LICENSE | 201 -
 .../prometheus/common/expfmt/decode.go | 411 -
 .../prometheus/common/expfmt/encode.go | 88 -
 .../prometheus/common/expfmt/expfmt.go | 40 -
 .../prometheus/common/expfmt/fuzz.go | 36 -
 .../prometheus/common/expfmt/json_decode.go | 162 -
 .../prometheus/common/expfmt/text_create.go | 305 -
 .../prometheus/common/expfmt/text_parse.go | 746 -
 .../prometheus/common/model/alert.go | 109 -
 .../prometheus/common/model/fingerprinting.go | 105 -
 .../prometheus/common/model/labels.go | 188 -
 .../prometheus/common/model/labelset.go | 153 -
 .../prometheus/common/model/metric.go | 81 -
 .../prometheus/common/model/model.go | 16 -
 .../prometheus/common/model/signature.go | 190 -
 .../prometheus/common/model/silence.go | 60 -
 .../prometheus/common/model/time.go | 230 -
 .../prometheus/common/model/value.go | 395 -
 .../github.com/prometheus/procfs/.travis.yml | 7 -
 .../github.com/prometheus/procfs/AUTHORS.md | 20 -
 .../prometheus/procfs/CONTRIBUTING.md | 18 -
 .../src/github.com/prometheus/procfs/LICENSE | 201 -
 .../src/github.com/prometheus/procfs/Makefile | 6 -
 .../src/github.com/prometheus/procfs/NOTICE | 7 -
 .../github.com/prometheus/procfs/README.md | 10 -
 .../src/github.com/prometheus/procfs/doc.go | 45 -
 vendor/src/github.com/prometheus/procfs/fs.go | 40 -
 .../src/github.com/prometheus/procfs/ipvs.go | 223 -
 .../github.com/prometheus/procfs/mdstat.go | 158 -
 .../src/github.com/prometheus/procfs/proc.go | 202 -
 .../github.com/prometheus/procfs/proc_io.go | 54 -
 .../prometheus/procfs/proc_limits.go | 111 -
 .../github.com/prometheus/procfs/proc_stat.go | 175 -
 .../src/github.com/prometheus/procfs/stat.go | 55 -
 .../github.com/samuel/go-zookeeper/LICENSE | 25 -
 .../github.com/samuel/go-zookeeper/zk/conn.go | 844 -
 .../samuel/go-zookeeper/zk/constants.go | 242 -
 .../github.com/samuel/go-zookeeper/zk/flw.go | 288 -
 .../github.com/samuel/go-zookeeper/zk/lock.go | 131 -
 .../samuel/go-zookeeper/zk/server_help.go | 119 -
 .../samuel/go-zookeeper/zk/server_java.go | 136 -
 .../samuel/go-zookeeper/zk/structs.go | 633 -
 .../samuel/go-zookeeper/zk/tracer.go | 148 -
 .../github.com/samuel/go-zookeeper/zk/util.go | 54 -
 .../seccomp/libseccomp-golang/LICENSE | 22 -
 .../seccomp/libseccomp-golang/README | 26 -
 .../seccomp/libseccomp-golang/seccomp.go | 857 -
 .../libseccomp-golang/seccomp_internal.go | 506 -
 vendor/src/github.com/spf13/cobra/.gitignore | 24 -
 vendor/src/github.com/spf13/cobra/.mailmap | 3 -
 vendor/src/github.com/spf13/cobra/.travis.yml | 18 -
 vendor/src/github.com/spf13/cobra/LICENSE.txt | 174 -
 vendor/src/github.com/spf13/cobra/README.md | 928 -
 vendor/src/github.com/spf13/cobra/args.go | 98 -
 .../spf13/cobra/bash_completions.go | 602 -
 .../spf13/cobra/bash_completions.md | 206 -
 vendor/src/github.com/spf13/cobra/cobra.go | 171 -
 vendor/src/github.com/spf13/cobra/command.go | 1260 -
 .../github.com/spf13/cobra/command_notwin.go | 5 -
 .../src/github.com/spf13/cobra/command_win.go | 26 -
 vendor/src/github.com/spf13/pflag/.travis.yml | 17 -
 vendor/src/github.com/spf13/pflag/LICENSE | 28 -
 vendor/src/github.com/spf13/pflag/README.md | 256 -
 vendor/src/github.com/spf13/pflag/bool.go | 97 -
 vendor/src/github.com/spf13/pflag/count.go | 97 -
 vendor/src/github.com/spf13/pflag/duration.go | 86 -
 vendor/src/github.com/spf13/pflag/flag.go | 934 -
 vendor/src/github.com/spf13/pflag/float32.go | 91 -
 vendor/src/github.com/spf13/pflag/float64.go | 87 -
 .../src/github.com/spf13/pflag/golangflag.go | 104 -
 vendor/src/github.com/spf13/pflag/int.go | 87 -
 vendor/src/github.com/spf13/pflag/int32.go | 91 -
 vendor/src/github.com/spf13/pflag/int64.go | 87 -
 vendor/src/github.com/spf13/pflag/int8.go | 91 -
 .../src/github.com/spf13/pflag/int_slice.go | 128 -
 vendor/src/github.com/spf13/pflag/ip.go | 96 -
 vendor/src/github.com/spf13/pflag/ipmask.go | 122 -
 vendor/src/github.com/spf13/pflag/ipnet.go | 100 -
 vendor/src/github.com/spf13/pflag/string.go | 82 -
 .../github.com/spf13/pflag/string_slice.go | 111 -
 vendor/src/github.com/spf13/pflag/uint.go | 91 -
 vendor/src/github.com/spf13/pflag/uint16.go | 89 -
 vendor/src/github.com/spf13/pflag/uint32.go | 89 -
 vendor/src/github.com/spf13/pflag/uint64.go | 91 -
 vendor/src/github.com/spf13/pflag/uint8.go | 91 -
 .../github.com/syndtr/gocapability/LICENSE | 24 -
 .../gocapability/capability/capability.go | 72 -
 .../capability/capability_linux.go | 608 -
 .../capability/capability_noop.go | 19 -
 .../syndtr/gocapability/capability/enum.go | 264 -
 .../gocapability/capability/enum_gen.go | 129 -
 .../gocapability/capability/syscall_linux.go | 145 -
 .../src/github.com/tchap/go-patricia/LICENSE | 20 -
 .../tchap/go-patricia/patricia/children.go | 244 -
 .../tchap/go-patricia/patricia/patricia.go | 467 -
 vendor/src/github.com/tinylib/msgp/LICENSE | 8 -
 .../github.com/tinylib/msgp/msgp/circular.go | 38 -
 .../src/github.com/tinylib/msgp/msgp/defs.go | 142 -
 .../src/github.com/tinylib/msgp/msgp/edit.go | 241 -
 .../github.com/tinylib/msgp/msgp/elsize.go | 99 -
 .../github.com/tinylib/msgp/msgp/errors.go | 142 -
 .../github.com/tinylib/msgp/msgp/extension.go | 548 -
 .../github.com/tinylib/msgp/msgp/integers.go | 174 -
 .../src/github.com/tinylib/msgp/msgp/json.go | 542 -
 .../tinylib/msgp/msgp/json_bytes.go | 363 -
 .../github.com/tinylib/msgp/msgp/number.go | 140 -
 .../tinylib/msgp/msgp/number_appengine.go | 101 -
 .../tinylib/msgp/msgp/number_unsafe.go | 159 -
 .../src/github.com/tinylib/msgp/msgp/read.go | 1118 -
 .../tinylib/msgp/msgp/read_bytes.go | 1073 -
 .../src/github.com/tinylib/msgp/msgp/size.go | 38 -
 .../src/github.com/tinylib/msgp/msgp/write.go | 768 -
 .../tinylib/msgp/msgp/write_bytes.go | 369 -
 vendor/src/github.com/ugorji/go/LICENSE | 22 -
 vendor/src/github.com/ugorji/go/codec/0doc.go | 193 -
 .../src/github.com/ugorji/go/codec/README.md | 148 -
 vendor/src/github.com/ugorji/go/codec/binc.go | 918 -
 vendor/src/github.com/ugorji/go/codec/cbor.go | 584 -
 .../src/github.com/ugorji/go/codec/decode.go | 2015 -
 .../src/github.com/ugorji/go/codec/encode.go | 1405 -
 .../ugorji/go/codec/fast-path.generated.go | 38900 ----
 .../ugorji/go/codec/fast-path.go.tmpl | 511 -
 .../ugorji/go/codec/fast-path.not.go | 32 -
 .../ugorji/go/codec/gen-dec-array.go.tmpl | 101 -
 .../ugorji/go/codec/gen-dec-map.go.tmpl | 58 -
 .../ugorji/go/codec/gen-helper.generated.go | 233 -
 .../ugorji/go/codec/gen-helper.go.tmpl | 364 -
 .../ugorji/go/codec/gen.generated.go | 172 -
 vendor/src/github.com/ugorji/go/codec/gen.go | 1920 -
 .../src/github.com/ugorji/go/codec/helper.go | 1129 -
 .../ugorji/go/codec/helper_internal.go | 242 -
 .../ugorji/go/codec/helper_not_unsafe.go | 20 -
 .../ugorji/go/codec/helper_unsafe.go | 45 -
 vendor/src/github.com/ugorji/go/codec/json.go | 1072 -
 .../src/github.com/ugorji/go/codec/msgpack.go | 844 -
 vendor/src/github.com/ugorji/go/codec/noop.go | 213 -
 .../github.com/ugorji/go/codec/prebuild.go | 3 -
 .../github.com/ugorji/go/codec/prebuild.sh | 199 -
 vendor/src/github.com/ugorji/go/codec/rpc.go | 180 -
 .../src/github.com/ugorji/go/codec/simple.go | 518 -
 .../ugorji/go/codec/test-cbor-goldens.json | 639 -
 vendor/src/github.com/ugorji/go/codec/test.py | 120 -
 .../src/github.com/ugorji/go/codec/tests.sh | 74 -
 vendor/src/github.com/ugorji/go/codec/time.go | 222 -
 .../github.com/vdemeester/shakers/.gitignore | 2 -
 .../github.com/vdemeester/shakers/Dockerfile | 16 -
 .../src/github.com/vdemeester/shakers/LICENSE | 191 -
 .../github.com/vdemeester/shakers/Makefile | 37 -
 .../github.com/vdemeester/shakers/README.md | 30 -
 .../src/github.com/vdemeester/shakers/bool.go | 46 -
 .../github.com/vdemeester/shakers/circle.yml | 11 -
 .../github.com/vdemeester/shakers/common.go | 310 -
 .../github.com/vdemeester/shakers/glide.yaml | 4 -
 .../github.com/vdemeester/shakers/string.go | 168 -
 .../src/github.com/vdemeester/shakers/time.go | 234 -
 .../vishvananda/netlink/.travis.yml | 8 -
 .../github.com/vishvananda/netlink/LICENSE | 192 -
 .../github.com/vishvananda/netlink/Makefile | 29 -
 .../github.com/vishvananda/netlink/README.md | 89 -
 .../github.com/vishvananda/netlink/addr.go | 45 -
 .../vishvananda/netlink/addr_linux.go | 232 -
 .../vishvananda/netlink/bpf_linux.go | 60 -
 .../github.com/vishvananda/netlink/class.go | 78 -
 .../vishvananda/netlink/class_linux.go | 254 -
 .../github.com/vishvananda/netlink/filter.go | 244 -
 .../vishvananda/netlink/filter_linux.go | 560 -
 .../vishvananda/netlink/handle_linux.go | 86 -
 .../github.com/vishvananda/netlink/link.go | 584 -
 .../vishvananda/netlink/link_linux.go | 1380 -
 .../vishvananda/netlink/link_tuntap_linux.go | 14 -
 .../github.com/vishvananda/netlink/neigh.go | 22 -
 .../vishvananda/netlink/neigh_linux.go | 229 -
 .../github.com/vishvananda/netlink/netlink.go | 31 -
 .../vishvananda/netlink/netlink_linux.go | 10 -
 .../netlink/netlink_unspecified.go | 143 -
 .../vishvananda/netlink/nl/addr_linux.go | 47 -
 .../vishvananda/netlink/nl/link_linux.go | 396 -
 .../vishvananda/netlink/nl/nl_linux.go | 501 -
 .../vishvananda/netlink/nl/route_linux.go | 54 -
 .../vishvananda/netlink/nl/syscall.go | 37 -
 .../vishvananda/netlink/nl/tc_linux.go | 675 -
 .../vishvananda/netlink/nl/xfrm_linux.go | 276 -
 .../netlink/nl/xfrm_policy_linux.go | 119 -
 .../netlink/nl/xfrm_state_linux.go | 242 -
 .../vishvananda/netlink/protinfo.go | 53 -
 .../vishvananda/netlink/protinfo_linux.go | 64 -
 .../github.com/vishvananda/netlink/qdisc.go | 231 -
 .../vishvananda/netlink/qdisc_linux.go | 513 -
 .../github.com/vishvananda/netlink/route.go | 66 -
 .../vishvananda/netlink/route_linux.go | 452 -
 .../vishvananda/netlink/route_unspecified.go | 7 -
 .../github.com/vishvananda/netlink/rule.go | 40 -
 .../vishvananda/netlink/rule_linux.go | 215 -
 .../github.com/vishvananda/netlink/xfrm.go | 74 -
 .../vishvananda/netlink/xfrm_policy.go | 74 -
 .../vishvananda/netlink/xfrm_policy_linux.go | 257 -
 .../vishvananda/netlink/xfrm_state.go | 98 -
 .../vishvananda/netlink/xfrm_state_linux.go | 344 -
 .../src/github.com/vishvananda/netns/LICENSE | 192 -
 .../github.com/vishvananda/netns/README.md | 49 -
 .../src/github.com/vishvananda/netns/netns.go | 67 -
 .../vishvananda/netns/netns_linux.go | 205 -
 .../vishvananda/netns/netns_linux_386.go | 7 -
 .../vishvananda/netns/netns_linux_amd64.go | 7 -
 .../vishvananda/netns/netns_linux_arm.go | 7 -
 .../vishvananda/netns/netns_linux_arm64.go | 7 -
 .../vishvananda/netns/netns_linux_ppc64le.go | 7 -
 .../vishvananda/netns/netns_linux_s390x.go | 7 -
 .../vishvananda/netns/netns_unspecified.go | 35 -
 vendor/src/golang.org/x/crypto/LICENSE | 27 -
 .../src/golang.org/x/crypto/bcrypt/base64.go | 35 -
 .../src/golang.org/x/crypto/bcrypt/bcrypt.go | 294 -
 .../src/golang.org/x/crypto/blowfish/block.go | 159 -
 .../golang.org/x/crypto/blowfish/cipher.go | 91 -
 .../src/golang.org/x/crypto/blowfish/const.go | 199 -
 .../golang.org/x/crypto/pkcs12/bmp-string.go | 50 -
 .../src/golang.org/x/crypto/pkcs12/crypto.go | 131 -
 .../src/golang.org/x/crypto/pkcs12/errors.go | 23 -
 .../x/crypto/pkcs12/internal/rc2/rc2.go | 274 -
 vendor/src/golang.org/x/crypto/pkcs12/mac.go | 45 -
 .../src/golang.org/x/crypto/pkcs12/pbkdf.go | 170 -
 .../src/golang.org/x/crypto/pkcs12/pkcs12.go | 342 -
 .../golang.org/x/crypto/pkcs12/safebags.go | 57 -
 vendor/src/golang.org/x/net/LICENSE | 27 -
 vendor/src/golang.org/x/net/http2/.gitignore | 2 -
 vendor/src/golang.org/x/net/http2/Dockerfile | 51 -
 vendor/src/golang.org/x/net/http2/Makefile | 3 -
 vendor/src/golang.org/x/net/http2/README | 20 -
 .../x/net/http2/client_conn_pool.go | 225 -
 .../x/net/http2/configure_transport.go | 89 -
 vendor/src/golang.org/x/net/http2/errors.go | 122 -
 .../golang.org/x/net/http2/fixed_buffer.go | 60 -
 vendor/src/golang.org/x/net/http2/flow.go | 50 -
 vendor/src/golang.org/x/net/http2/frame.go | 1496 -
 vendor/src/golang.org/x/net/http2/go15.go | 11 -
 vendor/src/golang.org/x/net/http2/gotrack.go | 170 -
 .../src/golang.org/x/net/http2/headermap.go | 78 -
 .../golang.org/x/net/http2/hpack/encode.go | 251 -
 .../src/golang.org/x/net/http2/hpack/hpack.go | 542 -
 .../golang.org/x/net/http2/hpack/huffman.go | 190 -
 .../golang.org/x/net/http2/hpack/tables.go | 352 -
 vendor/src/golang.org/x/net/http2/http2.go | 464 -
 vendor/src/golang.org/x/net/http2/not_go15.go | 11 -
 vendor/src/golang.org/x/net/http2/not_go16.go | 13 -
 vendor/src/golang.org/x/net/http2/pipe.go | 147 -
 vendor/src/golang.org/x/net/http2/server.go | 2182 -
 .../src/golang.org/x/net/http2/transport.go | 1664 -
 vendor/src/golang.org/x/net/http2/write.go | 262 -
 .../src/golang.org/x/net/http2/writesched.go | 283 -
 .../x/net/internal/timeseries/timeseries.go | 525 -
 vendor/src/golang.org/x/net/proxy/direct.go | 18 -
 vendor/src/golang.org/x/net/proxy/per_host.go | 140 -
 vendor/src/golang.org/x/net/proxy/proxy.go | 94 -
 vendor/src/golang.org/x/net/proxy/socks5.go | 210 -
 vendor/src/golang.org/x/net/trace/events.go | 532 -
 .../src/golang.org/x/net/trace/histogram.go | 365 -
 vendor/src/golang.org/x/net/trace/trace.go | 1070 -
 .../src/golang.org/x/net/websocket/client.go | 113 -
 vendor/src/golang.org/x/net/websocket/hybi.go | 586 -
 .../src/golang.org/x/net/websocket/server.go | 113 -
 .../golang.org/x/net/websocket/websocket.go | 414 -
 vendor/src/golang.org/x/oauth2/.travis.yml | 14 -
 vendor/src/golang.org/x/oauth2/AUTHORS | 3 -
 .../src/golang.org/x/oauth2/CONTRIBUTING.md | 31 -
 vendor/src/golang.org/x/oauth2/CONTRIBUTORS | 3 -
 vendor/src/golang.org/x/oauth2/LICENSE | 27 -
 vendor/src/golang.org/x/oauth2/README.md | 64 -
 .../golang.org/x/oauth2/client_appengine.go | 25 -
 .../golang.org/x/oauth2/google/appengine.go | 86 -
 .../x/oauth2/google/appengine_hook.go | 13 -
 .../x/oauth2/google/appenginevm_hook.go | 14 -
 .../src/golang.org/x/oauth2/google/default.go | 155 -
 .../src/golang.org/x/oauth2/google/google.go | 145 -
 vendor/src/golang.org/x/oauth2/google/jwt.go | 71 -
 vendor/src/golang.org/x/oauth2/google/sdk.go | 168 -
 .../golang.org/x/oauth2/internal/oauth2.go | 76 -
 .../src/golang.org/x/oauth2/internal/token.go | 221 -
 .../golang.org/x/oauth2/internal/transport.go | 69 -
 vendor/src/golang.org/x/oauth2/jws/jws.go | 172 -
 vendor/src/golang.org/x/oauth2/jwt/jwt.go | 153 -
 vendor/src/golang.org/x/oauth2/oauth2.go | 337 -
 vendor/src/golang.org/x/oauth2/token.go | 158 -
 vendor/src/golang.org/x/oauth2/transport.go | 132 -
 vendor/src/golang.org/x/sys/LICENSE | 27 -
 vendor/src/google.golang.org/api/LICENSE | 27 -
 .../google.golang.org/api/gensupport/json.go | 177 -
 .../api/gensupport/params.go | 31 -
 .../api/googleapi/googleapi.go | 588 -
 .../googleapi/internal/uritemplates/LICENSE | 18 -
 .../internal/uritemplates/uritemplates.go | 359 -
 .../googleapi/internal/uritemplates/utils.go | 13 -
 .../google.golang.org/api/googleapi/types.go | 182 -
 .../api/logging/v1beta3/logging-api.json | 1692 -
 .../api/logging/v1beta3/logging-gen.go | 4787 -
 .../src/google.golang.org/cloud/.travis.yml | 11 -
 vendor/src/google.golang.org/cloud/AUTHORS | 12 -
 .../google.golang.org/cloud/CONTRIBUTING.md | 114 -
 .../src/google.golang.org/cloud/CONTRIBUTORS | 24 -
 vendor/src/google.golang.org/cloud/LICENSE | 202 -
 vendor/src/google.golang.org/cloud/README.md | 135 -
 vendor/src/google.golang.org/cloud/cloud.go | 49 -
 .../cloud/compute/metadata/metadata.go | 327 -
 .../google.golang.org/cloud/internal/cloud.go | 128 -
 .../cloud/internal/opts/option.go | 24 -
 .../cloud/internal/transport/cancelreq.go | 29 -
 .../internal/transport/cancelreq_legacy.go | 31 -
 .../cloud/internal/transport/dial.go | 134 -
 .../cloud/internal/transport/proto.go | 80 -
 .../src/google.golang.org/cloud/key.json.enc | Bin 1248 -> 0 bytes
 .../cloud/logging/logging.go | 468 -
 vendor/src/google.golang.org/cloud/option.go | 102 -
 vendor/src/google.golang.org/grpc/.travis.yml | 17 -
 .../google.golang.org/grpc/CONTRIBUTING.md | 46 -
 vendor/src/google.golang.org/grpc/LICENSE | 28 -
 vendor/src/google.golang.org/grpc/Makefile | 51 -
 vendor/src/google.golang.org/grpc/PATENTS | 22 -
 vendor/src/google.golang.org/grpc/README.md | 32 -
 vendor/src/google.golang.org/grpc/backoff.go | 80 -
 vendor/src/google.golang.org/grpc/call.go | 191 -
 .../src/google.golang.org/grpc/clientconn.go | 623 -
 vendor/src/google.golang.org/grpc/codegen.sh | 17 -
 .../grpc/codes/code_string.go | 16 -
 .../src/google.golang.org/grpc/codes/codes.go | 159 -
 vendor/src/google.golang.org/grpc/coverage.sh | 47 -
 .../grpc/credentials/credentials.go | 226 -
 .../grpc/credentials/oauth/oauth.go | 177 -
 vendor/src/google.golang.org/grpc/doc.go | 6 -
 .../google.golang.org/grpc/grpclog/logger.go | 93 -
 .../src/google.golang.org/grpc/interceptor.go | 74 -
 .../grpc/internal/internal.go | 49 -
 .../grpc/metadata/metadata.go | 134 -
 .../google.golang.org/grpc/naming/naming.go | 73 -
 .../src/google.golang.org/grpc/peer/peer.go | 65 -
 vendor/src/google.golang.org/grpc/picker.go | 243 -
 vendor/src/google.golang.org/grpc/rpc_util.go | 418 -
 vendor/src/google.golang.org/grpc/server.go | 782 -
 vendor/src/google.golang.org/grpc/stream.go | 414 -
 vendor/src/google.golang.org/grpc/trace.go | 120 -
 .../grpc/transport/control.go | 210 -
 .../grpc/transport/handler_server.go | 383 -
 .../grpc/transport/http2_client.go | 914 -
 .../grpc/transport/http2_server.go | 735 -
 .../grpc/transport/http_util.go | 411 -
 .../grpc/transport/transport.go | 508 -
 vendor/src/gopkg.in/fsnotify.v1/.gitignore | 6 -
 vendor/src/gopkg.in/fsnotify.v1/.travis.yml | 29 -
 vendor/src/gopkg.in/fsnotify.v1/AUTHORS | 43 -
 vendor/src/gopkg.in/fsnotify.v1/CHANGELOG.md | 287 -
 .../src/gopkg.in/fsnotify.v1/CONTRIBUTING.md | 77 -
 vendor/src/gopkg.in/fsnotify.v1/LICENSE | 28 -
 vendor/src/gopkg.in/fsnotify.v1/README.md | 46 -
 vendor/src/gopkg.in/fsnotify.v1/fen.go | 37 -
 vendor/src/gopkg.in/fsnotify.v1/fsnotify.go | 62 -
 vendor/src/gopkg.in/fsnotify.v1/inotify.go | 324 -
 .../gopkg.in/fsnotify.v1/inotify_poller.go | 186 -
 vendor/src/gopkg.in/fsnotify.v1/kqueue.go | 502 -
 .../src/gopkg.in/fsnotify.v1/open_mode_bsd.go | 11 -
 .../gopkg.in/fsnotify.v1/open_mode_darwin.go | 12 -
 vendor/src/gopkg.in/fsnotify.v1/windows.go | 561 -
 volume/drivers/adapter.go | 164 -
 volume/drivers/extpoint.go | 181 -
 volume/drivers/extpoint_test.go | 23 -
 volume/drivers/proxy.go | 241 -
 volume/drivers/proxy_test.go | 132 -
 volume/local/local.go | 338 -
 volume/local/local_test.go | 249 -
 volume/local/local_unix.go | 69 -
 volume/local/local_windows.go | 34 -
 volume/store/errors.go | 74 -
 volume/store/store.go | 517 -
 volume/store/store_test.go | 201 -
 volume/store/store_unix.go | 9 -
 volume/store/store_windows.go | 12 -
 volume/testutils/testutils.go | 116 -
 volume/volume.go | 190 -
 volume/volume_copy.go | 28 -
 volume/volume_propagation_linux.go | 44 -
 volume/volume_propagation_linux_test.go | 65 -
 volume/volume_propagation_unsupported.go | 22 -
 volume/volume_test.go | 212 -
 volume/volume_unix.go | 186 -
 volume/volume_windows.go | 206 -
 2459 files changed, 666257 deletions(-)
 delete mode 100644 api/README.md
 delete mode 100644 api/client/bundlefile/bundlefile.go
 delete mode 100644 api/client/bundlefile/bundlefile_test.go
 delete mode 100644 api/client/cli.go
 delete mode 100644 api/client/client.go
 delete mode 100644 api/client/commands.go
 delete mode 100644 api/client/container/attach.go
 delete mode 100644 api/client/container/commit.go
 delete mode 100644 api/client/container/cp.go
 delete mode 100644 api/client/container/create.go
 delete mode 100644 api/client/container/diff.go
 delete mode 100644 api/client/container/export.go
 delete mode 100644 api/client/container/kill.go
 delete mode 100644 api/client/container/logs.go
 delete mode 100644 api/client/container/pause.go
 delete mode 100644 api/client/container/port.go
 delete mode 100644 api/client/container/ps.go
 delete mode 100644 api/client/container/rename.go
 delete mode 100644 api/client/container/restart.go
 delete mode 100644 api/client/container/rm.go
 delete mode 100644 api/client/container/run.go
 delete mode 100644 api/client/container/start.go
 delete mode 100644 api/client/container/stats.go
 delete mode 100644 api/client/container/stats_helpers.go
 delete mode 100644 api/client/container/stats_unit_test.go
 delete mode 100644 api/client/container/stop.go
 delete mode 100644 api/client/container/top.go
 delete mode 100644 api/client/container/unpause.go
 delete mode 100644 api/client/container/update.go
 delete mode 100644 api/client/container/utils.go
 delete mode 100644 api/client/container/wait.go
 delete mode 100644 api/client/credentials.go
 delete mode 100644 api/client/exec.go
 delete mode 100644 api/client/exec_test.go
 delete mode 100644 api/client/formatter/custom.go
 delete mode 100644 api/client/formatter/custom_test.go
 delete mode 100644 api/client/formatter/formatter.go
 delete mode 100644 api/client/formatter/formatter_test.go
 delete mode 100644 api/client/hijack.go
 delete mode 100644 api/client/idresolver/idresolver.go
 delete mode 100644 api/client/image/build.go
 delete mode 100644 api/client/image/history.go
 delete mode 100644 api/client/image/images.go
 delete mode 100644 api/client/image/import.go
 delete mode 100644 api/client/image/load.go
 delete mode 100644 api/client/image/pull.go
 delete mode 100644 api/client/image/push.go
 delete mode 100644 api/client/image/remove.go
 delete mode 100644 api/client/image/save.go
 delete mode 100644 api/client/image/search.go
 delete mode 100644 api/client/image/tag.go
 delete mode 100644 api/client/inspect.go
 delete mode 100644 api/client/inspect/inspector.go
 delete mode 100644 api/client/inspect/inspector_test.go
 delete mode 100644 api/client/network/cmd.go
 delete mode 100644 api/client/network/connect.go
 delete mode 100644 api/client/network/create.go
 delete mode 100644 api/client/network/disconnect.go
 delete mode 100644 api/client/network/inspect.go
 delete mode 100644 api/client/network/list.go
 delete mode 100644 api/client/network/remove.go
 delete mode 100644 api/client/node/accept.go
 delete mode 100644 api/client/node/cmd.go
 delete mode 100644 api/client/node/demote.go
 delete mode 100644 api/client/node/inspect.go
 delete mode 100644 api/client/node/list.go
 delete mode 100644 api/client/node/opts.go
 delete mode 100644 api/client/node/promote.go
 delete mode 100644 api/client/node/remove.go
 delete mode 100644 api/client/node/tasks.go
 delete mode 100644 api/client/node/update.go
 delete mode 100644 api/client/plugin/cmd.go
 delete mode 100644 api/client/plugin/cmd_experimental.go
 delete mode 100644 api/client/plugin/disable.go
 delete mode 100644 api/client/plugin/enable.go
 delete mode 100644 api/client/plugin/inspect.go
 delete mode 100644 api/client/plugin/install.go
 delete mode 100644 api/client/plugin/list.go
 delete mode 100644 api/client/plugin/push.go
 delete mode 100644 api/client/plugin/remove.go
 delete mode 100644 api/client/plugin/set.go
 delete mode 100644 api/client/registry.go
 delete mode 100644 api/client/registry/login.go
 delete mode 100644 api/client/registry/logout.go
 delete mode 100644 api/client/service/cmd.go
 delete mode 100644 api/client/service/create.go
 delete mode 100644 api/client/service/inspect.go
 delete mode 100644 api/client/service/list.go
 delete mode 100644 api/client/service/opts.go
 delete mode 100644 api/client/service/opts_test.go
 delete mode 100644 api/client/service/remove.go
 delete mode 100644 api/client/service/scale.go
 delete mode 100644 api/client/service/tasks.go
 delete mode 100644 api/client/service/update.go
 delete mode 100644 api/client/service/update_test.go
 delete mode 100644 api/client/stack/cmd.go
 delete mode 100644 api/client/stack/cmd_stub.go
 delete mode 100644 api/client/stack/common.go
 delete mode 100644 api/client/stack/config.go
 delete mode 100644 api/client/stack/deploy.go
 delete mode 100644 api/client/stack/opts.go
 delete mode 100644 api/client/stack/remove.go
 delete mode 100644 api/client/stack/tasks.go
 delete mode 100644 api/client/swarm/cmd.go
 delete mode 100644 api/client/swarm/init.go
 delete mode 100644 api/client/swarm/inspect.go
 delete mode 100644 api/client/swarm/join.go
 delete mode 100644 api/client/swarm/leave.go
 delete mode 100644 api/client/swarm/opts.go
 delete mode 100644 api/client/swarm/opts_test.go
 delete mode 100644 api/client/swarm/secret.go
 delete mode 100644 api/client/swarm/update.go
 delete mode 100644 api/client/system/events.go
 delete mode 100644 api/client/system/events_utils.go
 delete mode 100644 api/client/system/info.go
 delete mode 100644 api/client/system/version.go
 delete mode 100644 api/client/task/print.go
 delete mode 100644 api/client/trust.go
 delete mode 100644 api/client/trust_test.go
 delete mode 100644 api/client/utils.go
 delete mode 100644 api/client/volume/cmd.go
 delete mode 100644 api/client/volume/create.go
 delete mode 100644 api/client/volume/inspect.go
 delete mode 100644 api/client/volume/list.go
 delete mode 100644 api/client/volume/remove.go
 delete mode 100644 api/common.go
 delete mode 100644 api/common_test.go
 delete mode 100644 api/fixtures/keyfile
 delete mode 100644 api/server/httputils/decoder.go
 delete mode 100644 api/server/httputils/errors.go
 delete mode 100644 api/server/httputils/form.go
 delete mode 100644 api/server/httputils/form_test.go
 delete mode 100644 api/server/httputils/httputils.go
 delete mode 100644 api/server/middleware.go
 delete mode 100644 api/server/middleware/cors.go
 delete mode 100644 api/server/middleware/debug.go
 delete mode 100644 api/server/middleware/middleware.go
 delete mode 100644 api/server/middleware/user_agent.go
 delete mode 100644 api/server/middleware/version.go
 delete mode 100644 api/server/middleware/version_test.go
 delete mode 100644 api/server/profiler.go
 delete mode 100644 api/server/router/build/backend.go
 delete mode 100644 api/server/router/build/build.go
 delete mode 100644 api/server/router/build/build_routes.go
 delete mode 100644 api/server/router/container/backend.go
 delete mode 100644 api/server/router/container/container.go
 delete mode 100644 api/server/router/container/container_routes.go
 delete mode 100644 api/server/router/container/copy.go
 delete mode 100644 api/server/router/container/exec.go
 delete mode 100644 api/server/router/container/inspect.go
 delete mode 100644 api/server/router/image/backend.go
 delete mode 100644 api/server/router/image/image.go
 delete mode 100644 api/server/router/image/image_routes.go
 delete mode 100644 api/server/router/local.go
 delete mode 100644 api/server/router/network/backend.go
 delete mode 100644 api/server/router/network/filter.go
 delete mode 100644 api/server/router/network/network.go
 delete mode 100644 api/server/router/network/network_routes.go
 delete mode 100644 api/server/router/plugin/backend.go
 delete mode 100644 api/server/router/plugin/plugin.go
 delete mode 100644 api/server/router/plugin/plugin_experimental.go
 delete mode 100644 api/server/router/plugin/plugin_regular.go
 delete mode 100644 api/server/router/plugin/plugin_routes.go
 delete mode 100644 api/server/router/router.go
 delete mode 100644 api/server/router/swarm/backend.go
 delete mode 100644 api/server/router/swarm/cluster.go
 delete mode 100644 api/server/router/swarm/cluster_routes.go
 delete mode 100644 api/server/router/system/backend.go
 delete mode 100644 api/server/router/system/system.go
 delete mode 100644 api/server/router/system/system_routes.go
 delete mode 100644 api/server/router/volume/backend.go
 delete mode 100644 api/server/router/volume/volume.go
 delete mode 100644 api/server/router/volume/volume_routes.go
 delete mode 100644 api/server/router_swapper.go
 delete mode 100644 api/server/server.go
 delete mode 100644 api/server/server_test.go
 delete mode 100644 api/types/backend/backend.go
 delete mode 100644 builder/builder.go
 delete mode 100644 builder/context.go
 delete mode 100644 builder/context_test.go
 delete mode 100644 builder/context_unix.go
 delete mode 100644 builder/context_windows.go
 delete mode 100644 builder/dockerfile/bflag.go
 delete mode 100644 builder/dockerfile/bflag_test.go
 delete mode 100644 builder/dockerfile/builder.go
 delete mode 100644 builder/dockerfile/builder_unix.go
 delete mode 100644 builder/dockerfile/builder_windows.go
 delete mode 100644 builder/dockerfile/command/command.go
 delete mode 100644 builder/dockerfile/dispatchers.go
 delete mode 100644 builder/dockerfile/dispatchers_test.go
 delete mode 100644 builder/dockerfile/dispatchers_unix.go
 delete mode 100644 builder/dockerfile/dispatchers_windows.go
 delete mode 100644 builder/dockerfile/dispatchers_windows_test.go
 delete mode 100644 builder/dockerfile/envVarTest
 delete mode 100644 builder/dockerfile/evaluator.go
 delete mode 100644 builder/dockerfile/evaluator_test.go
 delete mode 100644 builder/dockerfile/evaluator_unix.go
 delete mode 100644 builder/dockerfile/evaluator_windows.go
 delete mode 100644 builder/dockerfile/internals.go
 delete mode 100644 builder/dockerfile/internals_test.go
 delete mode 100644 builder/dockerfile/internals_unix.go
 delete mode 100644 builder/dockerfile/internals_windows.go
 delete mode 100644 builder/dockerfile/internals_windows_test.go
 delete mode 100644 builder/dockerfile/parser/dumper/main.go
 delete mode 100644 builder/dockerfile/parser/json_test.go
 delete mode 100644 builder/dockerfile/parser/line_parsers.go
 delete mode 100644 builder/dockerfile/parser/parser.go
 delete mode 100644 builder/dockerfile/parser/parser_test.go
 delete mode 100644 builder/dockerfile/parser/testfile-line/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result
 delete mode 100644 builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/brimstone-consuldock/result
 delete mode 100644 builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/brimstone-docker-consul/result
 delete mode 100644 builder/dockerfile/parser/testfiles/continueIndent/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/continueIndent/result
 delete mode 100644 builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/cpuguy83-nagios/result
 delete mode 100644 builder/dockerfile/parser/testfiles/docker/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/docker/result
 delete mode 100644 builder/dockerfile/parser/testfiles/env/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/env/result
 delete mode 100644 builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/escape-after-comment/result
 delete mode 100644 builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/escape-nonewline/result
 delete mode 100644 builder/dockerfile/parser/testfiles/escape/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/escape/result
 delete mode 100644 builder/dockerfile/parser/testfiles/escapes/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/escapes/result
 delete mode 100644 builder/dockerfile/parser/testfiles/flags/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/flags/result
 delete mode 100644 builder/dockerfile/parser/testfiles/health/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/health/result
 delete mode 100644 builder/dockerfile/parser/testfiles/influxdb/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/influxdb/result
 delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result
 delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result
 delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result
 delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result
 delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result
 delete mode 100644 builder/dockerfile/parser/testfiles/json/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/json/result
 delete mode 100644 builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result
 delete mode 100644 builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result
 delete mode 100644 builder/dockerfile/parser/testfiles/mail/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/mail/result
 delete mode 100644 builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/multiple-volumes/result
 delete mode 100644 builder/dockerfile/parser/testfiles/mumble/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/mumble/result
 delete mode 100644 builder/dockerfile/parser/testfiles/nginx/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/nginx/result
 delete mode 100644 builder/dockerfile/parser/testfiles/tf2/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/tf2/result
 delete mode 100644 builder/dockerfile/parser/testfiles/weechat/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/weechat/result
 delete mode 100644 builder/dockerfile/parser/testfiles/znc/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/znc/result
 delete mode 100644 builder/dockerfile/parser/utils.go
 delete mode 100644 builder/dockerfile/shell_parser.go
 delete mode 100644 builder/dockerfile/shell_parser_test.go
 delete mode 100644 builder/dockerfile/support.go
 delete mode 100644 builder/dockerfile/support_test.go
 delete mode 100644 builder/dockerfile/utils_test.go
 delete mode 100644 builder/dockerfile/wordsTest
 delete mode 100644 builder/dockerignore.go
 delete mode 100644 builder/dockerignore/dockerignore.go
 delete mode 100644 builder/dockerignore/dockerignore_test.go
 delete mode 100644 builder/dockerignore_test.go
 delete mode 100644 builder/git.go
 delete mode 100644 builder/remote.go
 delete mode 100644 builder/remote_test.go
 delete mode 100644 builder/tarsum.go
 delete mode 100644 builder/tarsum_test.go
 delete mode 100644 builder/utils_test.go
 delete mode 100644 cli/cli.go
 delete mode 100644 cli/cobraadaptor/adaptor.go
 delete mode 100644 cli/error.go
 delete mode 100644 cli/flagerrors.go
 delete mode 100644 cli/flags/client.go
 delete mode 100644 cli/flags/common.go
 delete mode 100644 cli/required.go
 delete mode 100644 cli/usage.go
 delete mode 100644 cliconfig/config.go
 delete mode 100644 cliconfig/config_test.go
 delete mode 100644 cliconfig/configfile/file.go
 delete mode 100644 cliconfig/configfile/file_test.go
 delete mode 100644 cliconfig/credentials/credentials.go
 delete mode 100644 cliconfig/credentials/default_store.go
 delete mode 100644 cliconfig/credentials/default_store_darwin.go
 delete mode 100644 cliconfig/credentials/default_store_linux.go
 delete mode 100644 cliconfig/credentials/default_store_unsupported.go
 delete mode 100644 cliconfig/credentials/default_store_windows.go
 delete mode 100644 cliconfig/credentials/file_store.go
 delete mode 100644 cliconfig/credentials/file_store_test.go
 delete mode 100644 cliconfig/credentials/native_store.go
 delete mode 100644 cliconfig/credentials/native_store_test.go
 delete mode 100644 cmd/docker/daemon.go
 delete mode 100644 cmd/docker/daemon_none.go
 delete mode 100644 cmd/docker/daemon_none_test.go
 delete mode 100644 cmd/docker/daemon_unix.go
 delete mode 100644 cmd/docker/docker.go
 delete mode 100644 cmd/docker/docker_test.go
 delete mode 100644 
cmd/docker/docker_windows.go delete mode 100644 cmd/docker/usage.go delete mode 100644 cmd/docker/usage_test.go delete mode 100644 cmd/dockerd/README.md delete mode 100644 cmd/dockerd/daemon.go delete mode 100644 cmd/dockerd/daemon_freebsd.go delete mode 100644 cmd/dockerd/daemon_linux.go delete mode 100644 cmd/dockerd/daemon_no_plugin_support.go delete mode 100644 cmd/dockerd/daemon_plugin_support.go delete mode 100644 cmd/dockerd/daemon_solaris.go delete mode 100644 cmd/dockerd/daemon_test.go delete mode 100644 cmd/dockerd/daemon_unix.go delete mode 100644 cmd/dockerd/daemon_unix_test.go delete mode 100644 cmd/dockerd/daemon_windows.go delete mode 100644 cmd/dockerd/docker.go delete mode 100644 cmd/dockerd/docker_windows.go delete mode 100644 cmd/dockerd/hack/malformed_host_override.go delete mode 100644 cmd/dockerd/hack/malformed_host_override_test.go delete mode 100644 cmd/dockerd/routes.go delete mode 100644 cmd/dockerd/routes_experimental.go delete mode 100644 cmd/dockerd/service_unsupported.go delete mode 100644 cmd/dockerd/service_windows.go delete mode 100644 container/archive.go delete mode 100644 container/container.go delete mode 100644 container/container_solaris.go delete mode 100644 container/container_unit_test.go delete mode 100644 container/container_unix.go delete mode 100644 container/container_windows.go delete mode 100644 container/health.go delete mode 100644 container/history.go delete mode 100644 container/memory_store.go delete mode 100644 container/memory_store_test.go delete mode 100644 container/monitor.go delete mode 100644 container/mounts_unix.go delete mode 100644 container/mounts_windows.go delete mode 100644 container/state.go delete mode 100644 container/state_solaris.go delete mode 100644 container/state_test.go delete mode 100644 container/state_unix.go delete mode 100644 container/state_windows.go delete mode 100644 container/store.go delete mode 100644 daemon/apparmor_default.go delete mode 100644 daemon/apparmor_default_unsupported.go delete mode 100644 daemon/archive.go delete mode 100644 daemon/archive_unix.go delete mode 100644 daemon/archive_windows.go delete mode 100644 daemon/attach.go delete mode 100644 daemon/auth.go delete mode 100644 daemon/caps/utils_unix.go delete mode 100644 daemon/changes.go delete mode 100644 daemon/cluster/cluster.go delete mode 100644 daemon/cluster/convert/container.go delete mode 100644 daemon/cluster/convert/network.go delete mode 100644 daemon/cluster/convert/node.go delete mode 100644 daemon/cluster/convert/service.go delete mode 100644 daemon/cluster/convert/swarm.go delete mode 100644 daemon/cluster/convert/task.go delete mode 100644 daemon/cluster/executor/backend.go delete mode 100644 daemon/cluster/executor/container/adapter.go delete mode 100644 daemon/cluster/executor/container/container.go delete mode 100644 daemon/cluster/executor/container/controller.go delete mode 100644 daemon/cluster/executor/container/errors.go delete mode 100644 daemon/cluster/executor/container/executor.go delete mode 100644 daemon/cluster/executor/container/health_test.go delete mode 100644 daemon/cluster/filters.go delete mode 100644 daemon/cluster/helpers.go delete mode 100644 daemon/cluster/provider/network.go delete mode 100644 daemon/commit.go delete mode 100644 daemon/config.go delete mode 100644 daemon/config_experimental.go delete mode 100644 daemon/config_solaris.go delete mode 100644 daemon/config_stub.go delete mode 100644 daemon/config_test.go delete mode 100644 daemon/config_unix.go delete mode 100644 
daemon/config_windows.go delete mode 100644 daemon/container.go delete mode 100644 daemon/container_operations.go delete mode 100644 daemon/container_operations_solaris.go delete mode 100644 daemon/container_operations_unix.go delete mode 100644 daemon/container_operations_windows.go delete mode 100644 daemon/create.go delete mode 100644 daemon/create_unix.go delete mode 100644 daemon/create_windows.go delete mode 100644 daemon/daemon.go delete mode 100644 daemon/daemon_experimental.go delete mode 100644 daemon/daemon_linux.go delete mode 100644 daemon/daemon_linux_test.go delete mode 100644 daemon/daemon_solaris.go delete mode 100644 daemon/daemon_stub.go delete mode 100644 daemon/daemon_test.go delete mode 100644 daemon/daemon_unix.go delete mode 100644 daemon/daemon_unix_test.go delete mode 100644 daemon/daemon_unsupported.go delete mode 100644 daemon/daemon_windows.go delete mode 100644 daemon/debugtrap_unix.go delete mode 100644 daemon/debugtrap_unsupported.go delete mode 100644 daemon/debugtrap_windows.go delete mode 100644 daemon/delete.go delete mode 100644 daemon/delete_test.go delete mode 100644 daemon/discovery.go delete mode 100644 daemon/discovery_test.go delete mode 100644 daemon/errors.go delete mode 100644 daemon/events.go delete mode 100644 daemon/events/events.go delete mode 100644 daemon/events/events_test.go delete mode 100644 daemon/events/filter.go delete mode 100644 daemon/events/testutils/testutils.go delete mode 100644 daemon/events_test.go delete mode 100644 daemon/exec.go delete mode 100644 daemon/exec/exec.go delete mode 100644 daemon/exec_linux.go delete mode 100644 daemon/exec_solaris.go delete mode 100644 daemon/exec_windows.go delete mode 100644 daemon/export.go delete mode 100644 daemon/health.go delete mode 100644 daemon/health_test.go delete mode 100644 daemon/image.go delete mode 100644 daemon/image_delete.go delete mode 100644 daemon/image_exporter.go delete mode 100644 daemon/image_history.go delete mode 100644 daemon/image_inspect.go delete mode 100644 daemon/image_pull.go delete mode 100644 daemon/image_push.go delete mode 100644 daemon/image_tag.go delete mode 100644 daemon/images.go delete mode 100644 daemon/import.go delete mode 100644 daemon/info.go delete mode 100644 daemon/inspect.go delete mode 100644 daemon/inspect_solaris.go delete mode 100644 daemon/inspect_unix.go delete mode 100644 daemon/inspect_windows.go delete mode 100644 daemon/keys.go delete mode 100644 daemon/keys_unsupported.go delete mode 100644 daemon/kill.go delete mode 100644 daemon/links.go delete mode 100644 daemon/links/links.go delete mode 100644 daemon/links/links_test.go delete mode 100644 daemon/links_test.go delete mode 100644 daemon/list.go delete mode 100644 daemon/list_unix.go delete mode 100644 daemon/list_windows.go delete mode 100644 daemon/logdrivers_linux.go delete mode 100644 daemon/logdrivers_windows.go delete mode 100644 daemon/logger/awslogs/cloudwatchlogs.go delete mode 100644 daemon/logger/awslogs/cloudwatchlogs_test.go delete mode 100644 daemon/logger/awslogs/cwlogsiface_mock_test.go delete mode 100644 daemon/logger/context.go delete mode 100644 daemon/logger/copier.go delete mode 100644 daemon/logger/copier_test.go delete mode 100644 daemon/logger/etwlogs/etwlogs_windows.go delete mode 100644 daemon/logger/factory.go delete mode 100644 daemon/logger/fluentd/fluentd.go delete mode 100644 daemon/logger/gcplogs/gcplogging.go delete mode 100644 daemon/logger/gelf/gelf.go delete mode 100644 daemon/logger/gelf/gelf_unsupported.go delete mode 100644 
daemon/logger/journald/journald.go delete mode 100644 daemon/logger/journald/journald_unsupported.go delete mode 100644 daemon/logger/journald/read.go delete mode 100644 daemon/logger/journald/read_native.go delete mode 100644 daemon/logger/journald/read_native_compat.go delete mode 100644 daemon/logger/journald/read_unsupported.go delete mode 100644 daemon/logger/jsonfilelog/jsonfilelog.go delete mode 100644 daemon/logger/jsonfilelog/jsonfilelog_test.go delete mode 100644 daemon/logger/jsonfilelog/read.go delete mode 100644 daemon/logger/logger.go delete mode 100644 daemon/logger/loggerutils/log_tag.go delete mode 100644 daemon/logger/loggerutils/log_tag_test.go delete mode 100644 daemon/logger/loggerutils/rotatefilewriter.go delete mode 100644 daemon/logger/splunk/splunk.go delete mode 100644 daemon/logger/syslog/syslog.go delete mode 100644 daemon/logger/syslog/syslog_test.go delete mode 100644 daemon/logger/syslog/syslog_unsupported.go delete mode 100644 daemon/logs.go delete mode 100644 daemon/logs_test.go delete mode 100644 daemon/monitor.go delete mode 100644 daemon/monitor_linux.go delete mode 100644 daemon/monitor_solaris.go delete mode 100644 daemon/monitor_windows.go delete mode 100644 daemon/mounts.go delete mode 100644 daemon/names.go delete mode 100644 daemon/network.go delete mode 100644 daemon/network/settings.go delete mode 100644 daemon/oci_linux.go delete mode 100644 daemon/oci_solaris.go delete mode 100644 daemon/oci_windows.go delete mode 100644 daemon/pause.go delete mode 100644 daemon/rename.go delete mode 100644 daemon/resize.go delete mode 100644 daemon/restart.go delete mode 100644 daemon/search.go delete mode 100644 daemon/search_test.go delete mode 100644 daemon/seccomp_disabled.go delete mode 100644 daemon/seccomp_linux.go delete mode 100644 daemon/seccomp_unsupported.go delete mode 100644 daemon/selinux_linux.go delete mode 100644 daemon/selinux_unsupported.go delete mode 100644 daemon/start.go delete mode 100644 daemon/start_linux.go delete mode 100644 daemon/start_windows.go delete mode 100644 daemon/stats.go delete mode 100644 daemon/stats_collector_solaris.go delete mode 100644 daemon/stats_collector_unix.go delete mode 100644 daemon/stats_collector_windows.go delete mode 100644 daemon/stop.go delete mode 100644 daemon/top_unix.go delete mode 100644 daemon/top_unix_test.go delete mode 100644 daemon/top_windows.go delete mode 100644 daemon/unpause.go delete mode 100644 daemon/update.go delete mode 100644 daemon/update_linux.go delete mode 100644 daemon/update_solaris.go delete mode 100644 daemon/update_windows.go delete mode 100644 daemon/volumes.go delete mode 100644 daemon/volumes_unit_test.go delete mode 100644 daemon/volumes_unix.go delete mode 100644 daemon/volumes_windows.go delete mode 100644 daemon/wait.go delete mode 100644 distribution/errors.go delete mode 100644 distribution/fixtures/validate_manifest/bad_manifest delete mode 100644 distribution/fixtures/validate_manifest/extra_data_manifest delete mode 100644 distribution/fixtures/validate_manifest/good_manifest delete mode 100644 distribution/metadata/metadata.go delete mode 100644 distribution/metadata/v1_id_service.go delete mode 100644 distribution/metadata/v1_id_service_test.go delete mode 100644 distribution/metadata/v2_metadata_service.go delete mode 100644 distribution/metadata/v2_metadata_service_test.go delete mode 100644 distribution/pull.go delete mode 100644 distribution/pull_v1.go delete mode 100644 distribution/pull_v2.go delete mode 100644 distribution/pull_v2_test.go delete 
mode 100644 distribution/pull_v2_unix.go delete mode 100644 distribution/pull_v2_windows.go delete mode 100644 distribution/push.go delete mode 100644 distribution/push_v1.go delete mode 100644 distribution/push_v2.go delete mode 100644 distribution/registry.go delete mode 100644 distribution/registry_unit_test.go delete mode 100644 distribution/xfer/download.go delete mode 100644 distribution/xfer/download_test.go delete mode 100644 distribution/xfer/transfer.go delete mode 100644 distribution/xfer/transfer_test.go delete mode 100644 distribution/xfer/upload.go delete mode 100644 distribution/xfer/upload_test.go delete mode 100644 dockerversion/useragent.go delete mode 100644 errors/errors.go delete mode 100644 experimental/README.md delete mode 100644 experimental/docker-stacks-and-bundles.md delete mode 100644 experimental/images/ipvlan-l3.gliffy delete mode 100644 experimental/images/ipvlan-l3.png delete mode 100644 experimental/images/ipvlan-l3.svg delete mode 100644 experimental/images/ipvlan_l2_simple.gliffy delete mode 100644 experimental/images/ipvlan_l2_simple.png delete mode 100644 experimental/images/ipvlan_l2_simple.svg delete mode 100644 experimental/images/macvlan-bridge-ipvlan-l2.gliffy delete mode 100644 experimental/images/macvlan-bridge-ipvlan-l2.png delete mode 100644 experimental/images/macvlan-bridge-ipvlan-l2.svg delete mode 100644 experimental/images/macvlan_bridge_simple.gliffy delete mode 100644 experimental/images/macvlan_bridge_simple.png delete mode 100644 experimental/images/macvlan_bridge_simple.svg delete mode 100644 experimental/images/multi_tenant_8021q_vlans.gliffy delete mode 100644 experimental/images/multi_tenant_8021q_vlans.png delete mode 100644 experimental/images/multi_tenant_8021q_vlans.svg delete mode 100644 experimental/images/vlans-deeper-look.gliffy delete mode 100644 experimental/images/vlans-deeper-look.png delete mode 100644 experimental/images/vlans-deeper-look.svg delete mode 100644 experimental/plugins_graphdriver.md delete mode 100644 experimental/vlan-networks.md delete mode 100644 hack/make/binary-client delete mode 100644 hack/make/binary-daemon delete mode 100644 hack/make/dynbinary-client delete mode 100644 hack/make/dynbinary-daemon delete mode 100644 hack/make/install-binary-client delete mode 100644 hack/make/install-binary-daemon delete mode 100644 image/fs.go delete mode 100644 image/fs_test.go delete mode 100644 image/image.go delete mode 100644 image/image_test.go delete mode 100644 image/rootfs.go delete mode 100644 image/rootfs_unix.go delete mode 100644 image/rootfs_windows.go delete mode 100644 image/spec/v1.1.md delete mode 100644 image/spec/v1.md delete mode 100644 image/store.go delete mode 100644 image/store_test.go delete mode 100644 image/tarexport/load.go delete mode 100644 image/tarexport/save.go delete mode 100644 image/tarexport/tarexport.go delete mode 100644 image/v1/imagev1.go delete mode 100644 image/v1/imagev1_test.go delete mode 100644 layer/empty.go delete mode 100644 layer/empty_test.go delete mode 100644 layer/filestore.go delete mode 100644 layer/filestore_test.go delete mode 100644 layer/layer.go delete mode 100644 layer/layer_store.go delete mode 100644 layer/layer_store_windows.go delete mode 100644 layer/layer_test.go delete mode 100644 layer/layer_unix.go delete mode 100644 layer/layer_unix_test.go delete mode 100644 layer/layer_windows.go delete mode 100644 layer/migration.go delete mode 100644 layer/migration_test.go delete mode 100644 layer/mount_test.go delete mode 100644 
layer/mounted_layer.go delete mode 100644 layer/ro_layer.go delete mode 100644 layer/ro_layer_windows.go delete mode 100644 libcontainerd/client.go delete mode 100644 libcontainerd/client_linux.go delete mode 100644 libcontainerd/client_solaris.go delete mode 100644 libcontainerd/client_windows.go delete mode 100644 libcontainerd/container.go delete mode 100644 libcontainerd/container_linux.go delete mode 100644 libcontainerd/container_solaris.go delete mode 100644 libcontainerd/container_windows.go delete mode 100644 libcontainerd/pausemonitor_linux.go delete mode 100644 libcontainerd/process.go delete mode 100644 libcontainerd/process_linux.go delete mode 100644 libcontainerd/process_solaris.go delete mode 100644 libcontainerd/process_windows.go delete mode 100644 libcontainerd/queue_linux.go delete mode 100644 libcontainerd/remote.go delete mode 100644 libcontainerd/remote_linux.go delete mode 100644 libcontainerd/remote_solaris.go delete mode 100644 libcontainerd/remote_windows.go delete mode 100644 libcontainerd/types.go delete mode 100644 libcontainerd/types_linux.go delete mode 100644 libcontainerd/types_solaris.go delete mode 100644 libcontainerd/types_windows.go delete mode 100644 libcontainerd/utils_linux.go delete mode 100644 libcontainerd/utils_windows.go delete mode 100644 libcontainerd/windowsoci/oci_windows.go delete mode 100644 libcontainerd/windowsoci/unsupported.go delete mode 100644 migrate/v1/migratev1.go delete mode 100644 migrate/v1/migratev1_test.go delete mode 100644 oci/defaults_linux.go delete mode 100644 oci/defaults_solaris.go delete mode 100644 oci/defaults_windows.go delete mode 100644 opts/hosts.go delete mode 100644 opts/hosts_test.go delete mode 100644 opts/hosts_unix.go delete mode 100644 opts/hosts_windows.go delete mode 100644 opts/ip.go delete mode 100644 opts/ip_test.go delete mode 100644 pkg/aaparser/aaparser.go delete mode 100644 pkg/aaparser/aaparser_test.go delete mode 100644 pkg/authorization/api.go delete mode 100644 pkg/authorization/authz.go delete mode 100644 pkg/authorization/authz_unix_test.go delete mode 100644 pkg/authorization/middleware.go delete mode 100644 pkg/authorization/plugin.go delete mode 100644 pkg/authorization/response.go delete mode 100644 pkg/broadcaster/unbuffered.go delete mode 100644 pkg/broadcaster/unbuffered_test.go delete mode 100644 pkg/discovery/README.md delete mode 100644 pkg/discovery/backends.go delete mode 100644 pkg/discovery/discovery.go delete mode 100644 pkg/discovery/discovery_test.go delete mode 100644 pkg/discovery/entry.go delete mode 100644 pkg/discovery/file/file.go delete mode 100644 pkg/discovery/file/file_test.go delete mode 100644 pkg/discovery/generator.go delete mode 100644 pkg/discovery/generator_test.go delete mode 100644 pkg/discovery/kv/kv.go delete mode 100644 pkg/discovery/kv/kv_test.go delete mode 100644 pkg/discovery/memory/memory.go delete mode 100644 pkg/discovery/memory/memory_test.go delete mode 100644 pkg/discovery/nodes/nodes.go delete mode 100644 pkg/discovery/nodes/nodes_test.go delete mode 100644 pkg/filenotify/filenotify.go delete mode 100644 pkg/filenotify/fsnotify.go delete mode 100644 pkg/filenotify/poller.go delete mode 100644 pkg/filenotify/poller_test.go delete mode 100644 pkg/gitutils/gitutils.go delete mode 100644 pkg/gitutils/gitutils_test.go delete mode 100644 pkg/graphdb/conn_sqlite3.go delete mode 100644 pkg/graphdb/conn_sqlite3_unix.go delete mode 100644 pkg/graphdb/conn_sqlite3_windows.go delete mode 100644 pkg/graphdb/conn_unsupported.go delete mode 100644 
pkg/graphdb/graphdb.go delete mode 100644 pkg/graphdb/graphdb_test.go delete mode 100644 pkg/graphdb/sort.go delete mode 100644 pkg/graphdb/sort_test.go delete mode 100644 pkg/graphdb/utils.go delete mode 100644 pkg/httputils/httputils.go delete mode 100644 pkg/httputils/httputils_test.go delete mode 100644 pkg/httputils/mimetype.go delete mode 100644 pkg/httputils/mimetype_test.go delete mode 100644 pkg/httputils/resumablerequestreader.go delete mode 100644 pkg/httputils/resumablerequestreader_test.go delete mode 100644 pkg/jsonlog/jsonlog.go delete mode 100644 pkg/jsonlog/jsonlog_marshalling.go delete mode 100644 pkg/jsonlog/jsonlog_marshalling_test.go delete mode 100644 pkg/jsonlog/jsonlogbytes.go delete mode 100644 pkg/jsonlog/jsonlogbytes_test.go delete mode 100644 pkg/jsonlog/time_marshalling.go delete mode 100644 pkg/jsonlog/time_marshalling_test.go delete mode 100644 pkg/listeners/listeners_solaris.go delete mode 100644 pkg/listeners/listeners_unix.go delete mode 100644 pkg/listeners/listeners_windows.go delete mode 100644 pkg/locker/README.md delete mode 100644 pkg/locker/locker.go delete mode 100644 pkg/locker/locker_test.go delete mode 100644 pkg/namesgenerator/cmd/names-generator/main.go delete mode 100644 pkg/namesgenerator/names-generator.go delete mode 100644 pkg/namesgenerator/names-generator_test.go delete mode 100644 pkg/platform/architecture_linux.go delete mode 100644 pkg/platform/architecture_unix.go delete mode 100644 pkg/platform/architecture_windows.go delete mode 100644 pkg/platform/platform.go delete mode 100644 pkg/platform/utsname_int8.go delete mode 100644 pkg/platform/utsname_uint8.go delete mode 100644 pkg/progress/progress.go delete mode 100644 pkg/progress/progressreader.go delete mode 100644 pkg/progress/progressreader_test.go delete mode 100644 pkg/pubsub/publisher.go delete mode 100644 pkg/pubsub/publisher_test.go delete mode 100644 pkg/registrar/registrar.go delete mode 100644 pkg/registrar/registrar_test.go delete mode 100644 pkg/signal/README.md delete mode 100644 pkg/signal/signal.go delete mode 100644 pkg/signal/signal_darwin.go delete mode 100644 pkg/signal/signal_freebsd.go delete mode 100644 pkg/signal/signal_linux.go delete mode 100644 pkg/signal/signal_solaris.go delete mode 100644 pkg/signal/signal_unix.go delete mode 100644 pkg/signal/signal_unsupported.go delete mode 100644 pkg/signal/signal_windows.go delete mode 100644 pkg/signal/trap.go delete mode 100644 pkg/stdcopy/stdcopy.go delete mode 100644 pkg/stdcopy/stdcopy_test.go delete mode 100644 pkg/streamformatter/streamformatter.go delete mode 100644 pkg/streamformatter/streamformatter_test.go delete mode 100644 pkg/symlink/LICENSE.APACHE delete mode 100644 pkg/symlink/LICENSE.BSD delete mode 100644 pkg/symlink/README.md delete mode 100644 pkg/symlink/fs.go delete mode 100644 pkg/symlink/fs_unix.go delete mode 100644 pkg/symlink/fs_unix_test.go delete mode 100644 pkg/symlink/fs_windows.go delete mode 100644 pkg/tailfile/tailfile.go delete mode 100644 pkg/tailfile/tailfile_test.go delete mode 100644 pkg/tarsum/builder_context.go delete mode 100644 pkg/tarsum/builder_context_test.go delete mode 100644 pkg/tarsum/fileinfosums.go delete mode 100644 pkg/tarsum/fileinfosums_test.go delete mode 100644 pkg/tarsum/tarsum.go delete mode 100644 pkg/tarsum/tarsum_spec.md delete mode 100644 pkg/tarsum/tarsum_test.go delete mode 100644 pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json delete mode 100644 
pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar delete mode 100644 pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json delete mode 100644 pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar delete mode 100644 pkg/tarsum/testdata/collision/collision-0.tar delete mode 100644 pkg/tarsum/testdata/collision/collision-1.tar delete mode 100644 pkg/tarsum/testdata/collision/collision-2.tar delete mode 100644 pkg/tarsum/testdata/collision/collision-3.tar delete mode 100644 pkg/tarsum/testdata/xattr/json delete mode 100644 pkg/tarsum/testdata/xattr/layer.tar delete mode 100644 pkg/tarsum/versioning.go delete mode 100644 pkg/tarsum/versioning_test.go delete mode 100644 pkg/tarsum/writercloser.go delete mode 100644 pkg/term/ascii.go delete mode 100644 pkg/term/ascii_test.go delete mode 100644 pkg/term/tc_linux_cgo.go delete mode 100644 pkg/term/tc_other.go delete mode 100644 pkg/term/tc_solaris_cgo.go delete mode 100644 pkg/term/term.go delete mode 100644 pkg/term/term_solaris.go delete mode 100644 pkg/term/term_unix.go delete mode 100644 pkg/term/term_windows.go delete mode 100644 pkg/term/termios_darwin.go delete mode 100644 pkg/term/termios_freebsd.go delete mode 100644 pkg/term/termios_linux.go delete mode 100644 pkg/term/termios_openbsd.go delete mode 100644 pkg/term/windows/ansi_reader.go delete mode 100644 pkg/term/windows/ansi_writer.go delete mode 100644 pkg/term/windows/console.go delete mode 100644 pkg/term/windows/windows.go delete mode 100644 pkg/term/windows/windows_test.go delete mode 100644 pkg/tlsconfig/config.go delete mode 100644 pkg/truncindex/truncindex.go delete mode 100644 pkg/truncindex/truncindex_test.go delete mode 100644 pkg/urlutil/urlutil.go delete mode 100644 pkg/urlutil/urlutil_test.go delete mode 100644 pkg/useragent/README.md delete mode 100644 pkg/useragent/useragent.go delete mode 100644 pkg/useragent/useragent_test.go delete mode 100644 plugin/backend.go delete mode 100644 plugin/distribution/pull.go delete mode 100644 plugin/distribution/push.go delete mode 100644 plugin/distribution/types.go delete mode 100644 plugin/interface.go delete mode 100644 plugin/legacy.go delete mode 100644 profiles/apparmor/apparmor.go delete mode 100644 profiles/apparmor/template.go delete mode 100755 profiles/seccomp/default.json delete mode 100755 profiles/seccomp/fixtures/example.json delete mode 100644 profiles/seccomp/generate.go delete mode 100644 profiles/seccomp/seccomp.go delete mode 100644 profiles/seccomp/seccomp_default.go delete mode 100644 profiles/seccomp/seccomp_test.go delete mode 100644 profiles/seccomp/seccomp_unsupported.go delete mode 100644 reference/reference.go delete mode 100644 reference/reference_test.go delete mode 100644 reference/store.go delete mode 100644 reference/store_test.go delete mode 100644 registry/auth.go delete mode 100644 registry/auth_test.go delete mode 100644 registry/config.go delete mode 100644 registry/config_test.go delete mode 100644 registry/config_unix.go delete mode 100644 registry/config_windows.go delete mode 100644 registry/endpoint_test.go delete mode 100644 registry/endpoint_v1.go delete mode 100644 registry/reference.go delete mode 100644 registry/registry.go delete mode 100644 registry/registry_mock_test.go delete mode 100644 registry/registry_test.go delete mode 100644 registry/service.go delete mode 100644 registry/service_v1.go delete mode 100644 registry/service_v2.go delete mode 100644 
registry/session.go delete mode 100644 registry/types.go delete mode 100644 restartmanager/restartmanager.go delete mode 100644 restartmanager/restartmanager_test.go delete mode 100644 runconfig/compare.go delete mode 100644 runconfig/compare_test.go delete mode 100644 runconfig/config.go delete mode 100644 runconfig/config_test.go delete mode 100644 runconfig/config_unix.go delete mode 100644 runconfig/config_windows.go delete mode 100644 runconfig/errors.go delete mode 100644 runconfig/fixtures/unix/container_config_1_14.json delete mode 100644 runconfig/fixtures/unix/container_config_1_17.json delete mode 100644 runconfig/fixtures/unix/container_config_1_19.json delete mode 100644 runconfig/fixtures/unix/container_hostconfig_1_14.json delete mode 100644 runconfig/fixtures/unix/container_hostconfig_1_19.json delete mode 100644 runconfig/fixtures/windows/container_config_1_19.json delete mode 100644 runconfig/hostconfig.go delete mode 100644 runconfig/hostconfig_solaris.go delete mode 100644 runconfig/hostconfig_test.go delete mode 100644 runconfig/hostconfig_unix.go delete mode 100644 runconfig/hostconfig_windows.go delete mode 100644 runconfig/opts/envfile.go delete mode 100644 runconfig/opts/envfile_test.go delete mode 100644 runconfig/opts/fixtures/valid.env delete mode 100644 runconfig/opts/fixtures/valid.label delete mode 100644 runconfig/opts/opts.go delete mode 100644 runconfig/opts/opts_test.go delete mode 100644 runconfig/opts/parse.go delete mode 100644 runconfig/opts/parse_test.go delete mode 100644 runconfig/opts/runtime.go delete mode 100644 runconfig/opts/throttledevice.go delete mode 100644 runconfig/opts/ulimit.go delete mode 100644 runconfig/opts/ulimit_test.go delete mode 100644 runconfig/opts/weightdevice.go delete mode 100644 runconfig/streams.go delete mode 100644 utils/debug.go delete mode 100644 utils/debug_test.go delete mode 100644 utils/experimental.go delete mode 100644 utils/names.go delete mode 100644 utils/process_unix.go delete mode 100644 utils/process_windows.go delete mode 100644 utils/stubs.go delete mode 100644 utils/templates/templates.go delete mode 100644 utils/templates/templates_test.go delete mode 100644 utils/utils.go delete mode 100644 utils/utils_test.go delete mode 100644 vendor/src/bitbucket.org/ww/goautoneg/Makefile delete mode 100644 vendor/src/bitbucket.org/ww/goautoneg/README.txt delete mode 100644 vendor/src/bitbucket.org/ww/goautoneg/autoneg.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/LICENSE delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/README.md delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/constants.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/context.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/csi_entry_state.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/csi_param_state.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/escape_intermediate_state.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/escape_state.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/event_handler.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/ground_state.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/osc_string_state.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/parser.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/parser_action_helpers.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/parser_actions.go delete mode 100644 
vendor/src/github.com/Azure/go-ansiterm/states.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/utilities.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/ansi.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/api.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/attr_translation.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/erase_helpers.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/scroll_helper.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/utilities.go delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/win_event_handler.go delete mode 100644 vendor/src/github.com/BurntSushi/toml/.gitignore delete mode 100644 vendor/src/github.com/BurntSushi/toml/.travis.yml delete mode 100644 vendor/src/github.com/BurntSushi/toml/COMPATIBLE delete mode 100644 vendor/src/github.com/BurntSushi/toml/COPYING delete mode 100644 vendor/src/github.com/BurntSushi/toml/Makefile delete mode 100644 vendor/src/github.com/BurntSushi/toml/README.md delete mode 100644 vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING delete mode 100644 vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING delete mode 100644 vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING delete mode 100644 vendor/src/github.com/BurntSushi/toml/decode.go delete mode 100644 vendor/src/github.com/BurntSushi/toml/decode_meta.go delete mode 100644 vendor/src/github.com/BurntSushi/toml/doc.go delete mode 100644 vendor/src/github.com/BurntSushi/toml/encode.go delete mode 100644 vendor/src/github.com/BurntSushi/toml/encoding_types.go delete mode 100644 vendor/src/github.com/BurntSushi/toml/encoding_types_1.1.go delete mode 100644 vendor/src/github.com/BurntSushi/toml/lex.go delete mode 100644 vendor/src/github.com/BurntSushi/toml/parse.go delete mode 100644 vendor/src/github.com/BurntSushi/toml/session.vim delete mode 100644 vendor/src/github.com/BurntSushi/toml/type_check.go delete mode 100644 vendor/src/github.com/BurntSushi/toml/type_fields.go delete mode 100644 vendor/src/github.com/Graylog2/go-gelf/LICENSE delete mode 100644 vendor/src/github.com/Graylog2/go-gelf/gelf/reader.go delete mode 100644 vendor/src/github.com/Graylog2/go-gelf/gelf/writer.go delete mode 100644 vendor/src/github.com/RackSec/srslog/.gitignore delete mode 100644 vendor/src/github.com/RackSec/srslog/.travis.yml delete mode 100644 vendor/src/github.com/RackSec/srslog/CODE_OF_CONDUCT.md delete mode 100644 vendor/src/github.com/RackSec/srslog/LICENSE delete mode 100644 vendor/src/github.com/RackSec/srslog/README.md delete mode 100644 vendor/src/github.com/RackSec/srslog/constants.go delete mode 100644 vendor/src/github.com/RackSec/srslog/dialer.go delete mode 100644 vendor/src/github.com/RackSec/srslog/formatter.go delete mode 100644 vendor/src/github.com/RackSec/srslog/framer.go delete mode 100644 vendor/src/github.com/RackSec/srslog/net_conn.go delete mode 100644 vendor/src/github.com/RackSec/srslog/srslog.go delete mode 100644 vendor/src/github.com/RackSec/srslog/srslog_unix.go delete mode 100644 vendor/src/github.com/RackSec/srslog/writer.go delete mode 100644 vendor/src/github.com/agl/ed25519/LICENSE delete mode 100644 vendor/src/github.com/agl/ed25519/ed25519.go delete mode 100644 vendor/src/github.com/agl/ed25519/edwards25519/const.go delete mode 100644 
vendor/src/github.com/agl/ed25519/edwards25519/edwards25519.go delete mode 100755 vendor/src/github.com/armon/go-metrics/.gitignore delete mode 100644 vendor/src/github.com/armon/go-metrics/LICENSE delete mode 100644 vendor/src/github.com/armon/go-metrics/README.md delete mode 100644 vendor/src/github.com/armon/go-metrics/const_unix.go delete mode 100644 vendor/src/github.com/armon/go-metrics/const_windows.go delete mode 100644 vendor/src/github.com/armon/go-metrics/inmem.go delete mode 100644 vendor/src/github.com/armon/go-metrics/inmem_signal.go delete mode 100755 vendor/src/github.com/armon/go-metrics/metrics.go delete mode 100755 vendor/src/github.com/armon/go-metrics/sink.go delete mode 100755 vendor/src/github.com/armon/go-metrics/start.go delete mode 100644 vendor/src/github.com/armon/go-metrics/statsd.go delete mode 100755 vendor/src/github.com/armon/go-metrics/statsite.go delete mode 100644 vendor/src/github.com/armon/go-radix/.gitignore delete mode 100644 vendor/src/github.com/armon/go-radix/.travis.yml delete mode 100644 vendor/src/github.com/armon/go-radix/LICENSE delete mode 100644 vendor/src/github.com/armon/go-radix/README.md delete mode 100644 vendor/src/github.com/armon/go-radix/radix.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/config.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/errors.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/logger.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go delete mode 100644 
vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/validation.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/types.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/version.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/LICENSE delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go delete mode 100644 vendor/src/github.com/beorn7/perks/quantile/exampledata.txt delete mode 100644 vendor/src/github.com/beorn7/perks/quantile/stream.go delete mode 100644 vendor/src/github.com/boltdb/bolt/.gitignore delete mode 100644 vendor/src/github.com/boltdb/bolt/LICENSE delete mode 100644 vendor/src/github.com/boltdb/bolt/Makefile delete mode 100644 vendor/src/github.com/boltdb/bolt/README.md delete mode 100644 vendor/src/github.com/boltdb/bolt/appveyor.yml delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_386.go delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_amd64.go delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_arm.go delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_arm64.go delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_linux.go delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_openbsd.go delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_ppc.go delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_ppc64.go delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_s390x.go delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_unix.go delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go delete mode 100644 
vendor/src/github.com/boltdb/bolt/bolt_windows.go delete mode 100644 vendor/src/github.com/boltdb/bolt/boltsync_unix.go delete mode 100644 vendor/src/github.com/boltdb/bolt/bucket.go delete mode 100644 vendor/src/github.com/boltdb/bolt/cursor.go delete mode 100644 vendor/src/github.com/boltdb/bolt/db.go delete mode 100644 vendor/src/github.com/boltdb/bolt/doc.go delete mode 100644 vendor/src/github.com/boltdb/bolt/errors.go delete mode 100644 vendor/src/github.com/boltdb/bolt/freelist.go delete mode 100644 vendor/src/github.com/boltdb/bolt/node.go delete mode 100644 vendor/src/github.com/boltdb/bolt/page.go delete mode 100644 vendor/src/github.com/boltdb/bolt/tx.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/LICENSE delete mode 100644 vendor/src/github.com/cloudflare/cfssl/api/api.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/auth/auth.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/certdb/README.md delete mode 100644 vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/config/config.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/csr/csr.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/errors/doc.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/errors/error.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/errors/http.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/info/info.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/initca/initca.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/log/log.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/signer/local/local.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/signer/signer.go delete mode 100644 vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE delete mode 100644 vendor/src/github.com/coreos/etcd/LICENSE delete mode 100644 vendor/src/github.com/coreos/etcd/client/README.md delete mode 100644 vendor/src/github.com/coreos/etcd/client/auth_role.go delete mode 100644 vendor/src/github.com/coreos/etcd/client/auth_user.go delete mode 100644 vendor/src/github.com/coreos/etcd/client/cancelreq.go delete mode 100644 vendor/src/github.com/coreos/etcd/client/cancelreq_go14.go delete mode 100644 vendor/src/github.com/coreos/etcd/client/client.go delete mode 100644 vendor/src/github.com/coreos/etcd/client/cluster_error.go delete mode 100644 vendor/src/github.com/coreos/etcd/client/curl.go delete mode 100644 vendor/src/github.com/coreos/etcd/client/discover.go delete mode 100644 vendor/src/github.com/coreos/etcd/client/doc.go delete mode 100644 vendor/src/github.com/coreos/etcd/client/keys.generated.go delete mode 100644 vendor/src/github.com/coreos/etcd/client/keys.go delete mode 100644 vendor/src/github.com/coreos/etcd/client/members.go delete mode 100644 vendor/src/github.com/coreos/etcd/client/srv.go delete mode 100644 vendor/src/github.com/coreos/etcd/client/util.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/crc/crc.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go delete mode 100644 
vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/idutil/id.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/pathutil/path.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/types/doc.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/types/id.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/types/set.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/types/slice.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/types/urls.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/types/urlsmap.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/design.md delete mode 100644 vendor/src/github.com/coreos/etcd/raft/doc.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/log.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/log_unstable.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/logger.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/node.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/progress.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/raft.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto delete mode 100644 vendor/src/github.com/coreos/etcd/raft/rawnode.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/status.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/storage.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/util.go delete mode 100644 vendor/src/github.com/coreos/etcd/snap/db.go delete mode 100644 vendor/src/github.com/coreos/etcd/snap/message.go delete mode 100644 vendor/src/github.com/coreos/etcd/snap/metrics.go delete mode 100644 vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go delete mode 100644 vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto delete mode 100644 vendor/src/github.com/coreos/etcd/snap/snapshotter.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/decoder.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/doc.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/encoder.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/metrics.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/repair.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/util.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/wal.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/walpb/record.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/walpb/record.proto delete mode 100644 
vendor/src/github.com/coreos/go-systemd/LICENSE
delete mode 100644 vendor/src/github.com/coreos/go-systemd/activation/files.go
delete mode 100644 vendor/src/github.com/coreos/go-systemd/activation/listeners.go
delete mode 100644 vendor/src/github.com/coreos/go-systemd/activation/packetconns.go
delete mode 100644 vendor/src/github.com/coreos/go-systemd/daemon/sdnotify.go
delete mode 100644 vendor/src/github.com/coreos/go-systemd/journal/journal.go
delete mode 100644 vendor/src/github.com/coreos/pkg/LICENSE
delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/README.md
delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/formatters.go
delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go
delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/init.go
delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/init_windows.go
delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go
delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go
delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/logmap.go
delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go
delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go
delete mode 100644 vendor/src/github.com/deckarep/golang-set/.gitignore
delete mode 100644 vendor/src/github.com/deckarep/golang-set/.travis.yml
delete mode 100644 vendor/src/github.com/deckarep/golang-set/LICENSE
delete mode 100644 vendor/src/github.com/deckarep/golang-set/README.md
delete mode 100644 vendor/src/github.com/deckarep/golang-set/set.go
delete mode 100644 vendor/src/github.com/deckarep/golang-set/threadsafe.go
delete mode 100644 vendor/src/github.com/deckarep/golang-set/threadunsafe.go
delete mode 100644 vendor/src/github.com/docker/containerd/LICENSE.code
delete mode 100644 vendor/src/github.com/docker/containerd/LICENSE.docs
delete mode 100644 vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go
delete mode 100644 vendor/src/github.com/docker/containerd/api/grpc/types/api.proto
delete mode 100644 vendor/src/github.com/docker/distribution/.gitignore
delete mode 100644 vendor/src/github.com/docker/distribution/.mailmap
delete mode 100644 vendor/src/github.com/docker/distribution/AUTHORS
delete mode 100644 vendor/src/github.com/docker/distribution/BUILDING.md
delete mode 100644 vendor/src/github.com/docker/distribution/CHANGELOG.md
delete mode 100644 vendor/src/github.com/docker/distribution/CONTRIBUTING.md
delete mode 100644 vendor/src/github.com/docker/distribution/Dockerfile
delete mode 100644 vendor/src/github.com/docker/distribution/LICENSE
delete mode 100644 vendor/src/github.com/docker/distribution/MAINTAINERS
delete mode 100644 vendor/src/github.com/docker/distribution/Makefile
delete mode 100644 vendor/src/github.com/docker/distribution/README.md
delete mode 100644 vendor/src/github.com/docker/distribution/ROADMAP.md
delete mode 100644 vendor/src/github.com/docker/distribution/blobs.go
delete mode 100644 vendor/src/github.com/docker/distribution/circle.yml
delete mode 100644 vendor/src/github.com/docker/distribution/context/context.go
delete mode 100644 vendor/src/github.com/docker/distribution/context/doc.go
delete mode 100644 vendor/src/github.com/docker/distribution/context/http.go
delete mode 100644 vendor/src/github.com/docker/distribution/context/logger.go
delete mode 100644 vendor/src/github.com/docker/distribution/context/trace.go
delete mode 100644 vendor/src/github.com/docker/distribution/context/util.go
delete mode 100644 vendor/src/github.com/docker/distribution/context/version.go
delete mode 100755 vendor/src/github.com/docker/distribution/coverpkg.sh
delete mode 100644 vendor/src/github.com/docker/distribution/digest/digest.go
delete mode 100644 vendor/src/github.com/docker/distribution/digest/digester.go
delete mode 100644 vendor/src/github.com/docker/distribution/digest/doc.go
delete mode 100644 vendor/src/github.com/docker/distribution/digest/set.go
delete mode 100644 vendor/src/github.com/docker/distribution/digest/verifiers.go
delete mode 100644 vendor/src/github.com/docker/distribution/doc.go
delete mode 100644 vendor/src/github.com/docker/distribution/errors.go
delete mode 100644 vendor/src/github.com/docker/distribution/manifest/doc.go
delete mode 100644 vendor/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go
delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema1/config_builder.go
delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go
delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema1/reference_builder.go
delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema1/sign.go
delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema1/verify.go
delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema2/builder.go
delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema2/manifest.go
delete mode 100644 vendor/src/github.com/docker/distribution/manifest/versioned.go
delete mode 100644 vendor/src/github.com/docker/distribution/manifests.go
delete mode 100644 vendor/src/github.com/docker/distribution/reference/reference.go
delete mode 100644 vendor/src/github.com/docker/distribution/reference/regexp.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/errcode/register.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/doc.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/errors.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/routes.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/urls.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/auth/api_version.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/auth/authchallenge.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/auth/session.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/blob_writer.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/errors.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/repository.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/transport/http_reader.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/transport/transport.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
delete mode 100644 vendor/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go
delete mode 100644 vendor/src/github.com/docker/distribution/tags.go
delete mode 100644 vendor/src/github.com/docker/distribution/uuid/uuid.go
delete mode 100644 vendor/src/github.com/docker/docker-credential-helpers/LICENSE
delete mode 100644 vendor/src/github.com/docker/docker-credential-helpers/client/client.go
delete mode 100644 vendor/src/github.com/docker/docker-credential-helpers/client/command.go
delete mode 100644 vendor/src/github.com/docker/docker-credential-helpers/credentials/credentials.go
delete mode 100644 vendor/src/github.com/docker/docker-credential-helpers/credentials/error.go
delete mode 100644 vendor/src/github.com/docker/docker-credential-helpers/credentials/helper.go
delete mode 100644 vendor/src/github.com/docker/go-connections/LICENSE
delete mode 100644 vendor/src/github.com/docker/go-connections/nat/nat.go
delete mode 100644 vendor/src/github.com/docker/go-connections/nat/parse.go
delete mode 100644 vendor/src/github.com/docker/go-connections/nat/sort.go
delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/README.md
delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/inmem_socket.go
delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/proxy.go
delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/sockets.go
delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/sockets_unix.go
delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/sockets_windows.go
delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/tcp_socket.go
delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/unix_socket.go
delete mode 100644 vendor/src/github.com/docker/go-connections/tlsconfig/config.go
delete mode 100644 vendor/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
delete mode 100644 vendor/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
delete mode 100644 vendor/src/github.com/docker/go-events/.gitignore
delete mode 100644 vendor/src/github.com/docker/go-events/LICENSE
delete mode 100644 vendor/src/github.com/docker/go-events/README.md
delete mode 100644 vendor/src/github.com/docker/go-events/broadcast.go
delete mode 100644 vendor/src/github.com/docker/go-events/channel.go
delete mode 100644 vendor/src/github.com/docker/go-events/errors.go
delete mode 100644 vendor/src/github.com/docker/go-events/event.go
delete mode 100644 vendor/src/github.com/docker/go-events/filter.go
delete mode 100644 vendor/src/github.com/docker/go-events/queue.go
delete mode 100644 vendor/src/github.com/docker/go-events/retry.go
delete mode 100644 vendor/src/github.com/docker/go/LICENSE
delete mode 100644 vendor/src/github.com/docker/go/canonical/json/decode.go
delete mode 100644 vendor/src/github.com/docker/go/canonical/json/encode.go
delete mode 100644 vendor/src/github.com/docker/go/canonical/json/fold.go
delete mode 100644 vendor/src/github.com/docker/go/canonical/json/indent.go
delete mode 100644 vendor/src/github.com/docker/go/canonical/json/scanner.go
delete mode 100644 vendor/src/github.com/docker/go/canonical/json/stream.go
delete mode 100644 vendor/src/github.com/docker/go/canonical/json/tags.go
delete mode 100644 vendor/src/github.com/docker/libkv/.travis.yml
delete mode 100644 vendor/src/github.com/docker/libkv/LICENSE.code
delete mode 100644 vendor/src/github.com/docker/libkv/LICENSE.docs
delete mode 100644 vendor/src/github.com/docker/libkv/MAINTAINERS
delete mode 100644 vendor/src/github.com/docker/libkv/README.md
delete mode 100644 vendor/src/github.com/docker/libkv/libkv.go
delete mode 100644 vendor/src/github.com/docker/libkv/store/boltdb/boltdb.go
delete mode 100644 vendor/src/github.com/docker/libkv/store/consul/consul.go
delete mode 100644 vendor/src/github.com/docker/libkv/store/etcd/etcd.go
delete mode 100644 vendor/src/github.com/docker/libkv/store/helpers.go
delete mode 100644 vendor/src/github.com/docker/libkv/store/store.go
delete mode 100644 vendor/src/github.com/docker/libkv/store/zookeeper/zookeeper.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/.dockerignore
delete mode 100644 vendor/src/github.com/docker/libnetwork/.gitignore
delete mode 100644 vendor/src/github.com/docker/libnetwork/CHANGELOG.md
delete mode 100644 vendor/src/github.com/docker/libnetwork/Dockerfile.build
delete mode 100644 vendor/src/github.com/docker/libnetwork/LICENSE
delete mode 100644 vendor/src/github.com/docker/libnetwork/MAINTAINERS
delete mode 100644 vendor/src/github.com/docker/libnetwork/Makefile
delete mode 100644 vendor/src/github.com/docker/libnetwork/README.md
delete mode 100644 vendor/src/github.com/docker/libnetwork/ROADMAP.md
delete mode 100644 vendor/src/github.com/docker/libnetwork/agent.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/agent.pb.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/agent.proto
delete mode 100644 vendor/src/github.com/docker/libnetwork/bitseq/sequence.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/bitseq/store.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/circle.yml
delete mode 100644 vendor/src/github.com/docker/libnetwork/cluster/provider.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/cmd/proxy/main.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/cmd/proxy/proxy.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/cmd/proxy/stub_proxy.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/cmd/proxy/tcp_proxy.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/cmd/proxy/udp_proxy.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/config/config.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/config/libnetwork.toml
delete mode 100644 vendor/src/github.com/docker/libnetwork/controller.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/datastore/cache.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/datastore/datastore.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/datastore/mock_store.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/default_gateway.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/default_gateway_freebsd.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/default_gateway_linux.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/default_gateway_solaris.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/default_gateway_windows.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/discoverapi/discoverapi.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/driverapi/driverapi.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/driverapi/errors.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/driverapi/ipamdata.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge_store.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/errors.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/interface.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/labels.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/link.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux_armppc64.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux_notarm.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_unsupported.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_bridgenetfiltering.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/host/host.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_endpoint.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_joinleave.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_network.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_state.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_store.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_joinleave.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_network.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_state.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_store.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/null/null.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/encryption.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/filter.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/joinleave.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_network.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_serf.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_utils.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.pb.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.proto
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/ovmanager/ovmanager.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/peerdb.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/remote/api/api.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/remote/driver.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/windows/labels.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/windows/windows.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_experimental_linux.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_freebsd.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_ipam.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_linux.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_solaris.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_stub_linux.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_windows.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/drvregistry/drvregistry.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/endpoint.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/endpoint_cnt.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/endpoint_info.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/error.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/etchosts/etchosts.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/hostdiscovery/hostdiscovery.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/hostdiscovery/hostdiscovery_api.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/hostdiscovery/libnetwork.toml
delete mode 100644 vendor/src/github.com/docker/libnetwork/idm/idm.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipam/allocator.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipam/store.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipam/structures.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipam/utils.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipamapi/contract.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipams/builtin/builtin_windows.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipams/null/null.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipams/remote/api/api.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipams/remote/remote.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipams/windowsipam/windowsipam.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipamutils/utils.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/iptables/firewalld.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/iptables/iptables.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipvs/constants.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipvs/ipvs.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ipvs/netlink.go
delete mode 100755 vendor/src/github.com/docker/libnetwork/machines
delete mode 100644 vendor/src/github.com/docker/libnetwork/netlabel/labels.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/netutils/utils.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/netutils/utils_freebsd.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/netutils/utils_linux.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/netutils/utils_solaris.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/netutils/utils_windows.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/network.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/broadcast.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/cluster.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/delegate.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/event_delegate.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/message.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/networkdb.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/networkdb.pb.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/networkdb.proto
delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/watch.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/ns/init_linux.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/options/options.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/interface_freebsd.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/interface_linux.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/interface_solaris.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/interface_windows.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/namespace_linux.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/namespace_unsupported.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/namespace_windows.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/neigh_freebsd.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/neigh_linux.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/neigh_solaris.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/neigh_windows.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/options_linux.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/route_linux.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/sandbox.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/sandbox_freebsd.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/sandbox_unsupported.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/portallocator/portallocator.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/portmapper/mapper.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/portmapper/mock_proxy.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/portmapper/proxy.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/resolvconf/README.md
delete mode 100644 vendor/src/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/resolver.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/resolver_unix.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/resolver_windows.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_dns_unix.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_dns_windows.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_externalkey.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_externalkey_solaris.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_externalkey_unix.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_externalkey_windows.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_store.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/service.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/service_linux.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/service_unsupported.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/store.go
delete mode 100644 vendor/src/github.com/docker/libnetwork/types/types.go
delete mode 100755 vendor/src/github.com/docker/libnetwork/wrapmake.sh
delete mode 100644 vendor/src/github.com/docker/libtrust/CONTRIBUTING.md
delete mode 100644 vendor/src/github.com/docker/libtrust/LICENSE
delete mode 100644 vendor/src/github.com/docker/libtrust/MAINTAINERS
delete mode 100644 vendor/src/github.com/docker/libtrust/README.md
delete mode 100644 vendor/src/github.com/docker/libtrust/certificates.go
delete mode 100644 vendor/src/github.com/docker/libtrust/doc.go
delete mode 100644 vendor/src/github.com/docker/libtrust/ec_key.go
delete mode 100644 vendor/src/github.com/docker/libtrust/filter.go
delete mode 100644 vendor/src/github.com/docker/libtrust/hash.go
delete mode 100644 vendor/src/github.com/docker/libtrust/jsonsign.go
delete mode 100644 vendor/src/github.com/docker/libtrust/key.go
delete mode 100644 vendor/src/github.com/docker/libtrust/key_files.go
delete mode 100644 vendor/src/github.com/docker/libtrust/key_manager.go
delete mode 100644 vendor/src/github.com/docker/libtrust/rsa_key.go
delete mode 100644 vendor/src/github.com/docker/libtrust/util.go
delete mode 100644 vendor/src/github.com/docker/notary/.gitignore
delete mode 100644 vendor/src/github.com/docker/notary/CHANGELOG.md
delete mode 100644 vendor/src/github.com/docker/notary/CONTRIBUTING.md
delete mode 100644 vendor/src/github.com/docker/notary/CONTRIBUTORS
delete mode 100644 vendor/src/github.com/docker/notary/Dockerfile
delete mode 100644 vendor/src/github.com/docker/notary/LICENSE
delete mode 100644 vendor/src/github.com/docker/notary/MAINTAINERS
delete mode 100644 vendor/src/github.com/docker/notary/Makefile
delete mode 100644 vendor/src/github.com/docker/notary/NOTARY_VERSION
delete mode 100644 vendor/src/github.com/docker/notary/README.md
delete mode 100644 vendor/src/github.com/docker/notary/ROADMAP.md
delete mode 100644 vendor/src/github.com/docker/notary/circle.yml
delete mode 100644 vendor/src/github.com/docker/notary/client/changelist/change.go
delete mode 100644 vendor/src/github.com/docker/notary/client/changelist/changelist.go
delete mode 100644 vendor/src/github.com/docker/notary/client/changelist/file_changelist.go
delete mode 100644 vendor/src/github.com/docker/notary/client/changelist/interface.go
delete mode 100644 vendor/src/github.com/docker/notary/client/client.go
delete mode 100644 vendor/src/github.com/docker/notary/client/delegations.go
delete mode 100644 vendor/src/github.com/docker/notary/client/helpers.go
delete mode 100644 vendor/src/github.com/docker/notary/client/repo.go
delete mode 100644 vendor/src/github.com/docker/notary/client/repo_pkcs11.go
delete mode 100644 vendor/src/github.com/docker/notary/codecov.yml
delete mode 100644 vendor/src/github.com/docker/notary/const.go
delete mode 100755 vendor/src/github.com/docker/notary/coverpkg.sh
delete mode 100644 vendor/src/github.com/docker/notary/cryptoservice/certificate.go
delete mode 100644 vendor/src/github.com/docker/notary/cryptoservice/crypto_service.go
delete mode 100644 vendor/src/github.com/docker/notary/cryptoservice/import_export.go
delete mode 100644 vendor/src/github.com/docker/notary/development.rethink.yml
delete mode 100644 vendor/src/github.com/docker/notary/development.yml
delete mode 100644 vendor/src/github.com/docker/notary/docker-compose.rethink.yml
delete mode 100644 vendor/src/github.com/docker/notary/docker-compose.yml
delete mode 100644 vendor/src/github.com/docker/notary/passphrase/passphrase.go
delete mode 100644 vendor/src/github.com/docker/notary/server.Dockerfile
delete mode 100644 vendor/src/github.com/docker/notary/signer.Dockerfile
delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/filestore.go
delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/keyfilestore.go
delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/keystore.go
delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/memorystore.go
delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/store.go
delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/x509utils.go
delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/yubikey/non_pkcs11.go
delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/yubikey/pkcs11_darwin.go
delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/yubikey/pkcs11_interface.go
delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/yubikey/pkcs11_linux.go
delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/yubikey/yubikeystore.go
delete mode 100644 vendor/src/github.com/docker/notary/trustpinning/certs.go
delete mode 100644 vendor/src/github.com/docker/notary/trustpinning/trustpin.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/LICENSE
delete mode 100644 vendor/src/github.com/docker/notary/tuf/README.md
delete mode 100644 vendor/src/github.com/docker/notary/tuf/builder.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/client/client.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/client/errors.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/errors.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/keys.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/roles.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/root.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/serializer.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/snapshot.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/targets.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/timestamp.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/types.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/signed/ed25519.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/signed/errors.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/signed/interface.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/signed/sign.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/signed/verifiers.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/signed/verify.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/store/errors.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/store/filestore.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/store/httpstore.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/store/interfaces.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/store/memorystore.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/store/offlinestore.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/tuf.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/utils/role_sort.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/utils/stack.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/utils/util.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/utils/utils.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/validation/errors.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/LICENSE
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/agent.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/config.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/errors.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/exec/controller.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/exec/controller_test.mock.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/exec/errors.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/exec/executor.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/helpers.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/node.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/reporter.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/session.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/storage.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/task.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/worker.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/README.md
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/ca.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/ca.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/control.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/control.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/dispatcher.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/dispatcher.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/duration/duration.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/duration/duration.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/duration/gen.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/gen.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/health.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/health.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/objects.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/objects.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/raft.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/raft.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/snapshot.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/snapshot.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/specs.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/specs.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/timestamp/gen.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/types.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/types.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/auth.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/certificates.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/config.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/external.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/forward.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/server.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/transport.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/identity/doc.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/identity/randomid.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ioutils/ioutils.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/log/context.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/log/grpc.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/allocator/allocator.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/allocator/doc.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/allocator/network.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/portallocator.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/cluster.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/common.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/network.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/node.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/server.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/service.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/task.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/dispatcher/heartbeat/heartbeat.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/dispatcher/nodes.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/dispatcher/period.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/doc.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/health/health.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/keymanager/keymanager.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/manager.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/global.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/replicated.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/restart.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/services.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/task_reaper.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/tasks.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/updater.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/raftpicker/cluster.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/raftpicker/raftpicker.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/constraint.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/expr.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/filter.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/indexed_node_heap.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/pipeline.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/scheduler.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/doc.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/proposer.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/raft/membership/cluster.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/raft/raft.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/raft/storage.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/raft/util.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/raft/wait.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/apply.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/by.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/clusters.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/combinators.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/memory.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/networks.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/nodes.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/object.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/services.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/tasks.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/watch.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/watch/watch.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/picker/picker.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/plugin/gen.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/plugin/helpers.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/ptypes/doc.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/ptypes/duration.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go
delete mode 100644 vendor/src/github.com/fluent/fluent-logger-golang/LICENSE
delete mode 100644 vendor/src/github.com/fluent/fluent-logger-golang/fluent/fluent.go
delete mode 100644 vendor/src/github.com/fluent/fluent-logger-golang/fluent/proto.go
delete mode 100644 vendor/src/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go
delete mode 100644 vendor/src/github.com/fluent/fluent-logger-golang/fluent/version.go
delete mode 100644 vendor/src/github.com/go-ini/ini/.gitignore
delete mode 100644 vendor/src/github.com/go-ini/ini/LICENSE
delete mode 100644 vendor/src/github.com/go-ini/ini/README.md
delete mode 100644 vendor/src/github.com/go-ini/ini/README_ZH.md
delete mode 100644 vendor/src/github.com/go-ini/ini/ini.go
delete mode 100644 vendor/src/github.com/go-ini/ini/struct.go
delete mode 100644 vendor/src/github.com/godbus/dbus/CONTRIBUTING.md
delete mode 100644 vendor/src/github.com/godbus/dbus/LICENSE
delete mode 100644 vendor/src/github.com/godbus/dbus/MAINTAINERS
delete mode 100644 vendor/src/github.com/godbus/dbus/README.markdown
delete mode 100644 vendor/src/github.com/godbus/dbus/auth.go
delete mode 100644 vendor/src/github.com/godbus/dbus/auth_external.go
delete mode 100644 vendor/src/github.com/godbus/dbus/auth_sha1.go
delete mode 100644 vendor/src/github.com/godbus/dbus/call.go
delete mode 100644 vendor/src/github.com/godbus/dbus/conn.go
delete mode 100644 vendor/src/github.com/godbus/dbus/conn_darwin.go
delete mode 100644 vendor/src/github.com/godbus/dbus/conn_other.go
delete mode 100644 vendor/src/github.com/godbus/dbus/dbus.go
delete mode 100644 vendor/src/github.com/godbus/dbus/decoder.go
delete mode 100644 vendor/src/github.com/godbus/dbus/doc.go
delete mode 100644 vendor/src/github.com/godbus/dbus/encoder.go
delete mode 100644 vendor/src/github.com/godbus/dbus/export.go
delete mode 100644 vendor/src/github.com/godbus/dbus/homedir.go
delete mode 100644 vendor/src/github.com/godbus/dbus/homedir_dynamic.go
delete mode 100644 vendor/src/github.com/godbus/dbus/homedir_static.go
delete mode 100644 vendor/src/github.com/godbus/dbus/message.go
delete mode 100644 vendor/src/github.com/godbus/dbus/object.go
delete mode 100644 vendor/src/github.com/godbus/dbus/sig.go
delete mode 100644 vendor/src/github.com/godbus/dbus/transport_darwin.go
delete mode 100644 vendor/src/github.com/godbus/dbus/transport_generic.go
delete mode 100644 vendor/src/github.com/godbus/dbus/transport_tcp.go
delete mode 100644 vendor/src/github.com/godbus/dbus/transport_unix.go
delete mode 100644 vendor/src/github.com/godbus/dbus/transport_unixcred_dragonfly.go
delete mode 100644 vendor/src/github.com/godbus/dbus/transport_unixcred_linux.go
delete mode 100644 vendor/src/github.com/godbus/dbus/variant.go
delete mode 100644 vendor/src/github.com/godbus/dbus/variant_lexer.go
delete mode 100644 vendor/src/github.com/godbus/dbus/variant_parser.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/LICENSE
delete mode 100644 vendor/src/github.com/gogo/protobuf/gogoproto/Makefile
delete mode 100644 vendor/src/github.com/gogo/protobuf/gogoproto/doc.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.golden
delete mode 100644 vendor/src/github.com/gogo/protobuf/gogoproto/gogo.proto
delete mode 100644 vendor/src/github.com/gogo/protobuf/gogoproto/helper.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/Makefile
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/clone.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/decode.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/decode_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/encode.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/encode_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/equal.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/extensions.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/extensions_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/lib.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/lib_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/message_set.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/pointer_reflect.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/properties.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/properties_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/skip_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/text.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/text_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/text_parser.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile
delete mode 100644 vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/sortkeys/sortkeys.go
delete mode 100644 vendor/src/github.com/golang/mock/LICENSE
delete mode 100644 vendor/src/github.com/golang/mock/gomock/call.go
delete mode 100644 vendor/src/github.com/golang/mock/gomock/callset.go
delete mode 100644 vendor/src/github.com/golang/mock/gomock/controller.go
delete mode 100644 vendor/src/github.com/golang/mock/gomock/matchers.go
delete mode 100644 vendor/src/github.com/golang/protobuf/LICENSE
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/Makefile
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/clone.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/decode.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/encode.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/equal.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/extensions.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/lib.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/message_set.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/properties.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/text.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/text_parser.go
delete mode 100644 vendor/src/github.com/google/certificate-transparency/LICENSE
delete mode 100644 vendor/src/github.com/google/certificate-transparency/go/README.md
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/asn1/asn1.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/asn1/common.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/asn1/marshal.go
delete mode 100644 vendor/src/github.com/google/certificate-transparency/go/client/logclient.go
delete mode 100644 vendor/src/github.com/google/certificate-transparency/go/serialization.go
delete mode 100644 vendor/src/github.com/google/certificate-transparency/go/signatures.go
delete mode 100644 vendor/src/github.com/google/certificate-transparency/go/types.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/cert_pool.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/pem_decrypt.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/pkcs1.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/pkcs8.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/pkix/pkix.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/root.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/root_darwin.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/root_plan9.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/root_stub.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/root_unix.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/root_windows.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/sec1.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/verify.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/x509.go
delete mode 100644 vendor/src/github.com/gorilla/context/.travis.yml
delete mode 100644 vendor/src/github.com/gorilla/context/LICENSE
delete mode 100644 vendor/src/github.com/gorilla/context/README.md
delete mode 100644 vendor/src/github.com/gorilla/context/context.go
delete mode 100644 vendor/src/github.com/gorilla/context/doc.go
delete mode 100644 vendor/src/github.com/gorilla/mux/.travis.yml
delete mode 100644 vendor/src/github.com/gorilla/mux/LICENSE
delete mode 100644 vendor/src/github.com/gorilla/mux/README.md
delete mode 100644 vendor/src/github.com/gorilla/mux/doc.go
delete mode 100644 vendor/src/github.com/gorilla/mux/mux.go
delete mode 100644 vendor/src/github.com/gorilla/mux/regexp.go
delete mode 100644 vendor/src/github.com/gorilla/mux/route.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/README.md
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/acl.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/agent.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/api.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/catalog.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/event.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/health.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/kv.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/lock.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/raw.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/semaphore.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/session.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/status.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/website/LICENSE.md
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/.gitignore
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/.travis.yml
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/README.md
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/edges.go
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/iradix.go
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/iter.go
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/node.go
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/.gitignore
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/README.md
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/index.go
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/memdb.go
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/schema.go
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/txn.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/0doc.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/README.md
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/binc.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/decode.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/encode.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/helper.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/helper_internal.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/msgpack.go
delete mode 100755 vendor/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/rpc.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/simple.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/time.go
delete mode 100644 vendor/src/github.com/hashicorp/go-multierror/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/go-multierror/README.md
delete mode 100644 vendor/src/github.com/hashicorp/go-multierror/append.go
delete mode 100644 vendor/src/github.com/hashicorp/go-multierror/format.go
delete mode 100644 vendor/src/github.com/hashicorp/go-multierror/multierror.go
delete mode 100644 vendor/src/github.com/hashicorp/golang-lru/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/golang-lru/simplelru/lru.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/.gitignore
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/Makefile
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/README.md
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/alive_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/broadcast.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/config.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/conflict_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/event_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/keyring.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/logging.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/memberlist.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/merge_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/net.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/ping_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/queue.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/security.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/state.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/todo.md
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/util.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/serf/coordinate/client.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/coordinate/config.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/coordinate/coordinate.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/coordinate/phantom.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/broadcast.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/coalesce.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/coalesce_member.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/coalesce_user.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/config.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/conflict_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/event.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/event_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/internal_query.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/keymanager.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/lamport.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/merge_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/messages.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/ping_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/query.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/serf.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/snapshot.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/website/source/LICENSE
delete mode 100644 vendor/src/github.com/imdario/mergo/.travis.yml
delete mode 100644 vendor/src/github.com/imdario/mergo/LICENSE
delete mode 100644 vendor/src/github.com/imdario/mergo/README.md
delete mode 100644 vendor/src/github.com/imdario/mergo/doc.go
delete mode 100644 vendor/src/github.com/imdario/mergo/map.go
delete mode 100644 vendor/src/github.com/imdario/mergo/merge.go
delete mode 100644 vendor/src/github.com/imdario/mergo/mergo.go
delete mode 100644 vendor/src/github.com/inconshreveable/mousetrap/LICENSE
delete mode 100644 vendor/src/github.com/inconshreveable/mousetrap/README.md
delete mode 100644 vendor/src/github.com/inconshreveable/mousetrap/trap_others.go
delete mode 100644 vendor/src/github.com/inconshreveable/mousetrap/trap_windows.go
delete mode 100644 vendor/src/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/.gitignore
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/.travis.yml
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/LICENSE
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/Makefile
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/README.md
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/api.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/astnodetype_string.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/functions.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/interpreter.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/lexer.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/parser.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/toktype_string.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/util.go
delete mode 100644 vendor/src/github.com/kr/pty/.gitignore
delete mode 100644 vendor/src/github.com/kr/pty/License
delete mode 100644 vendor/src/github.com/kr/pty/README.md
delete mode 100644 vendor/src/github.com/kr/pty/doc.go
delete mode 100644 vendor/src/github.com/kr/pty/ioctl.go
delete mode 100644 vendor/src/github.com/kr/pty/ioctl_bsd.go
delete mode 100755 vendor/src/github.com/kr/pty/mktypes.bash
delete mode 100644 vendor/src/github.com/kr/pty/pty_darwin.go
delete mode 100644 vendor/src/github.com/kr/pty/pty_freebsd.go
delete mode 100644 vendor/src/github.com/kr/pty/pty_linux.go
delete mode 100644 vendor/src/github.com/kr/pty/pty_unsupported.go
delete mode 100644 vendor/src/github.com/kr/pty/run.go
delete mode 100644 vendor/src/github.com/kr/pty/types.go
delete mode 100644 vendor/src/github.com/kr/pty/types_freebsd.go
delete mode 100644 vendor/src/github.com/kr/pty/util.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_386.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_amd64.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_arm.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_arm64.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_freebsd_386.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_freebsd_amd64.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_freebsd_arm.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_ppc64.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_ppc64le.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_s390x.go
delete mode 100644 vendor/src/github.com/mattn/go-shellwords/.travis.yml
delete mode 100644 vendor/src/github.com/mattn/go-shellwords/README.md
delete mode 100644 vendor/src/github.com/mattn/go-shellwords/shellwords.go
delete mode 100644 vendor/src/github.com/mattn/go-shellwords/util_posix.go
delete mode 100644 vendor/src/github.com/mattn/go-shellwords/util_windows.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/.gitignore
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/.travis.yml
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/LICENSE
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/README.md
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/backup.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/code/sqlite3-binding.c
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/code/sqlite3-binding.h
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/code/sqlite3ext.h
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/doc.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/error.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3-binding.c
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3-binding.h
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3_icu.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3_load_extension.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3_omit_load_extension.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3_other.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3_windows.go
delete mode 100644 vendor/src/github.com/matttproud/golang_protobuf_extensions/LICENSE
delete mode 100644 vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
delete mode 100644 vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
delete mode 100644 vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
delete mode 100644 vendor/src/github.com/miekg/dns/.gitignore
delete mode 100644 vendor/src/github.com/miekg/dns/.travis.yml
delete mode 100644 vendor/src/github.com/miekg/dns/AUTHORS
delete mode 100644 vendor/src/github.com/miekg/dns/CONTRIBUTORS
delete mode 100644 vendor/src/github.com/miekg/dns/COPYRIGHT
delete mode 100644 vendor/src/github.com/miekg/dns/LICENSE
delete mode 100644 vendor/src/github.com/miekg/dns/README.md
delete mode 100644 vendor/src/github.com/miekg/dns/client.go
delete mode 100644 vendor/src/github.com/miekg/dns/clientconfig.go
delete mode 100644 vendor/src/github.com/miekg/dns/defaults.go
delete mode 100644 vendor/src/github.com/miekg/dns/dns.go
delete mode 100644 vendor/src/github.com/miekg/dns/dnssec.go
delete mode 100644 vendor/src/github.com/miekg/dns/dnssec_keygen.go
delete mode 100644 vendor/src/github.com/miekg/dns/dnssec_keyscan.go
delete mode 100644 vendor/src/github.com/miekg/dns/dnssec_privkey.go
delete mode 100644 vendor/src/github.com/miekg/dns/doc.go
delete mode 100644 vendor/src/github.com/miekg/dns/edns.go
delete mode 100644 vendor/src/github.com/miekg/dns/format.go
delete mode 100644 vendor/src/github.com/miekg/dns/labels.go
delete mode 100644 vendor/src/github.com/miekg/dns/msg.go
delete mode 100644 vendor/src/github.com/miekg/dns/nsecx.go
delete mode 100644 vendor/src/github.com/miekg/dns/privaterr.go
delete mode 100644 vendor/src/github.com/miekg/dns/rawmsg.go
delete mode 100644 vendor/src/github.com/miekg/dns/sanitize.go
delete mode 100644 vendor/src/github.com/miekg/dns/scanner.go
delete mode 100644 vendor/src/github.com/miekg/dns/server.go
delete mode 100644 vendor/src/github.com/miekg/dns/sig0.go
delete mode 100644 vendor/src/github.com/miekg/dns/singleinflight.go
delete mode 100644 vendor/src/github.com/miekg/dns/tlsa.go
delete mode 100644 vendor/src/github.com/miekg/dns/tsig.go
delete mode 100644 vendor/src/github.com/miekg/dns/types.go
delete mode 100644 vendor/src/github.com/miekg/dns/types_generate.go
delete mode 100644 vendor/src/github.com/miekg/dns/udp.go
delete mode 100644 vendor/src/github.com/miekg/dns/udp_linux.go
delete mode 100644 vendor/src/github.com/miekg/dns/udp_other.go
delete mode 100644 vendor/src/github.com/miekg/dns/udp_windows.go
delete mode 100644 vendor/src/github.com/miekg/dns/update.go
delete mode 100644 vendor/src/github.com/miekg/dns/xfr.go
delete mode 100644 vendor/src/github.com/miekg/dns/zgenerate.go
delete mode 100644 vendor/src/github.com/miekg/dns/zscan.go
delete mode 100644 vendor/src/github.com/miekg/dns/zscan_rr.go
delete mode 100644 vendor/src/github.com/miekg/dns/ztypes.go
delete mode 100644 vendor/src/github.com/miekg/pkcs11/.gitignore
delete mode 100644 vendor/src/github.com/miekg/pkcs11/.travis.yml
delete mode 100644 vendor/src/github.com/miekg/pkcs11/LICENSE
delete mode 100644 vendor/src/github.com/miekg/pkcs11/README.md
delete mode 100644 vendor/src/github.com/miekg/pkcs11/const.go
delete mode 100644 vendor/src/github.com/miekg/pkcs11/error.go
delete mode 100644 vendor/src/github.com/miekg/pkcs11/hsm.db
delete mode 100644 vendor/src/github.com/miekg/pkcs11/pkcs11.go
delete mode 100644 vendor/src/github.com/miekg/pkcs11/pkcs11.h
delete mode 100644 vendor/src/github.com/miekg/pkcs11/pkcs11f.h
delete mode 100644 vendor/src/github.com/miekg/pkcs11/pkcs11t.h
delete mode 100644 vendor/src/github.com/miekg/pkcs11/softhsm.conf
delete mode 100644 vendor/src/github.com/miekg/pkcs11/types.go
delete mode 100644 vendor/src/github.com/mreiferson/go-httpclient/.gitignore
delete mode 100644 vendor/src/github.com/mreiferson/go-httpclient/.travis.yml
delete mode 100644 vendor/src/github.com/mreiferson/go-httpclient/LICENSE
delete mode 100644 vendor/src/github.com/mreiferson/go-httpclient/README.md
delete mode 100644 vendor/src/github.com/mreiferson/go-httpclient/httpclient.go
delete mode 100644 vendor/src/github.com/philhofer/fwd/README.md
delete mode 100644 vendor/src/github.com/philhofer/fwd/reader.go
delete mode 100644 vendor/src/github.com/philhofer/fwd/writer.go
delete mode 100644 vendor/src/github.com/philhofer/fwd/writer_appengine.go
delete mode 100644 vendor/src/github.com/philhofer/fwd/writer_unsafe.go
delete mode 100644 vendor/src/github.com/pivotal-golang/clock/LICENSE
delete mode 100644 vendor/src/github.com/pivotal-golang/clock/README.md
delete mode 100644 vendor/src/github.com/pivotal-golang/clock/clock.go
delete mode 100644 vendor/src/github.com/pivotal-golang/clock/ticker.go
delete mode 100644 vendor/src/github.com/pivotal-golang/clock/timer.go
delete mode 100644 vendor/src/github.com/pkg/errors/.gitignore
delete mode 100644 vendor/src/github.com/pkg/errors/.travis.yml
delete mode 100644 vendor/src/github.com/pkg/errors/LICENSE
delete mode 100644 vendor/src/github.com/pkg/errors/README.md
delete mode 100644 vendor/src/github.com/pkg/errors/appveyor.yml
delete mode 100644 vendor/src/github.com/pkg/errors/errors.go
delete mode 100644 vendor/src/github.com/pkg/errors/stack.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/LICENSE
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/.gitignore
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/README.md
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/collector.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/counter.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/desc.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/doc.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/expvar.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/gauge.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/go_collector.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/histogram.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/http.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/metric.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/process_collector.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/push.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/registry.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/summary.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/untyped.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/value.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/vec.go
delete mode 100644 vendor/src/github.com/prometheus/client_model/LICENSE
delete mode 100644 vendor/src/github.com/prometheus/client_model/go/metrics.pb.go
delete mode 100644 vendor/src/github.com/prometheus/client_model/ruby/LICENSE
delete mode 100644 vendor/src/github.com/prometheus/common/LICENSE
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/decode.go
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/encode.go
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/expfmt.go
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/fuzz.go
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/json_decode.go
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/text_create.go
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/text_parse.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/alert.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/fingerprinting.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/labels.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/labelset.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/metric.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/model.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/signature.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/silence.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/time.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/value.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/.travis.yml
delete mode 100644 vendor/src/github.com/prometheus/procfs/AUTHORS.md
delete mode 100644 vendor/src/github.com/prometheus/procfs/CONTRIBUTING.md
delete mode 100644 vendor/src/github.com/prometheus/procfs/LICENSE
delete mode 100644 vendor/src/github.com/prometheus/procfs/Makefile
delete mode 100644 vendor/src/github.com/prometheus/procfs/NOTICE
delete mode 100644 vendor/src/github.com/prometheus/procfs/README.md
delete mode 100644 vendor/src/github.com/prometheus/procfs/doc.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/fs.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/ipvs.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/mdstat.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/proc.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/proc_io.go
delete mode 100644
vendor/src/github.com/prometheus/procfs/proc_limits.go delete mode 100644 vendor/src/github.com/prometheus/procfs/proc_stat.go delete mode 100644 vendor/src/github.com/prometheus/procfs/stat.go delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/LICENSE delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/conn.go delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/constants.go delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/flw.go delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/lock.go delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/server_help.go delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/server_java.go delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/structs.go delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/tracer.go delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/util.go delete mode 100644 vendor/src/github.com/seccomp/libseccomp-golang/LICENSE delete mode 100644 vendor/src/github.com/seccomp/libseccomp-golang/README delete mode 100644 vendor/src/github.com/seccomp/libseccomp-golang/seccomp.go delete mode 100644 vendor/src/github.com/seccomp/libseccomp-golang/seccomp_internal.go delete mode 100644 vendor/src/github.com/spf13/cobra/.gitignore delete mode 100644 vendor/src/github.com/spf13/cobra/.mailmap delete mode 100644 vendor/src/github.com/spf13/cobra/.travis.yml delete mode 100644 vendor/src/github.com/spf13/cobra/LICENSE.txt delete mode 100644 vendor/src/github.com/spf13/cobra/README.md delete mode 100644 vendor/src/github.com/spf13/cobra/args.go delete mode 100644 vendor/src/github.com/spf13/cobra/bash_completions.go delete mode 100644 vendor/src/github.com/spf13/cobra/bash_completions.md delete mode 100644 vendor/src/github.com/spf13/cobra/cobra.go delete mode 100644 vendor/src/github.com/spf13/cobra/command.go delete mode 100644 vendor/src/github.com/spf13/cobra/command_notwin.go delete mode 100644 vendor/src/github.com/spf13/cobra/command_win.go delete mode 100644 vendor/src/github.com/spf13/pflag/.travis.yml delete mode 100644 vendor/src/github.com/spf13/pflag/LICENSE delete mode 100644 vendor/src/github.com/spf13/pflag/README.md delete mode 100644 vendor/src/github.com/spf13/pflag/bool.go delete mode 100644 vendor/src/github.com/spf13/pflag/count.go delete mode 100644 vendor/src/github.com/spf13/pflag/duration.go delete mode 100644 vendor/src/github.com/spf13/pflag/flag.go delete mode 100644 vendor/src/github.com/spf13/pflag/float32.go delete mode 100644 vendor/src/github.com/spf13/pflag/float64.go delete mode 100644 vendor/src/github.com/spf13/pflag/golangflag.go delete mode 100644 vendor/src/github.com/spf13/pflag/int.go delete mode 100644 vendor/src/github.com/spf13/pflag/int32.go delete mode 100644 vendor/src/github.com/spf13/pflag/int64.go delete mode 100644 vendor/src/github.com/spf13/pflag/int8.go delete mode 100644 vendor/src/github.com/spf13/pflag/int_slice.go delete mode 100644 vendor/src/github.com/spf13/pflag/ip.go delete mode 100644 vendor/src/github.com/spf13/pflag/ipmask.go delete mode 100644 vendor/src/github.com/spf13/pflag/ipnet.go delete mode 100644 vendor/src/github.com/spf13/pflag/string.go delete mode 100644 vendor/src/github.com/spf13/pflag/string_slice.go delete mode 100644 vendor/src/github.com/spf13/pflag/uint.go delete mode 100644 vendor/src/github.com/spf13/pflag/uint16.go delete mode 100644 vendor/src/github.com/spf13/pflag/uint32.go delete mode 100644 vendor/src/github.com/spf13/pflag/uint64.go 
delete mode 100644 vendor/src/github.com/spf13/pflag/uint8.go delete mode 100644 vendor/src/github.com/syndtr/gocapability/LICENSE delete mode 100644 vendor/src/github.com/syndtr/gocapability/capability/capability.go delete mode 100644 vendor/src/github.com/syndtr/gocapability/capability/capability_linux.go delete mode 100644 vendor/src/github.com/syndtr/gocapability/capability/capability_noop.go delete mode 100644 vendor/src/github.com/syndtr/gocapability/capability/enum.go delete mode 100644 vendor/src/github.com/syndtr/gocapability/capability/enum_gen.go delete mode 100644 vendor/src/github.com/syndtr/gocapability/capability/syscall_linux.go delete mode 100644 vendor/src/github.com/tchap/go-patricia/LICENSE delete mode 100644 vendor/src/github.com/tchap/go-patricia/patricia/children.go delete mode 100644 vendor/src/github.com/tchap/go-patricia/patricia/patricia.go delete mode 100644 vendor/src/github.com/tinylib/msgp/LICENSE delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/circular.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/defs.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/edit.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/elsize.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/errors.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/extension.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/integers.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/json.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/json_bytes.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/number.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/number_appengine.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/number_unsafe.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/read.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/read_bytes.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/size.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/write.go delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/write_bytes.go delete mode 100644 vendor/src/github.com/ugorji/go/LICENSE delete mode 100644 vendor/src/github.com/ugorji/go/codec/0doc.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/README.md delete mode 100644 vendor/src/github.com/ugorji/go/codec/binc.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/cbor.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/decode.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/encode.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/fast-path.generated.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/fast-path.go.tmpl delete mode 100644 vendor/src/github.com/ugorji/go/codec/fast-path.not.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl delete mode 100644 vendor/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl delete mode 100644 vendor/src/github.com/ugorji/go/codec/gen-helper.generated.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/gen-helper.go.tmpl delete mode 100644 vendor/src/github.com/ugorji/go/codec/gen.generated.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/gen.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/helper.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/helper_internal.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/helper_not_unsafe.go delete mode 100644 
vendor/src/github.com/ugorji/go/codec/helper_unsafe.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/json.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/msgpack.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/noop.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/prebuild.go delete mode 100755 vendor/src/github.com/ugorji/go/codec/prebuild.sh delete mode 100644 vendor/src/github.com/ugorji/go/codec/rpc.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/simple.go delete mode 100644 vendor/src/github.com/ugorji/go/codec/test-cbor-goldens.json delete mode 100755 vendor/src/github.com/ugorji/go/codec/test.py delete mode 100755 vendor/src/github.com/ugorji/go/codec/tests.sh delete mode 100644 vendor/src/github.com/ugorji/go/codec/time.go delete mode 100644 vendor/src/github.com/vdemeester/shakers/.gitignore delete mode 100644 vendor/src/github.com/vdemeester/shakers/Dockerfile delete mode 100644 vendor/src/github.com/vdemeester/shakers/LICENSE delete mode 100644 vendor/src/github.com/vdemeester/shakers/Makefile delete mode 100644 vendor/src/github.com/vdemeester/shakers/README.md delete mode 100644 vendor/src/github.com/vdemeester/shakers/bool.go delete mode 100644 vendor/src/github.com/vdemeester/shakers/circle.yml delete mode 100644 vendor/src/github.com/vdemeester/shakers/common.go delete mode 100644 vendor/src/github.com/vdemeester/shakers/glide.yaml delete mode 100644 vendor/src/github.com/vdemeester/shakers/string.go delete mode 100644 vendor/src/github.com/vdemeester/shakers/time.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/.travis.yml delete mode 100644 vendor/src/github.com/vishvananda/netlink/LICENSE delete mode 100644 vendor/src/github.com/vishvananda/netlink/Makefile delete mode 100644 vendor/src/github.com/vishvananda/netlink/README.md delete mode 100644 vendor/src/github.com/vishvananda/netlink/addr.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/addr_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/bpf_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/class.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/class_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/filter.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/filter_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/handle_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/link.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/link_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/link_tuntap_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/neigh.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/neigh_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/netlink.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/netlink_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/netlink_unspecified.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/addr_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/link_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/nl_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/route_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/syscall.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/tc_linux.go delete mode 100644 
vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/protinfo.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/protinfo_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/qdisc.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/qdisc_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/route.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/route_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/route_unspecified.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/rule.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/rule_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/xfrm.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/xfrm_policy.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/xfrm_policy_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/xfrm_state.go delete mode 100644 vendor/src/github.com/vishvananda/netlink/xfrm_state_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netns/LICENSE delete mode 100644 vendor/src/github.com/vishvananda/netns/README.md delete mode 100644 vendor/src/github.com/vishvananda/netns/netns.go delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux.go delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux_386.go delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux_amd64.go delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux_arm.go delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux_arm64.go delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux_ppc64le.go delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux_s390x.go delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_unspecified.go delete mode 100644 vendor/src/golang.org/x/crypto/LICENSE delete mode 100644 vendor/src/golang.org/x/crypto/bcrypt/base64.go delete mode 100644 vendor/src/golang.org/x/crypto/bcrypt/bcrypt.go delete mode 100644 vendor/src/golang.org/x/crypto/blowfish/block.go delete mode 100644 vendor/src/golang.org/x/crypto/blowfish/cipher.go delete mode 100644 vendor/src/golang.org/x/crypto/blowfish/const.go delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/bmp-string.go delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/crypto.go delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/errors.go delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/mac.go delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/pbkdf.go delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/pkcs12.go delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/safebags.go delete mode 100644 vendor/src/golang.org/x/net/LICENSE delete mode 100644 vendor/src/golang.org/x/net/http2/.gitignore delete mode 100644 vendor/src/golang.org/x/net/http2/Dockerfile delete mode 100644 vendor/src/golang.org/x/net/http2/Makefile delete mode 100644 vendor/src/golang.org/x/net/http2/README delete mode 100644 vendor/src/golang.org/x/net/http2/client_conn_pool.go delete mode 100644 vendor/src/golang.org/x/net/http2/configure_transport.go delete mode 100644 
vendor/src/golang.org/x/net/http2/errors.go delete mode 100644 vendor/src/golang.org/x/net/http2/fixed_buffer.go delete mode 100644 vendor/src/golang.org/x/net/http2/flow.go delete mode 100644 vendor/src/golang.org/x/net/http2/frame.go delete mode 100644 vendor/src/golang.org/x/net/http2/go15.go delete mode 100644 vendor/src/golang.org/x/net/http2/gotrack.go delete mode 100644 vendor/src/golang.org/x/net/http2/headermap.go delete mode 100644 vendor/src/golang.org/x/net/http2/hpack/encode.go delete mode 100644 vendor/src/golang.org/x/net/http2/hpack/hpack.go delete mode 100644 vendor/src/golang.org/x/net/http2/hpack/huffman.go delete mode 100644 vendor/src/golang.org/x/net/http2/hpack/tables.go delete mode 100644 vendor/src/golang.org/x/net/http2/http2.go delete mode 100644 vendor/src/golang.org/x/net/http2/not_go15.go delete mode 100644 vendor/src/golang.org/x/net/http2/not_go16.go delete mode 100644 vendor/src/golang.org/x/net/http2/pipe.go delete mode 100644 vendor/src/golang.org/x/net/http2/server.go delete mode 100644 vendor/src/golang.org/x/net/http2/transport.go delete mode 100644 vendor/src/golang.org/x/net/http2/write.go delete mode 100644 vendor/src/golang.org/x/net/http2/writesched.go delete mode 100644 vendor/src/golang.org/x/net/internal/timeseries/timeseries.go delete mode 100644 vendor/src/golang.org/x/net/proxy/direct.go delete mode 100644 vendor/src/golang.org/x/net/proxy/per_host.go delete mode 100644 vendor/src/golang.org/x/net/proxy/proxy.go delete mode 100644 vendor/src/golang.org/x/net/proxy/socks5.go delete mode 100644 vendor/src/golang.org/x/net/trace/events.go delete mode 100644 vendor/src/golang.org/x/net/trace/histogram.go delete mode 100644 vendor/src/golang.org/x/net/trace/trace.go delete mode 100644 vendor/src/golang.org/x/net/websocket/client.go delete mode 100644 vendor/src/golang.org/x/net/websocket/hybi.go delete mode 100644 vendor/src/golang.org/x/net/websocket/server.go delete mode 100644 vendor/src/golang.org/x/net/websocket/websocket.go delete mode 100644 vendor/src/golang.org/x/oauth2/.travis.yml delete mode 100644 vendor/src/golang.org/x/oauth2/AUTHORS delete mode 100644 vendor/src/golang.org/x/oauth2/CONTRIBUTING.md delete mode 100644 vendor/src/golang.org/x/oauth2/CONTRIBUTORS delete mode 100644 vendor/src/golang.org/x/oauth2/LICENSE delete mode 100644 vendor/src/golang.org/x/oauth2/README.md delete mode 100644 vendor/src/golang.org/x/oauth2/client_appengine.go delete mode 100644 vendor/src/golang.org/x/oauth2/google/appengine.go delete mode 100644 vendor/src/golang.org/x/oauth2/google/appengine_hook.go delete mode 100644 vendor/src/golang.org/x/oauth2/google/appenginevm_hook.go delete mode 100644 vendor/src/golang.org/x/oauth2/google/default.go delete mode 100644 vendor/src/golang.org/x/oauth2/google/google.go delete mode 100644 vendor/src/golang.org/x/oauth2/google/jwt.go delete mode 100644 vendor/src/golang.org/x/oauth2/google/sdk.go delete mode 100644 vendor/src/golang.org/x/oauth2/internal/oauth2.go delete mode 100644 vendor/src/golang.org/x/oauth2/internal/token.go delete mode 100644 vendor/src/golang.org/x/oauth2/internal/transport.go delete mode 100644 vendor/src/golang.org/x/oauth2/jws/jws.go delete mode 100644 vendor/src/golang.org/x/oauth2/jwt/jwt.go delete mode 100644 vendor/src/golang.org/x/oauth2/oauth2.go delete mode 100644 vendor/src/golang.org/x/oauth2/token.go delete mode 100644 vendor/src/golang.org/x/oauth2/transport.go delete mode 100644 vendor/src/golang.org/x/sys/LICENSE delete mode 100644 
vendor/src/google.golang.org/api/LICENSE delete mode 100644 vendor/src/google.golang.org/api/gensupport/json.go delete mode 100644 vendor/src/google.golang.org/api/gensupport/params.go delete mode 100644 vendor/src/google.golang.org/api/googleapi/googleapi.go delete mode 100644 vendor/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE delete mode 100644 vendor/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go delete mode 100644 vendor/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go delete mode 100644 vendor/src/google.golang.org/api/googleapi/types.go delete mode 100644 vendor/src/google.golang.org/api/logging/v1beta3/logging-api.json delete mode 100644 vendor/src/google.golang.org/api/logging/v1beta3/logging-gen.go delete mode 100644 vendor/src/google.golang.org/cloud/.travis.yml delete mode 100644 vendor/src/google.golang.org/cloud/AUTHORS delete mode 100644 vendor/src/google.golang.org/cloud/CONTRIBUTING.md delete mode 100644 vendor/src/google.golang.org/cloud/CONTRIBUTORS delete mode 100644 vendor/src/google.golang.org/cloud/LICENSE delete mode 100644 vendor/src/google.golang.org/cloud/README.md delete mode 100644 vendor/src/google.golang.org/cloud/cloud.go delete mode 100644 vendor/src/google.golang.org/cloud/compute/metadata/metadata.go delete mode 100644 vendor/src/google.golang.org/cloud/internal/cloud.go delete mode 100644 vendor/src/google.golang.org/cloud/internal/opts/option.go delete mode 100644 vendor/src/google.golang.org/cloud/internal/transport/cancelreq.go delete mode 100644 vendor/src/google.golang.org/cloud/internal/transport/cancelreq_legacy.go delete mode 100644 vendor/src/google.golang.org/cloud/internal/transport/dial.go delete mode 100644 vendor/src/google.golang.org/cloud/internal/transport/proto.go delete mode 100644 vendor/src/google.golang.org/cloud/key.json.enc delete mode 100644 vendor/src/google.golang.org/cloud/logging/logging.go delete mode 100644 vendor/src/google.golang.org/cloud/option.go delete mode 100644 vendor/src/google.golang.org/grpc/.travis.yml delete mode 100644 vendor/src/google.golang.org/grpc/CONTRIBUTING.md delete mode 100644 vendor/src/google.golang.org/grpc/LICENSE delete mode 100644 vendor/src/google.golang.org/grpc/Makefile delete mode 100644 vendor/src/google.golang.org/grpc/PATENTS delete mode 100644 vendor/src/google.golang.org/grpc/README.md delete mode 100644 vendor/src/google.golang.org/grpc/backoff.go delete mode 100644 vendor/src/google.golang.org/grpc/call.go delete mode 100644 vendor/src/google.golang.org/grpc/clientconn.go delete mode 100755 vendor/src/google.golang.org/grpc/codegen.sh delete mode 100644 vendor/src/google.golang.org/grpc/codes/code_string.go delete mode 100644 vendor/src/google.golang.org/grpc/codes/codes.go delete mode 100755 vendor/src/google.golang.org/grpc/coverage.sh delete mode 100644 vendor/src/google.golang.org/grpc/credentials/credentials.go delete mode 100644 vendor/src/google.golang.org/grpc/credentials/oauth/oauth.go delete mode 100644 vendor/src/google.golang.org/grpc/doc.go delete mode 100644 vendor/src/google.golang.org/grpc/grpclog/logger.go delete mode 100644 vendor/src/google.golang.org/grpc/interceptor.go delete mode 100644 vendor/src/google.golang.org/grpc/internal/internal.go delete mode 100644 vendor/src/google.golang.org/grpc/metadata/metadata.go delete mode 100644 vendor/src/google.golang.org/grpc/naming/naming.go delete mode 100644 vendor/src/google.golang.org/grpc/peer/peer.go delete mode 100644 
vendor/src/google.golang.org/grpc/picker.go delete mode 100644 vendor/src/google.golang.org/grpc/rpc_util.go delete mode 100644 vendor/src/google.golang.org/grpc/server.go delete mode 100644 vendor/src/google.golang.org/grpc/stream.go delete mode 100644 vendor/src/google.golang.org/grpc/trace.go delete mode 100644 vendor/src/google.golang.org/grpc/transport/control.go delete mode 100644 vendor/src/google.golang.org/grpc/transport/handler_server.go delete mode 100644 vendor/src/google.golang.org/grpc/transport/http2_client.go delete mode 100644 vendor/src/google.golang.org/grpc/transport/http2_server.go delete mode 100644 vendor/src/google.golang.org/grpc/transport/http_util.go delete mode 100644 vendor/src/google.golang.org/grpc/transport/transport.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/.gitignore delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/.travis.yml delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/AUTHORS delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/CHANGELOG.md delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/LICENSE delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/README.md delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/fen.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/fsnotify.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/inotify.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/inotify_poller.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/kqueue.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/open_mode_bsd.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/open_mode_darwin.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/windows.go delete mode 100644 volume/drivers/adapter.go delete mode 100644 volume/drivers/extpoint.go delete mode 100644 volume/drivers/extpoint_test.go delete mode 100644 volume/drivers/proxy.go delete mode 100644 volume/drivers/proxy_test.go delete mode 100644 volume/local/local.go delete mode 100644 volume/local/local_test.go delete mode 100644 volume/local/local_unix.go delete mode 100644 volume/local/local_windows.go delete mode 100644 volume/store/errors.go delete mode 100644 volume/store/store.go delete mode 100644 volume/store/store_test.go delete mode 100644 volume/store/store_unix.go delete mode 100644 volume/store/store_windows.go delete mode 100644 volume/testutils/testutils.go delete mode 100644 volume/volume.go delete mode 100644 volume/volume_copy.go delete mode 100644 volume/volume_propagation_linux.go delete mode 100644 volume/volume_propagation_linux_test.go delete mode 100644 volume/volume_propagation_unsupported.go delete mode 100644 volume/volume_test.go delete mode 100644 volume/volume_unix.go delete mode 100644 volume/volume_windows.go
diff --git a/api/README.md b/api/README.md
deleted file mode 100644
index 453f61a1a..000000000
--- a/api/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-This directory contains code pertaining to the Docker API:
-
- - Used by the docker client when communicating with the docker daemon
-
- - Used by third party tools wishing to interface with the docker daemon
diff --git a/api/client/bundlefile/bundlefile.go b/api/client/bundlefile/bundlefile.go
deleted file mode 100644
index 75c2d0743..000000000
--- a/api/client/bundlefile/bundlefile.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// +build experimental
-
-package bundlefile
-
-import (
-    "encoding/json"
-    "fmt"
-    "io"
-)
-
-// Bundlefile stores the contents of a bundlefile
-type Bundlefile struct {
-    Version  string
-    Services map[string]Service
-}
-
-// Service is a service from a bundlefile
-type Service struct {
-    Image      string
-    Command    []string          `json:",omitempty"`
-    Args       []string          `json:",omitempty"`
-    Env        []string          `json:",omitempty"`
-    Labels     map[string]string `json:",omitempty"`
-    Ports      []Port            `json:",omitempty"`
-    WorkingDir *string           `json:",omitempty"`
-    User       *string           `json:",omitempty"`
-    Networks   []string          `json:",omitempty"`
-}
-
-// Port is a port as defined in a bundlefile
-type Port struct {
-    Protocol string
-    Port     uint32
-}
-
-// LoadFile loads a bundlefile from the given reader
-func LoadFile(reader io.Reader) (*Bundlefile, error) {
-    bundlefile := &Bundlefile{}
-
-    decoder := json.NewDecoder(reader)
-    if err := decoder.Decode(bundlefile); err != nil {
-        switch jsonErr := err.(type) {
-        case *json.SyntaxError:
-            return nil, fmt.Errorf(
-                "JSON syntax error at byte %v: %s",
-                jsonErr.Offset,
-                jsonErr.Error())
-        case *json.UnmarshalTypeError:
-            return nil, fmt.Errorf(
-                "Unexpected type at byte %v. Expected %s but received %s.",
-                jsonErr.Offset,
-                jsonErr.Type,
-                jsonErr.Value)
-        }
-        return nil, err
-    }
-
-    return bundlefile, nil
-}
-
-// Print writes the contents of the bundlefile to the output writer
-// as human readable json
-func Print(out io.Writer, bundle *Bundlefile) error {
-    bytes, err := json.MarshalIndent(*bundle, "", "    ")
-    if err != nil {
-        return err
-    }
-
-    _, err = out.Write(bytes)
-    return err
-}
diff --git a/api/client/bundlefile/bundlefile_test.go b/api/client/bundlefile/bundlefile_test.go
deleted file mode 100644
index 1ff8235ff..000000000
--- a/api/client/bundlefile/bundlefile_test.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// +build experimental
-
-package bundlefile
-
-import (
-    "bytes"
-    "strings"
-    "testing"
-
-    "github.com/docker/docker/pkg/testutil/assert"
-)
-
-func TestLoadFileV01Success(t *testing.T) {
-    reader := strings.NewReader(`{
-        "Version": "0.1",
-        "Services": {
-            "redis": {
-                "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce",
-                "Networks": ["default"]
-            },
-            "web": {
-                "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d",
-                "Networks": ["default"],
-                "User": "web"
-            }
-        }
-    }`)
-
-    bundle, err := LoadFile(reader)
-    assert.NilError(t, err)
-    assert.Equal(t, bundle.Version, "0.1")
-    assert.Equal(t, len(bundle.Services), 2)
-}
-
-func TestLoadFileSyntaxError(t *testing.T) {
-    reader := strings.NewReader(`{
-        "Version": "0.1",
-        "Services": unquoted string
-    }`)
-
-    _, err := LoadFile(reader)
-    assert.Error(t, err, "syntax error at byte 37: invalid character 'u'")
-}
-
-func TestLoadFileTypeError(t *testing.T) {
-    reader := strings.NewReader(`{
-        "Version": "0.1",
-        "Services": {
-            "web": {
-                "Image": "redis",
-                "Networks": "none"
-            }
-        }
-    }`)
-
-    _, err := LoadFile(reader)
-    assert.Error(t, err, "Unexpected type at byte 94. Expected []string but received string")
-}
-
-func TestPrint(t *testing.T) {
-    var buffer bytes.Buffer
-    bundle := &Bundlefile{
-        Version: "0.1",
-        Services: map[string]Service{
-            "web": {
-                Image:   "image",
-                Command: []string{"echo", "something"},
-            },
-        },
-    }
-    assert.NilError(t, Print(&buffer, bundle))
-    output := buffer.String()
-    assert.Contains(t, output, "\"Image\": \"image\"")
-    assert.Contains(t, output,
-        `"Command": [
-            "echo",
-            "something"
-        ]`)
-}
diff --git a/api/client/cli.go b/api/client/cli.go
deleted file mode 100644
index b2c06d067..000000000
--- a/api/client/cli.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package client
-
-import (
-    "errors"
-    "fmt"
-    "io"
-    "net/http"
-    "os"
-    "runtime"
-
-    "github.com/docker/docker/api"
-    cliflags "github.com/docker/docker/cli/flags"
-    "github.com/docker/docker/cliconfig"
-    "github.com/docker/docker/cliconfig/configfile"
-    "github.com/docker/docker/cliconfig/credentials"
-    "github.com/docker/docker/dockerversion"
-    "github.com/docker/docker/opts"
-    "github.com/docker/docker/pkg/term"
-    "github.com/docker/engine-api/client"
-    "github.com/docker/go-connections/sockets"
-    "github.com/docker/go-connections/tlsconfig"
-)
-
-// DockerCli represents the docker command line client.
-// Instances of the client can be returned from NewDockerCli.
-type DockerCli struct {
-    // initializing closure
-    init func() error
-
-    // configFile has the client configuration file
-    configFile *configfile.ConfigFile
-    // in holds the input stream and closer (io.ReadCloser) for the client.
-    in io.ReadCloser
-    // out holds the output stream (io.Writer) for the client.
-    out io.Writer
-    // err holds the error stream (io.Writer) for the client.
-    err io.Writer
-    // keyFile holds the key file as a string.
-    keyFile string
-    // inFd holds the file descriptor of the client's STDIN (if valid).
-    inFd uintptr
-    // outFd holds the file descriptor of the client's STDOUT (if valid).
-    outFd uintptr
-    // isTerminalIn indicates whether the client's STDIN is a TTY
-    isTerminalIn bool
-    // isTerminalOut indicates whether the client's STDOUT is a TTY
-    isTerminalOut bool
-    // client is the http client that performs all API operations
-    client client.APIClient
-    // state holds the terminal state
-    state *term.State
-}
-
-// Initialize calls the init function that will set up the configuration for the client
-// such as the TLS, TCP and other parameters used to run the client.
-func (cli *DockerCli) Initialize() error {
-    if cli.init == nil {
-        return nil
-    }
-    return cli.init()
-}
-
-// Client returns the APIClient
-func (cli *DockerCli) Client() client.APIClient {
-    return cli.client
-}
-
-// Out returns the writer used for stdout
-func (cli *DockerCli) Out() io.Writer {
-    return cli.out
-}
-
-// Err returns the writer used for stderr
-func (cli *DockerCli) Err() io.Writer {
-    return cli.err
-}
-
-// In returns the reader used for stdin
-func (cli *DockerCli) In() io.ReadCloser {
-    return cli.in
-}
-
-// ConfigFile returns the ConfigFile
-func (cli *DockerCli) ConfigFile() *configfile.ConfigFile {
-    return cli.configFile
-}
-
-// IsTerminalOut returns true if the client's stdout is a TTY
-func (cli *DockerCli) IsTerminalOut() bool {
-    return cli.isTerminalOut
-}
-
-// OutFd returns the fd for the stdout stream
-func (cli *DockerCli) OutFd() uintptr {
-    return cli.outFd
-}
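For reference, the fd/TTY bookkeeping that the inFd/isTerminalIn and outFd/isTerminalOut pairs above implement can be sketched standalone. This uses golang.org/x/term as a stand-in for the pkg/term.GetFdInfo helper in the removed code (an assumption, not the original):

package main

import (
    "fmt"
    "os"

    "golang.org/x/term" // stand-in for docker/pkg/term used above
)

func main() {
    // Grab the raw descriptor, then ask whether it is a terminal,
    // mirroring how DockerCli caches both values at init time.
    inFd, outFd := int(os.Stdin.Fd()), int(os.Stdout.Fd())
    fmt.Printf("stdin:  fd=%d tty=%v\n", inFd, term.IsTerminal(inFd))
    fmt.Printf("stdout: fd=%d tty=%v\n", outFd, term.IsTerminal(outFd))
}

Run it with stdout piped to a file to see the tty flag flip to false, which is exactly the condition the formatting and resize code below keys off.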
-
-// CheckTtyInput checks if we are trying to attach to a container tty
-// from a non-tty client input stream, and if so, returns an error.
-func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error {
-    // In order to attach to a container tty, the input stream for the client must
-    // be a tty itself: redirecting or piping the client standard input is
-    // incompatible with `docker run -t`, `docker exec -t` or `docker attach`.
-    if ttyMode && attachStdin && !cli.isTerminalIn {
-        eText := "the input device is not a TTY"
-        if runtime.GOOS == "windows" {
-            return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'")
-        }
-        return errors.New(eText)
-    }
-    return nil
-}
-
-// PsFormat returns the format string specified in the configuration.
-// String contains columns and format specification, for example {{ID}}\t{{Name}}.
-func (cli *DockerCli) PsFormat() string {
-    return cli.configFile.PsFormat
-}
-
-// ImagesFormat returns the format string specified in the configuration.
-// String contains columns and format specification, for example {{ID}}\t{{Name}}.
-func (cli *DockerCli) ImagesFormat() string {
-    return cli.configFile.ImagesFormat
-}
-
-func (cli *DockerCli) setRawTerminal() error {
-    if cli.isTerminalIn && os.Getenv("NORAW") == "" {
-        state, err := term.SetRawTerminal(cli.inFd)
-        if err != nil {
-            return err
-        }
-        cli.state = state
-    }
-    return nil
-}
-
-func (cli *DockerCli) restoreTerminal(in io.Closer) error {
-    if cli.state != nil {
-        term.RestoreTerminal(cli.inFd, cli.state)
-    }
-    // WARNING: DO NOT REMOVE THE OS CHECK !!!
-    // For some reason this Close call blocks on darwin..
-    // As the client exits right after, simply discard the close
-    // until we find a better solution.
-    if in != nil && runtime.GOOS != "darwin" {
-        return in.Close()
-    }
-    return nil
-}
-
-// NewDockerCli returns a DockerCli instance with input, output and error streams set by in, out and err.
-// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config
-// is set the client scheme will be set to https.
-// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035).
-func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cliflags.ClientFlags) *DockerCli {
-    cli := &DockerCli{
-        in:      in,
-        out:     out,
-        err:     err,
-        keyFile: clientFlags.Common.TrustKey,
-    }
-
-    cli.init = func() error {
-        clientFlags.PostParse()
-        cli.configFile = LoadDefaultConfigFile(err)
-
-        client, err := NewAPIClientFromFlags(clientFlags, cli.configFile)
-        if err != nil {
-            return err
-        }
-
-        cli.client = client
-
-        if cli.in != nil {
-            cli.inFd, cli.isTerminalIn = term.GetFdInfo(cli.in)
-        }
-        if cli.out != nil {
-            cli.outFd, cli.isTerminalOut = term.GetFdInfo(cli.out)
-        }
-
-        return nil
-    }
-
-    return cli
-}
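The setRawTerminal/restoreTerminal pair above is the classic save-raw-restore dance. A minimal standalone sketch of the same pattern, assuming golang.org/x/term in place of the removed pkg/term helpers:

package main

import (
    "fmt"
    "os"

    "golang.org/x/term" // assumption: x/term instead of pkg/term's SetRawTerminal/RestoreTerminal
)

func main() {
    fd := int(os.Stdin.Fd())
    if !term.IsTerminal(fd) {
        // Same guard as the isTerminalIn check in setRawTerminal above.
        fmt.Println("stdin is not a TTY; nothing to do")
        return
    }
    state, err := term.MakeRaw(fd) // like setRawTerminal
    if err != nil {
        panic(err)
    }
    defer term.Restore(fd, state) // like restoreTerminal
    fmt.Print("terminal is raw; press any key to exit\r\n")
    buf := make([]byte, 1)
    os.Stdin.Read(buf)
}

Deferring the restore is what keeps the user's shell usable even when the attached session errors out, which is why the removed code stashes the state on the DockerCli struct.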
-
-// LoadDefaultConfigFile attempts to load the default config file and returns
-// an initialized ConfigFile struct if none is found.
-func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile {
-    configFile, e := cliconfig.Load(cliconfig.ConfigDir())
-    if e != nil {
-        fmt.Fprintf(err, "WARNING: Error loading config file: %v\n", e)
-    }
-    if !configFile.ContainsAuth() {
-        credentials.DetectDefaultStore(configFile)
-    }
-    return configFile
-}
-
-// NewAPIClientFromFlags creates a new APIClient from command line flags
-func NewAPIClientFromFlags(clientFlags *cliflags.ClientFlags, configFile *configfile.ConfigFile) (client.APIClient, error) {
-    host, err := getServerHost(clientFlags.Common.Hosts, clientFlags.Common.TLSOptions)
-    if err != nil {
-        return &client.Client{}, err
-    }
-
-    customHeaders := configFile.HTTPHeaders
-    if customHeaders == nil {
-        customHeaders = map[string]string{}
-    }
-    customHeaders["User-Agent"] = clientUserAgent()
-
-    verStr := api.DefaultVersion
-    if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" {
-        verStr = tmpStr
-    }
-
-    httpClient, err := newHTTPClient(host, clientFlags.Common.TLSOptions)
-    if err != nil {
-        return &client.Client{}, err
-    }
-
-    return client.NewClient(host, verStr, httpClient, customHeaders)
-}
-
-func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) {
-    switch len(hosts) {
-    case 0:
-        host = os.Getenv("DOCKER_HOST")
-    case 1:
-        host = hosts[0]
-    default:
-        return "", errors.New("Please specify only one -H")
-    }
-
-    host, err = opts.ParseHost(tlsOptions != nil, host)
-    return
-}
-
-func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) {
-    if tlsOptions == nil {
-        // let the api client configure the default transport.
-        return nil, nil
-    }
-
-    config, err := tlsconfig.Client(*tlsOptions)
-    if err != nil {
-        return nil, err
-    }
-    tr := &http.Transport{
-        TLSClientConfig: config,
-    }
-    proto, addr, _, err := client.ParseHost(host)
-    if err != nil {
-        return nil, err
-    }
-
-    sockets.ConfigureTransport(tr, proto, addr)
-
-    return &http.Client{
-        Transport: tr,
-    }, nil
-}
-
-func clientUserAgent() string {
-    return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")"
-}
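NewAPIClientFromFlags above resolves the host from flags or DOCKER_HOST and the version from DOCKER_API_VERSION before handing off to engine-api's client.NewClient. A hedged sketch of a caller doing the same resolution by hand; the default socket path and the "1.24" fallback are stand-ins for opts.ParseHost and api.DefaultVersion, which are not reproduced here:

package main

import (
    "fmt"
    "os"

    "github.com/docker/engine-api/client"
)

func main() {
    host := os.Getenv("DOCKER_HOST")
    if host == "" {
        host = "unix:///var/run/docker.sock" // assumed default; the real code uses opts.ParseHost
    }
    version := os.Getenv("DOCKER_API_VERSION")
    if version == "" {
        version = "1.24" // stand-in for api.DefaultVersion
    }
    // nil *http.Client lets engine-api configure its default transport,
    // matching the tlsOptions == nil branch of newHTTPClient above.
    cli, err := client.NewClient(host, version, nil, map[string]string{"User-Agent": "example/0.1"})
    if err != nil {
        panic(err)
    }
    fmt.Println("client ready, negotiated version:", cli.ClientVersion())
}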
diff --git a/api/client/client.go b/api/client/client.go
deleted file mode 100644
index 4cfce5f68..000000000
--- a/api/client/client.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// Package client provides a command-line interface for Docker.
-//
-// Run "docker help SUBCOMMAND" or "docker SUBCOMMAND --help" to see more information on any Docker subcommand, including the full list of options supported for the subcommand.
-// See https://docs.docker.com/installation/ for instructions on installing Docker.
-package client
diff --git a/api/client/commands.go b/api/client/commands.go
deleted file mode 100644
index 8e0bc3e81..000000000
--- a/api/client/commands.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package client
-
-// Command returns a cli command handler if one exists
-func (cli *DockerCli) Command(name string) func(...string) error {
-    return map[string]func(...string) error{
-        "exec":    cli.CmdExec,
-        "inspect": cli.CmdInspect,
-    }[name]
-}
diff --git a/api/client/container/attach.go b/api/client/container/attach.go
deleted file mode 100644
index 0efbec820..000000000
--- a/api/client/container/attach.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package container
-
-import (
-    "fmt"
-    "io"
-    "net/http/httputil"
-
-    "golang.org/x/net/context"
-
-    "github.com/Sirupsen/logrus"
-    "github.com/docker/docker/api/client"
-    "github.com/docker/docker/cli"
-    "github.com/docker/docker/pkg/signal"
-    "github.com/docker/engine-api/types"
-    "github.com/spf13/cobra"
-)
-
-type attachOptions struct {
-    noStdin    bool
-    proxy      bool
-    detachKeys string
-
-    container string
-}
-
-// NewAttachCommand creates a new cobra.Command for `docker attach`
-func NewAttachCommand(dockerCli *client.DockerCli) *cobra.Command {
-    var opts attachOptions
-
-    cmd := &cobra.Command{
-        Use:   "attach [OPTIONS] CONTAINER",
-        Short: "Attach to a running container",
-        Args:  cli.ExactArgs(1),
-        RunE: func(cmd *cobra.Command, args []string) error {
-            opts.container = args[0]
-            return runAttach(dockerCli, &opts)
-        },
-    }
-    cmd.SetFlagErrorFunc(flagErrorFunc)
-
-    flags := cmd.Flags()
-    flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN")
-    flags.BoolVar(&opts.proxy, "sig-proxy", true, "Proxy all received signals to the process")
-    flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container")
-    return cmd
-}
-
-func runAttach(dockerCli *client.DockerCli, opts *attachOptions) error {
-    ctx := context.Background()
-
-    c, err := dockerCli.Client().ContainerInspect(ctx, opts.container)
-    if err != nil {
-        return err
-    }
-
-    if !c.State.Running {
-        return fmt.Errorf("You cannot attach to a stopped container, start it first")
-    }
-
-    if c.State.Paused {
-        return fmt.Errorf("You cannot attach to a paused container, unpause it first")
-    }
-
-    if err := dockerCli.CheckTtyInput(!opts.noStdin, c.Config.Tty); err != nil {
-        return err
-    }
-
-    if opts.detachKeys != "" {
-        dockerCli.ConfigFile().DetachKeys = opts.detachKeys
-    }
-
-    options := types.ContainerAttachOptions{
-        Stream:     true,
-        Stdin:      !opts.noStdin && c.Config.OpenStdin,
-        Stdout:     true,
-        Stderr:     true,
-        DetachKeys: dockerCli.ConfigFile().DetachKeys,
-    }
-
-    var in io.ReadCloser
-    if options.Stdin {
-        in = dockerCli.In()
-    }
-
-    if opts.proxy && !c.Config.Tty {
-        sigc := dockerCli.ForwardAllSignals(ctx, opts.container)
-        defer signal.StopCatch(sigc)
-    }
-
-    resp, errAttach := dockerCli.Client().ContainerAttach(ctx, opts.container, options)
-    if errAttach != nil && errAttach != httputil.ErrPersistEOF {
-        // ContainerAttach returning ErrPersistEOF (connection closed) means the
-        // server met an error and put it in the hijacked connection; keep the
-        // error and read the detailed error message from the hijacked connection later
-        return errAttach
-    }
-    defer resp.Close()
-
-    if c.Config.Tty && dockerCli.IsTerminalOut() {
-        height, width := dockerCli.GetTtySize()
-        // To handle the case where a user repeatedly attaches/detaches without resizing their
-        // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially
-        // resize it, then go back to normal. Without this, every attach after the first will
-        // require the user to manually resize or hit enter.
-        dockerCli.ResizeTtyTo(ctx, opts.container, height+1, width+1, false)
-
-        // After the above resizing occurs, the call to MonitorTtySize below will handle resetting back
-        // to the actual size.
-        if err := dockerCli.MonitorTtySize(ctx, opts.container, false); err != nil {
-            logrus.Debugf("Error monitoring TTY size: %s", err)
-        }
-    }
-    if err := dockerCli.HoldHijackedConnection(ctx, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp); err != nil {
-        return err
-    }
-
-    if errAttach != nil {
-        return errAttach
-    }
-
-    _, status, err := getExitCode(dockerCli, ctx, opts.container)
-    if err != nil {
-        return err
-    }
-    if status != 0 {
-        return cli.StatusError{StatusCode: status}
-    }
-
-    return nil
-}
diff --git a/api/client/container/commit.go b/api/client/container/commit.go
deleted file mode 100644
index ddbe61a87..000000000
--- a/api/client/container/commit.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package container
-
-import (
-    "encoding/json"
-    "fmt"
-
-    "golang.org/x/net/context"
-
-    "github.com/docker/docker/api/client"
-    "github.com/docker/docker/cli"
-    dockeropts "github.com/docker/docker/opts"
-    "github.com/docker/engine-api/types"
-    containertypes "github.com/docker/engine-api/types/container"
-    "github.com/spf13/cobra"
-)
-
-type commitOptions struct {
-    container string
-    reference string
-
-    pause   bool
-    comment string
-    author  string
-    changes dockeropts.ListOpts
-    config  string
-}
-
-// NewCommitCommand creates a new cobra.Command for `docker commit`
-func NewCommitCommand(dockerCli *client.DockerCli) *cobra.Command {
-    var opts commitOptions
-
-    cmd := &cobra.Command{
-        Use:   "commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]",
-        Short: "Create a new image from a container's changes",
-        Args:  cli.RequiresRangeArgs(1, 2),
-        RunE: func(cmd *cobra.Command, args []string) error {
-            opts.container = args[0]
-            if len(args) > 1 {
-                opts.reference = args[1]
-            }
-            return runCommit(dockerCli, &opts)
-        },
-    }
-    cmd.SetFlagErrorFunc(flagErrorFunc)
-
-    flags := cmd.Flags()
-    flags.SetInterspersed(false)
-
-    flags.BoolVarP(&opts.pause, "pause", "p", true, "Pause container during commit")
-    flags.StringVarP(&opts.comment, "message", "m", "", "Commit message")
-    flags.StringVarP(&opts.author, "author", "a", "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")")
-
-    opts.changes = dockeropts.NewListOpts(nil)
-    flags.VarP(&opts.changes, "change", "c", "Apply Dockerfile instruction to the created image")
-
-    // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
-    flags.StringVar(&opts.config, "run", "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
-    flags.MarkDeprecated("run", "it will be replaced with inline Dockerfile commands.")
-
-    return cmd
-}
-
-func runCommit(dockerCli *client.DockerCli, opts *commitOptions) error {
-    ctx := context.Background()
-
-    name := opts.container
-    reference := opts.reference
-
-    var config *containertypes.Config
-    if opts.config != "" {
-        config = &containertypes.Config{}
-        if err := json.Unmarshal([]byte(opts.config), config); err != nil {
-            return err
-        }
-    }
-
-    options := types.ContainerCommitOptions{
-        Reference: reference,
-        Comment:   opts.comment,
-        Author:    opts.author,
-        Changes:   opts.changes.GetAll(),
-        Pause:     opts.pause,
-        Config:    config,
-    }
-
-    response, err := dockerCli.Client().ContainerCommit(ctx, name, options)
-    if err != nil {
-        return err
-    }
-
-    fmt.Fprintln(dockerCli.Out(), response.ID)
-    return nil
-}
diff --git a/api/client/container/cp.go b/api/client/container/cp.go
deleted file mode 100644
index a0031c8b0..000000000
--- a/api/client/container/cp.go
+++ /dev/null
@@ -1,303 +0,0 @@
-package container
-
-import (
-    "fmt"
-    "io"
-    "os"
-    "path/filepath"
-    "strings"
-
-    "golang.org/x/net/context"
-
-    "github.com/docker/docker/api/client"
-    "github.com/docker/docker/cli"
-    "github.com/docker/docker/pkg/archive"
-    "github.com/docker/docker/pkg/system"
-    "github.com/docker/engine-api/types"
-    "github.com/spf13/cobra"
-)
-
-type copyOptions struct {
-    source      string
-    destination string
-    followLink  bool
-}
-
-type copyDirection int
-
-const (
-    fromContainer copyDirection = (1 << iota)
-    toContainer
-    acrossContainers = fromContainer | toContainer
-)
-
-type cpConfig struct {
-    followLink bool
-}
-
-// NewCopyCommand creates a new `docker cp` command
-func NewCopyCommand(dockerCli *client.DockerCli) *cobra.Command {
-    var opts copyOptions
-
-    cmd := &cobra.Command{
-        Use: `cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|-
-    docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH`,
-        Short: "Copy files/folders between a container and the local filesystem",
-        Long: strings.Join([]string{
-            "Copy files/folders between a container and the local filesystem\n",
-            "\nUse '-' as the source to read a tar archive from stdin\n",
-            "and extract it to a directory destination in a container.\n",
-            "Use '-' as the destination to stream a tar archive of a\n",
-            "container source to stdout.",
-        }, ""),
-        Args: cli.ExactArgs(2),
-        RunE: func(cmd *cobra.Command, args []string) error {
-            if args[0] == "" {
-                return fmt.Errorf("source cannot be empty")
-            }
-            if args[1] == "" {
-                return fmt.Errorf("destination cannot be empty")
-            }
-            opts.source = args[0]
-            opts.destination = args[1]
-            return runCopy(dockerCli, opts)
-        },
-    }
-
-    flags := cmd.Flags()
-
-    flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbolic links in SRC_PATH")
-
-    return cmd
-}
-
-func runCopy(dockerCli *client.DockerCli, opts copyOptions) error {
-    srcContainer, srcPath := splitCpArg(opts.source)
-    dstContainer, dstPath := splitCpArg(opts.destination)
-
-    var direction copyDirection
-    if srcContainer != "" {
-        direction |= fromContainer
-    }
-    if dstContainer != "" {
-        direction |= toContainer
-    }
-
-    cpParam := &cpConfig{
-        followLink: opts.followLink,
-    }
-
-    ctx := context.Background()
-
-    switch direction {
-    case fromContainer:
-        return copyFromContainer(ctx, dockerCli, srcContainer, srcPath, dstPath, cpParam)
-    case toContainer:
-        return copyToContainer(ctx, dockerCli, srcPath, dstContainer, dstPath, cpParam)
-    case acrossContainers:
-        // Copying between containers isn't supported.
-        return fmt.Errorf("copying between containers is not supported")
-    default:
-        // User didn't specify any container.
-        return fmt.Errorf("must specify at least one container source")
-    }
-}
-
-func statContainerPath(ctx context.Context, dockerCli *client.DockerCli, containerName, path string) (types.ContainerPathStat, error) {
-    return dockerCli.Client().ContainerStatPath(ctx, containerName, path)
-}
-
-func resolveLocalPath(localPath string) (absPath string, err error) {
-    if absPath, err = filepath.Abs(localPath); err != nil {
-        return
-    }
-
-    return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil
-}
-
-func copyFromContainer(ctx context.Context, dockerCli *client.DockerCli, srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) {
-    if dstPath != "-" {
-        // Get an absolute destination path.
-        dstPath, err = resolveLocalPath(dstPath)
-        if err != nil {
-            return err
-        }
-    }
-
-    // If the client requests to follow symbolic links, we must first decide which target file to copy.
-    var rebaseName string
-    if cpParam.followLink {
-        srcStat, err := statContainerPath(ctx, dockerCli, srcContainer, srcPath)
-
-        // If the source is a symbolic link, we should follow it.
-        if err == nil && srcStat.Mode&os.ModeSymlink != 0 {
-            linkTarget := srcStat.LinkTarget
-            if !system.IsAbs(linkTarget) {
-                // Join with the parent directory.
-                srcParent, _ := archive.SplitPathDirEntry(srcPath)
-                linkTarget = filepath.Join(srcParent, linkTarget)
-            }
-
-            linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget)
-            srcPath = linkTarget
-        }
-
-    }
-
-    content, stat, err := dockerCli.Client().CopyFromContainer(ctx, srcContainer, srcPath)
-    if err != nil {
-        return err
-    }
-    defer content.Close()
-
-    if dstPath == "-" {
-        // Send the response to STDOUT.
-        _, err = io.Copy(os.Stdout, content)
-
-        return err
-    }
-
-    // Prepare source copy info.
-    srcInfo := archive.CopyInfo{
-        Path:       srcPath,
-        Exists:     true,
-        IsDir:      stat.Mode.IsDir(),
-        RebaseName: rebaseName,
-    }
-
-    preArchive := content
-    if len(srcInfo.RebaseName) != 0 {
-        _, srcBase := archive.SplitPathDirEntry(srcInfo.Path)
-        preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName)
-    }
-    // See comments in the implementation of `archive.CopyTo` for exactly what
-    // goes into deciding how and whether the source archive needs to be
-    // altered for the correct copy behavior.
-    return archive.CopyTo(preArchive, srcInfo, dstPath)
-}
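copyFromContainer above receives a tar stream from the daemon; with "-" as the destination it pipes that stream straight to stdout. A stdlib-only sketch of a consumer of such a stream (the pipeline shown in the comment is hypothetical):

package main

import (
    "archive/tar"
    "fmt"
    "io"
    "os"
)

// Lists the entries of a tar stream on stdin, e.g. one produced by
// `docker cp <container>:/etc -` (placeholder container name).
func main() {
    tr := tar.NewReader(os.Stdin)
    for {
        hdr, err := tr.Next()
        if err == io.EOF {
            return // end of archive
        }
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
    }
}

The symmetric direction, copyToContainer below, builds exactly this kind of stream on the client side before uploading it.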
-
-func copyToContainer(ctx context.Context, dockerCli *client.DockerCli, srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) {
-    if srcPath != "-" {
-        // Get an absolute source path.
-        srcPath, err = resolveLocalPath(srcPath)
-        if err != nil {
-            return err
-        }
-    }
-
-    // In order to get the copy behavior right, we need to know information
-    // about both the source and destination. The API is a simple tar
-    // archive/extract API but we can use the stat info header about the
-    // destination to be more informed about exactly what the destination is.
-
-    // Prepare destination copy info by stat-ing the container path.
-    dstInfo := archive.CopyInfo{Path: dstPath}
-    dstStat, err := statContainerPath(ctx, dockerCli, dstContainer, dstPath)
-
-    // If the destination is a symbolic link, we should evaluate it.
-    if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
-        linkTarget := dstStat.LinkTarget
-        if !system.IsAbs(linkTarget) {
-            // Join with the parent directory.
-            dstParent, _ := archive.SplitPathDirEntry(dstPath)
-            linkTarget = filepath.Join(dstParent, linkTarget)
-        }
-
-        dstInfo.Path = linkTarget
-        dstStat, err = statContainerPath(ctx, dockerCli, dstContainer, linkTarget)
-    }
-
-    // Ignore any error and assume that the parent directory of the destination
-    // path exists, in which case the copy may still succeed. If there is any
-    // type of conflict (e.g., non-directory overwriting an existing directory
-    // or vice versa) the extraction will fail. If the destination simply did
-    // not exist, but the parent directory does, the extraction will still
-    // succeed.
-    if err == nil {
-        dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir()
-    }
-
-    var (
-        content         io.Reader
-        resolvedDstPath string
-    )
-
-    if srcPath == "-" {
-        // Use STDIN.
-        content = os.Stdin
-        resolvedDstPath = dstInfo.Path
-        if !dstInfo.IsDir {
-            return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath))
-        }
-    } else {
-        // Prepare source copy info.
-        srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink)
-        if err != nil {
-            return err
-        }
-
-        srcArchive, err := archive.TarResource(srcInfo)
-        if err != nil {
-            return err
-        }
-        defer srcArchive.Close()
-
-        // With the stat info about the local source as well as the
-        // destination, we have enough information to know whether we need to
-        // alter the archive that we upload so that when the server extracts
-        // it to the specified directory in the container we get the desired
-        // copy behavior.
-
-        // See comments in the implementation of `archive.PrepareArchiveCopy`
-        // for exactly what goes into deciding how and whether the source
-        // archive needs to be altered for the correct copy behavior when it is
-        // extracted. This function also infers from the source and destination
-        // info which directory to extract to, which may be the parent of the
-        // destination that the user specified.
-        dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo)
-        if err != nil {
-            return err
-        }
-        defer preparedArchive.Close()
-
-        resolvedDstPath = dstDir
-        content = preparedArchive
-    }
-
-    options := types.CopyToContainerOptions{
-        AllowOverwriteDirWithFile: false,
-    }
-
-    return dockerCli.Client().CopyToContainer(ctx, dstContainer, resolvedDstPath, content, options)
-}
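The comment block and splitCpArg helper that follow explain how a `CONTAINER:PATH` argument is disambiguated from a local path that merely contains a `:`. A standalone re-implementation of that rule for illustration (the `split` function is hypothetical, and it uses filepath.IsAbs where the original uses docker's system.IsAbs, which also recognizes Windows paths on non-Windows clients):

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

func split(arg string) (container, path string) {
    if filepath.IsAbs(arg) {
        // Explicit local absolute path, e.g. /foo (or C:\foo on Windows).
        return "", arg
    }
    parts := strings.SplitN(arg, ":", 2)
    if len(parts) == 1 || strings.HasPrefix(parts[0], ".") {
        // No colon, or an explicit local relative path like ./file:name.txt.
        return "", arg
    }
    return parts[0], parts[1]
}

func main() {
    for _, a := range []string{"mycontainer:/etc/hosts", "./file:name.txt", "/tmp/x"} {
        c, p := split(a)
        fmt.Printf("%-25s -> container=%q path=%q\n", a, c, p)
    }
}

Only the first argument above is treated as a container reference; the other two resolve to purely local paths, matching the rule the helper below implements.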
- return "", arg - } - - parts := strings.SplitN(arg, ":", 2) - - if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { - // Either there's no `:` in the arg - // OR it's an explicit local relative path like `./file:name.txt`. - return "", arg - } - - return parts[0], parts[1] -} diff --git a/api/client/container/create.go b/api/client/container/create.go deleted file mode 100644 index 5038324a2..000000000 --- a/api/client/container/create.go +++ /dev/null @@ -1,218 +0,0 @@ -package container - -import ( - "fmt" - "io" - "os" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonmessage" - // FIXME migrate to docker/distribution/reference - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - runconfigopts "github.com/docker/docker/runconfig/opts" - apiclient "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -type createOptions struct { - name string -} - -// NewCreateCommand creats a new cobra.Command for `docker create` -func NewCreateCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts createOptions - var copts *runconfigopts.ContainerOptions - - cmd := &cobra.Command{ - Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", - Short: "Create a new container", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - copts.Image = args[0] - if len(args) > 1 { - copts.Args = args[1:] - } - return runCreate(dockerCli, cmd.Flags(), &opts, copts) - }, - } - cmd.SetFlagErrorFunc(flagErrorFunc) - - flags := cmd.Flags() - flags.SetInterspersed(false) - - flags.StringVar(&opts.name, "name", "", "Assign a name to the container") - - // Add an explicit help that doesn't have a `-h` to prevent the conflict - // with hostname - flags.Bool("help", false, "Print usage") - - client.AddTrustedFlags(flags, true) - copts = runconfigopts.AddFlags(flags) - return cmd -} - -func runCreate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *createOptions, copts *runconfigopts.ContainerOptions) error { - config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts) - if err != nil { - reportError(dockerCli.Err(), "create", err.Error(), true) - return cli.StatusError{StatusCode: 125} - } - response, err := createContainer(context.Background(), dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name) - if err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) - return nil -} - -func pullImage(ctx context.Context, dockerCli *client.DockerCli, image string, out io.Writer) error { - ref, err := reference.ParseNamed(image) - if err != nil { - return err - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return err - } - - authConfig := dockerCli.ResolveAuthConfig(ctx, repoInfo.Index) - encodedAuth, err := client.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - - options := types.ImageCreateOptions{ - RegistryAuth: encodedAuth, - } - - responseBody, err := dockerCli.Client().ImageCreate(ctx, image, options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream( - responseBody, - out, - dockerCli.OutFd(), - dockerCli.IsTerminalOut(), 
- nil) -} - -type cidFile struct { - path string - file *os.File - written bool -} - -func (cid *cidFile) Close() error { - cid.file.Close() - - if !cid.written { - if err := os.Remove(cid.path); err != nil { - return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) - } - } - - return nil -} - -func (cid *cidFile) Write(id string) error { - if _, err := cid.file.Write([]byte(id)); err != nil { - return fmt.Errorf("Failed to write the container ID to the file: %s", err) - } - cid.written = true - return nil -} - -func newCIDFile(path string) (*cidFile, error) { - if _, err := os.Stat(path); err == nil { - return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) - } - - f, err := os.Create(path) - if err != nil { - return nil, fmt.Errorf("Failed to create the container ID file: %s", err) - } - - return &cidFile{path: path, file: f}, nil -} - -func createContainer(ctx context.Context, dockerCli *client.DockerCli, config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*types.ContainerCreateResponse, error) { - stderr := dockerCli.Err() - - var containerIDFile *cidFile - if cidfile != "" { - var err error - if containerIDFile, err = newCIDFile(cidfile); err != nil { - return nil, err - } - defer containerIDFile.Close() - } - - var trustedRef reference.Canonical - _, ref, err := reference.ParseIDOrReference(config.Image) - if err != nil { - return nil, err - } - if ref != nil { - ref = reference.WithDefaultTag(ref) - - if ref, ok := ref.(reference.NamedTagged); ok && client.IsTrusted() { - var err error - trustedRef, err = dockerCli.TrustedReference(ctx, ref) - if err != nil { - return nil, err - } - config.Image = trustedRef.String() - } - } - - //create the container - response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) - - //if image not found try to pull it - if err != nil { - if apiclient.IsErrImageNotFound(err) && ref != nil { - fmt.Fprintf(stderr, "Unable to find image '%s' locally\n", ref.String()) - - // we don't want to write to stdout anything apart from container.ID - if err = pullImage(ctx, dockerCli, config.Image, stderr); err != nil { - return nil, err - } - if ref, ok := ref.(reference.NamedTagged); ok && trustedRef != nil { - if err := dockerCli.TagTrusted(ctx, trustedRef, ref); err != nil { - return nil, err - } - } - // Retry - var retryErr error - response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) - if retryErr != nil { - return nil, retryErr - } - } else { - return nil, err - } - } - - for _, warning := range response.Warnings { - fmt.Fprintf(stderr, "WARNING: %s\n", warning) - } - if containerIDFile != nil { - if err = containerIDFile.Write(response.ID); err != nil { - return nil, err - } - } - return &response, nil -} diff --git a/api/client/container/diff.go b/api/client/container/diff.go deleted file mode 100644 index 5fdaeadd3..000000000 --- a/api/client/container/diff.go +++ /dev/null @@ -1,61 +0,0 @@ -package container - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/archive" - "github.com/spf13/cobra" -) - -type diffOptions struct { - container string -} - -// NewDiffCommand creates a new cobra.Command for `docker diff` -func NewDiffCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts 
diffOptions - - cmd := &cobra.Command{ - Use: "diff CONTAINER", - Short: "Inspect changes on a container's filesystem", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runDiff(dockerCli, &opts) - }, - } - cmd.SetFlagErrorFunc(flagErrorFunc) - - return cmd -} - -func runDiff(dockerCli *client.DockerCli, opts *diffOptions) error { - if opts.container == "" { - return fmt.Errorf("Container name cannot be empty") - } - ctx := context.Background() - - changes, err := dockerCli.Client().ContainerDiff(ctx, opts.container) - if err != nil { - return err - } - - for _, change := range changes { - var kind string - switch change.Kind { - case archive.ChangeModify: - kind = "C" - case archive.ChangeAdd: - kind = "A" - case archive.ChangeDelete: - kind = "D" - } - fmt.Fprintf(dockerCli.Out(), "%s %s\n", kind, change.Path) - } - - return nil -} diff --git a/api/client/container/export.go b/api/client/container/export.go deleted file mode 100644 index 8dbea9f7b..000000000 --- a/api/client/container/export.go +++ /dev/null @@ -1,59 +0,0 @@ -package container - -import ( - "errors" - "io" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type exportOptions struct { - container string - output string -} - -// NewExportCommand creates a new `docker export` command -func NewExportCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts exportOptions - - cmd := &cobra.Command{ - Use: "export [OPTIONS] CONTAINER", - Short: "Export a container's filesystem as a tar archive", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runExport(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") - - return cmd -} - -func runExport(dockerCli *client.DockerCli, opts exportOptions) error { - if opts.output == "" && dockerCli.IsTerminalOut() { - return errors.New("Cowardly refusing to save to a terminal. 
Use the -o flag or redirect.") - } - - clnt := dockerCli.Client() - - responseBody, err := clnt.ContainerExport(context.Background(), opts.container) - if err != nil { - return err - } - defer responseBody.Close() - - if opts.output == "" { - _, err := io.Copy(dockerCli.Out(), responseBody) - return err - } - - return client.CopyToFile(opts.output, responseBody) -} diff --git a/api/client/container/kill.go b/api/client/container/kill.go deleted file mode 100644 index 7dd5d277b..000000000 --- a/api/client/container/kill.go +++ /dev/null @@ -1,53 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type killOptions struct { - signal string - - containers []string -} - -// NewKillCommand creates a new cobra.Command for `docker kill` -func NewKillCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts killOptions - - cmd := &cobra.Command{ - Use: "kill [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Kill one or more running containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runKill(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.signal, "signal", "s", "KILL", "Signal to send to the container") - return cmd -} - -func runKill(dockerCli *client.DockerCli, opts *killOptions) error { - var errs []string - ctx := context.Background() - for _, name := range opts.containers { - if err := dockerCli.Client().ContainerKill(ctx, name, opts.signal); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/container/logs.go b/api/client/container/logs.go deleted file mode 100644 index a4b1c46dd..000000000 --- a/api/client/container/logs.go +++ /dev/null @@ -1,88 +0,0 @@ -package container - -import ( - "fmt" - "io" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -var validDrivers = map[string]bool{ - "json-file": true, - "journald": true, -} - -type logsOptions struct { - follow bool - since string - timestamps bool - details bool - tail string - - container string -} - -// NewLogsCommand creates a new cobra.Command for `docker logs` -func NewLogsCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts logsOptions - - cmd := &cobra.Command{ - Use: "logs [OPTIONS] CONTAINER", - Short: "Fetch the logs of a container", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runLogs(dockerCli, &opts) - }, - } - cmd.SetFlagErrorFunc(flagErrorFunc) - - flags := cmd.Flags() - flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") - flags.StringVar(&opts.since, "since", "", "Show logs since timestamp") - flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") - flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") - flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") - return cmd -} - -func runLogs(dockerCli *client.DockerCli, opts *logsOptions) error { - ctx := context.Background() - - c, err := dockerCli.Client().ContainerInspect(ctx,
opts.container) - if err != nil { - return err - } - - if !validDrivers[c.HostConfig.LogConfig.Type] { - return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type) - } - - options := types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Since: opts.since, - Timestamps: opts.timestamps, - Follow: opts.follow, - Tail: opts.tail, - Details: opts.details, - } - responseBody, err := dockerCli.Client().ContainerLogs(ctx, opts.container, options) - if err != nil { - return err - } - defer responseBody.Close() - - if c.Config.Tty { - _, err = io.Copy(dockerCli.Out(), responseBody) - } else { - _, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody) - } - return err -} diff --git a/api/client/container/pause.go b/api/client/container/pause.go deleted file mode 100644 index b86a57687..000000000 --- a/api/client/container/pause.go +++ /dev/null @@ -1,51 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type pauseOptions struct { - containers []string -} - -// NewPauseCommand creats a new cobra.Command for `docker pause` -func NewPauseCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts pauseOptions - - cmd := &cobra.Command{ - Use: "pause CONTAINER [CONTAINER...]", - Short: "Pause all processes within one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runPause(dockerCli, &opts) - }, - } - cmd.SetFlagErrorFunc(flagErrorFunc) - - return cmd -} - -func runPause(dockerCli *client.DockerCli, opts *pauseOptions) error { - ctx := context.Background() - - var errs []string - for _, container := range opts.containers { - if err := dockerCli.Client().ContainerPause(ctx, container); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", container) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/container/port.go b/api/client/container/port.go deleted file mode 100644 index 3594eb0c8..000000000 --- a/api/client/container/port.go +++ /dev/null @@ -1,80 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/go-connections/nat" - "github.com/spf13/cobra" -) - -type portOptions struct { - container string - - port string -} - -// NewPortCommand creats a new cobra.Command for `docker port` -func NewPortCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts portOptions - - cmd := &cobra.Command{ - Use: "port CONTAINER [PRIVATE_PORT[/PROTO]]", - Short: "List port mappings or a specific mapping for the container", - Args: cli.RequiresRangeArgs(1, 2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - if len(args) > 1 { - opts.port = args[1] - } - return runPort(dockerCli, &opts) - }, - } - cmd.SetFlagErrorFunc(flagErrorFunc) - - return cmd -} - -func runPort(dockerCli *client.DockerCli, opts *portOptions) error { - ctx := context.Background() - - c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) - if err != nil { - return err - } - - if opts.port != "" { - port := opts.port - proto := "tcp" - parts := strings.SplitN(port, "/", 2) - - if 
len(parts) == 2 && len(parts[1]) != 0 { - port = parts[0] - proto = parts[1] - } - natPort := port + "/" + proto - newP, err := nat.NewPort(proto, port) - if err != nil { - return err - } - if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { - for _, frontend := range frontends { - fmt.Fprintf(dockerCli.Out(), "%s:%s\n", frontend.HostIP, frontend.HostPort) - } - return nil - } - return fmt.Errorf("Error: No public port '%s' published for %s", natPort, opts.container) - } - - for from, frontends := range c.NetworkSettings.Ports { - for _, frontend := range frontends { - fmt.Fprintf(dockerCli.Out(), "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) - } - } - - return nil -} diff --git a/api/client/container/ps.go b/api/client/container/ps.go deleted file mode 100644 index cc4add938..000000000 --- a/api/client/container/ps.go +++ /dev/null @@ -1,125 +0,0 @@ -package container - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/formatter" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - - "github.com/docker/docker/utils/templates" - "github.com/spf13/cobra" - "io/ioutil" -) - -type psOptions struct { - quiet bool - size bool - all bool - noTrunc bool - nLatest bool - last int - format string - filter []string -} - -type preProcessor struct { - opts *types.ContainerListOptions -} - -// Size sets the size option when called by a template execution. -func (p *preProcessor) Size() bool { - p.opts.Size = true - return true -} - -// NewPsCommand creates a new cobra.Command for `docker ps` -func NewPsCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts psOptions - - cmd := &cobra.Command{ - Use: "ps [OPTIONS]", - Short: "List containers", - Args: cli.ExactArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - return runPs(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display numeric IDs") - flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") - flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - flags.BoolVarP(&opts.nLatest, "latest", "l", false, "Show the latest created container (includes all states)") - flags.IntVarP(&opts.last, "last", "n", -1, "Show n last created containers (includes all states)") - flags.StringVarP(&opts.format, "format", "", "", "Pretty-print containers using a Go template") - flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Filter output based on conditions provided") - - return cmd -} - -func runPs(dockerCli *client.DockerCli, opts *psOptions) error { - ctx := context.Background() - - if opts.nLatest && opts.last == -1 { - opts.last = 1 - } - - containerFilterArgs := filters.NewArgs() - for _, f := range opts.filter { - var err error - containerFilterArgs, err = filters.ParseFlag(f, containerFilterArgs) - if err != nil { - return err - } - } - - options := types.ContainerListOptions{ - All: opts.all, - Limit: opts.last, - Size: opts.size, - Filter: containerFilterArgs, - } - - pre := &preProcessor{opts: &options} - tmpl, err := templates.Parse(opts.format) - - if err != nil { - return err - } - - _ = tmpl.Execute(ioutil.Discard, pre) - - containers, err := dockerCli.Client().ContainerList(ctx, options) - if err != nil { - return err - } - - f := 
opts.format - if len(f) == 0 { - if len(dockerCli.PsFormat()) > 0 && !opts.quiet { - f = dockerCli.PsFormat() - } else { - f = "table" - } - } - - psCtx := formatter.ContainerContext{ - Context: formatter.Context{ - Output: dockerCli.Out(), - Format: f, - Quiet: opts.quiet, - Trunc: !opts.noTrunc, - }, - Size: opts.size, - Containers: containers, - } - - psCtx.Write() - - return nil -} diff --git a/api/client/container/rename.go b/api/client/container/rename.go deleted file mode 100644 index e6bc723c8..000000000 --- a/api/client/container/rename.go +++ /dev/null @@ -1,53 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type renameOptions struct { - oldName string - newName string -} - -// NewRenameCommand creats a new cobra.Command for `docker rename` -func NewRenameCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts renameOptions - - cmd := &cobra.Command{ - Use: "rename OLD_NAME NEW_NAME", - Short: "Rename a container", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.oldName = args[0] - opts.newName = args[1] - return runRename(dockerCli, &opts) - }, - } - cmd.SetFlagErrorFunc(flagErrorFunc) - - return cmd -} - -func runRename(dockerCli *client.DockerCli, opts *renameOptions) error { - ctx := context.Background() - - oldName := strings.TrimSpace(opts.oldName) - newName := strings.TrimSpace(opts.newName) - - if oldName == "" || newName == "" { - return fmt.Errorf("Error: Neither old nor new names may be empty") - } - - if err := dockerCli.Client().ContainerRename(ctx, oldName, newName); err != nil { - fmt.Fprintf(dockerCli.Err(), "%s\n", err) - return fmt.Errorf("Error: failed to rename container named %s", oldName) - } - return nil -} diff --git a/api/client/container/restart.go b/api/client/container/restart.go deleted file mode 100644 index 4e80e1250..000000000 --- a/api/client/container/restart.go +++ /dev/null @@ -1,55 +0,0 @@ -package container - -import ( - "fmt" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type restartOptions struct { - nSeconds int - - containers []string -} - -// NewRestartCommand creates a new cobra.Command for `docker restart` -func NewRestartCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts restartOptions - - cmd := &cobra.Command{ - Use: "restart [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Restart a container", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runRestart(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.IntVarP(&opts.nSeconds, "time", "t", 10, "Seconds to wait for stop before killing the container") - return cmd -} - -func runRestart(dockerCli *client.DockerCli, opts *restartOptions) error { - ctx := context.Background() - var errs []string - for _, name := range opts.containers { - timeout := time.Duration(opts.nSeconds) * time.Second - if err := dockerCli.Client().ContainerRestart(ctx, name, &timeout); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/container/rm.go b/api/client/container/rm.go deleted file mode 100644 index 2ccc6c21f..000000000 --- 
a/api/client/container/rm.go +++ /dev/null @@ -1,76 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type rmOptions struct { - rmVolumes bool - rmLink bool - force bool - - containers []string -} - -// NewRmCommand creates a new cobra.Command for `docker rm` -func NewRmCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts rmOptions - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Remove one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runRm(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.rmVolumes, "volumes", "v", false, "Remove the volumes associated with the container") - flags.BoolVarP(&opts.rmLink, "link", "l", false, "Remove the specified link") - flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of a running container (uses SIGKILL)") - return cmd -} - -func runRm(dockerCli *client.DockerCli, opts *rmOptions) error { - ctx := context.Background() - - var errs []string - for _, name := range opts.containers { - if name == "" { - return fmt.Errorf("Container name cannot be empty") - } - name = strings.Trim(name, "/") - - if err := removeContainer(dockerCli, ctx, name, opts.rmVolumes, opts.rmLink, opts.force); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} - -func removeContainer(dockerCli *client.DockerCli, ctx context.Context, container string, removeVolumes, removeLinks, force bool) error { - options := types.ContainerRemoveOptions{ - RemoveVolumes: removeVolumes, - RemoveLinks: removeLinks, - Force: force, - } - if err := dockerCli.Client().ContainerRemove(ctx, container, options); err != nil { - return err - } - return nil -} diff --git a/api/client/container/run.go b/api/client/container/run.go deleted file mode 100644 index 860ff59ed..000000000 --- a/api/client/container/run.go +++ /dev/null @@ -1,329 +0,0 @@ -package container - -import ( - "fmt" - "io" - "net/http/httputil" - "os" - "runtime" - "strings" - "syscall" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - opttypes "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/signal" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/libnetwork/resolvconf/dns" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -type runOptions struct { - autoRemove bool - detach bool - sigProxy bool - name string - detachKeys string -} - -// NewRunCommand create a new `docker run` command -func NewRunCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts runOptions - var copts *runconfigopts.ContainerOptions - - cmd := &cobra.Command{ - Use: "run [OPTIONS] IMAGE [COMMAND] [ARG...]", - Short: "Run a command in a new container", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - copts.Image = args[0] - if len(args) > 1 { - copts.Args = args[1:] - } - return runRun(dockerCli, cmd.Flags(), &opts, copts) - }, - } - cmd.SetFlagErrorFunc(flagErrorFunc) - - flags := 
cmd.Flags() - flags.SetInterspersed(false) - - // These are flags not stored in Config/HostConfig - flags.BoolVar(&opts.autoRemove, "rm", false, "Automatically remove the container when it exits") - flags.BoolVarP(&opts.detach, "detach", "d", false, "Run container in background and print container ID") - flags.BoolVar(&opts.sigProxy, "sig-proxy", true, "Proxy received signals to the process") - flags.StringVar(&opts.name, "name", "", "Assign a name to the container") - flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") - - // Add an explicit help that doesn't have a `-h` to prevent the conflict - // with hostname - flags.Bool("help", false, "Print usage") - - client.AddTrustedFlags(flags, true) - copts = runconfigopts.AddFlags(flags) - return cmd -} - -func flagErrorFunc(cmd *cobra.Command, err error) error { - return cli.StatusError{ - Status: cli.FlagErrorFunc(cmd, err).Error(), - StatusCode: 125, - } -} - -func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions, copts *runconfigopts.ContainerOptions) error { - stdout, stderr, stdin := dockerCli.Out(), dockerCli.Err(), dockerCli.In() - client := dockerCli.Client() - // TODO: pass this as an argument - cmdPath := "run" - - var ( - flAttach *opttypes.ListOpts - ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") - ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") - ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") - ) - - config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts) - - // just in case the Parse does not exit - if err != nil { - reportError(stderr, cmdPath, err.Error(), true) - return cli.StatusError{StatusCode: 125} - } - - if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { - fmt.Fprintf(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n") - } - - if len(hostConfig.DNS) > 0 { - // check the DNS settings passed via --dns against - // localhost regexp to warn if they are trying to - // set a DNS to a localhost address - for _, dnsIP := range hostConfig.DNS { - if dns.IsLocalhost(dnsIP) { - fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) - break - } - } - } - - config.ArgsEscaped = false - - if !opts.detach { - if err := dockerCli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { - return err - } - } else { - if fl := flags.Lookup("attach"); fl != nil { - flAttach = fl.Value.(*opttypes.ListOpts) - if flAttach.Len() != 0 { - return ErrConflictAttachDetach - } - } - if opts.autoRemove { - return ErrConflictDetachAutoRemove - } - - config.AttachStdin = false - config.AttachStdout = false - config.AttachStderr = false - config.StdinOnce = false - } - - // Disable sigProxy when in TTY mode - if config.Tty { - opts.sigProxy = false - } - - // Telling the Windows daemon the initial size of the tty during start makes - // a far better user experience rather than relying on subsequent resizes - // to cause things to catch up. 
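// Illustrative sketch, not from this patch: GetTtySize above queries the
// local terminal so the Windows daemon can size the console up front. A
// rough present-day equivalent with golang.org/x/term (the CLI used its own
// term package; assume ConsoleSize is stored as height, then width):
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	fd := int(os.Stdout.Fd())
	if term.IsTerminal(fd) {
		width, height, err := term.GetSize(fd)
		if err == nil {
			consoleSize := [2]uint{uint(height), uint(width)} // height first
			fmt.Println("initial console size:", consoleSize)
		}
	}
}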
- if runtime.GOOS == "windows" { - hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.GetTtySize() - } - - ctx, cancelFun := context.WithCancel(context.Background()) - - createResponse, err := createContainer(ctx, dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name) - if err != nil { - reportError(stderr, cmdPath, err.Error(), true) - return runStartContainerErr(err) - } - if opts.sigProxy { - sigc := dockerCli.ForwardAllSignals(ctx, createResponse.ID) - defer signal.StopCatch(sigc) - } - var ( - waitDisplayID chan struct{} - errCh chan error - ) - if !config.AttachStdout && !config.AttachStderr { - // Make this asynchronous to allow the client to write to stdin before having to read the ID - waitDisplayID = make(chan struct{}) - go func() { - defer close(waitDisplayID) - fmt.Fprintf(stdout, "%s\n", createResponse.ID) - }() - } - if opts.autoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) { - return ErrConflictRestartPolicyAndAutoRemove - } - attach := config.AttachStdin || config.AttachStdout || config.AttachStderr - if attach { - var ( - out, cerr io.Writer - in io.ReadCloser - ) - if config.AttachStdin { - in = stdin - } - if config.AttachStdout { - out = stdout - } - if config.AttachStderr { - if config.Tty { - cerr = stdout - } else { - cerr = stderr - } - } - - if opts.detachKeys != "" { - dockerCli.ConfigFile().DetachKeys = opts.detachKeys - } - - options := types.ContainerAttachOptions{ - Stream: true, - Stdin: config.AttachStdin, - Stdout: config.AttachStdout, - Stderr: config.AttachStderr, - DetachKeys: dockerCli.ConfigFile().DetachKeys, - } - - resp, errAttach := client.ContainerAttach(ctx, createResponse.ID, options) - if errAttach != nil && errAttach != httputil.ErrPersistEOF { - // ContainerAttach returns an ErrPersistEOF (connection closed) - // means server met an error and put it in Hijacked connection - // keep the error and read detailed error message from hijacked connection later - return errAttach - } - defer resp.Close() - - errCh = promise.Go(func() error { - errHijack := dockerCli.HoldHijackedConnection(ctx, config.Tty, in, out, cerr, resp) - if errHijack == nil { - return errAttach - } - return errHijack - }) - } - - if opts.autoRemove { - defer func() { - // Explicitly not sharing the context as it could be "Done" (by calling cancelFun) - // and thus the container would not be removed. - if err := removeContainer(dockerCli, context.Background(), createResponse.ID, true, false, true); err != nil { - fmt.Fprintf(stderr, "%v\n", err) - } - }() - } - - //start the container - if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil { - // If we have holdHijackedConnection, we should notify - // holdHijackedConnection we are going to exit and wait - // to avoid the terminal are not restored. - if attach { - cancelFun() - <-errCh - } - - reportError(stderr, cmdPath, err.Error(), false) - return runStartContainerErr(err) - } - - if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.IsTerminalOut() { - if err := dockerCli.MonitorTtySize(ctx, createResponse.ID, false); err != nil { - fmt.Fprintf(stderr, "Error monitoring TTY size: %s\n", err) - } - } - - if errCh != nil { - if err := <-errCh; err != nil { - logrus.Debugf("Error hijack: %s", err) - return err - } - } - - // Detached mode: wait for the id to be displayed and return. 
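// Illustrative sketch, not from this patch: waitDisplayID above is the
// close-to-signal idiom, where closing a chan struct{} releases every
// receiver without sending any data. Reduced to its core:
package main

import "fmt"

func main() {
	waitDisplayID := make(chan struct{})
	go func() {
		defer close(waitDisplayID) // closing, not sending, signals "done"
		fmt.Println("abc123def456") // stands in for printing the container ID
	}()
	<-waitDisplayID // blocks until the ID has been written
}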
- if !config.AttachStdout && !config.AttachStderr { - // Detached mode - <-waitDisplayID - return nil - } - - var status int - - // Attached mode - if opts.autoRemove { - // Autoremove: wait for the container to finish, retrieve - // the exit code and remove the container - if status, err = client.ContainerWait(ctx, createResponse.ID); err != nil { - return runStartContainerErr(err) - } - if _, status, err = getExitCode(dockerCli, ctx, createResponse.ID); err != nil { - return err - } - } else { - // No Autoremove: Simply retrieve the exit code - if !config.Tty && hostConfig.RestartPolicy.IsNone() { - // In non-TTY mode, we can't detach, so we must wait for container exit - if status, err = client.ContainerWait(ctx, createResponse.ID); err != nil { - return err - } - } else { - // In TTY mode, there is a race: if the process dies too slowly, the state could - // be updated after the getExitCode call and result in the wrong exit code being reported - if _, status, err = getExitCode(dockerCli, ctx, createResponse.ID); err != nil { - return err - } - } - } - if status != 0 { - return cli.StatusError{StatusCode: status} - } - return nil -} - -// reportError is a utility method that prints a user-friendly message -// containing the error that occurred during parsing and a suggestion to get help -func reportError(stderr io.Writer, name string, str string, withHelp bool) { - if withHelp { - str += ".\nSee '" + os.Args[0] + " " + name + " --help'" - } - fmt.Fprintf(stderr, "%s: %s.\n", os.Args[0], str) -} - -// if container start fails with 'not found'/'no such' error, return 127 -// if container start fails with 'permission denied' error, return 126 -// return 125 for generic docker daemon failures -func runStartContainerErr(err error) error { - trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ") - statusError := cli.StatusError{StatusCode: 125} - if strings.Contains(trimmedErr, "executable file not found") || - strings.Contains(trimmedErr, "no such file or directory") || - strings.Contains(trimmedErr, "system cannot find the file specified") { - statusError = cli.StatusError{StatusCode: 127} - } else if strings.Contains(trimmedErr, syscall.EACCES.Error()) { - statusError = cli.StatusError{StatusCode: 126} - } - - return statusError -} diff --git a/api/client/container/start.go b/api/client/container/start.go deleted file mode 100644 index df955d281..000000000 --- a/api/client/container/start.go +++ /dev/null @@ -1,153 +0,0 @@ -package container - -import ( - "fmt" - "io" - "net/http/httputil" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/signal" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type startOptions struct { - attach bool - openStdin bool - detachKeys string - - containers []string -} - -// NewStartCommand creats a new cobra.Command for `docker start` -func NewStartCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts startOptions - - cmd := &cobra.Command{ - Use: "start [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Start one or more stopped containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runStart(dockerCli, &opts) - }, - } - cmd.SetFlagErrorFunc(flagErrorFunc) - - flags := cmd.Flags() - flags.BoolVarP(&opts.attach, "attach", "a", false, "Attach STDOUT/STDERR and forward signals") - 
flags.BoolVarP(&opts.openStdin, "interactive", "i", false, "Attach container's STDIN") - flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") - return cmd -} - -func runStart(dockerCli *client.DockerCli, opts *startOptions) error { - ctx, cancelFun := context.WithCancel(context.Background()) - - if opts.attach || opts.openStdin { - // We're going to attach to a container. - // 1. Ensure we only have one container. - if len(opts.containers) > 1 { - return fmt.Errorf("You cannot start and attach multiple containers at once.") - } - - // 2. Attach to the container. - container := opts.containers[0] - c, err := dockerCli.Client().ContainerInspect(ctx, container) - if err != nil { - return err - } - - // We always use c.ID instead of container to maintain consistency during `docker start` - if !c.Config.Tty { - sigc := dockerCli.ForwardAllSignals(ctx, c.ID) - defer signal.StopCatch(sigc) - } - - if opts.detachKeys != "" { - dockerCli.ConfigFile().DetachKeys = opts.detachKeys - } - - options := types.ContainerAttachOptions{ - Stream: true, - Stdin: opts.openStdin && c.Config.OpenStdin, - Stdout: true, - Stderr: true, - DetachKeys: dockerCli.ConfigFile().DetachKeys, - } - - var in io.ReadCloser - - if options.Stdin { - in = dockerCli.In() - } - - resp, errAttach := dockerCli.Client().ContainerAttach(ctx, c.ID, options) - if errAttach != nil && errAttach != httputil.ErrPersistEOF { - // ContainerAttach return an ErrPersistEOF (connection closed) - // means server met an error and put it in Hijacked connection - // keep the error and read detailed error message from hijacked connection - return errAttach - } - defer resp.Close() - cErr := promise.Go(func() error { - errHijack := dockerCli.HoldHijackedConnection(ctx, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp) - if errHijack == nil { - return errAttach - } - return errHijack - }) - - // 3. Start the container. - if err := dockerCli.Client().ContainerStart(ctx, c.ID, types.ContainerStartOptions{}); err != nil { - cancelFun() - <-cErr - return err - } - - // 4. Wait for attachment to break. - if c.Config.Tty && dockerCli.IsTerminalOut() { - if err := dockerCli.MonitorTtySize(ctx, c.ID, false); err != nil { - fmt.Fprintf(dockerCli.Err(), "Error monitoring TTY size: %s\n", err) - } - } - if attchErr := <-cErr; attchErr != nil { - return attchErr - } - _, status, err := getExitCode(dockerCli, ctx, c.ID) - if err != nil { - return err - } - if status != 0 { - return cli.StatusError{StatusCode: status} - } - } else { - // We're not going to attach to anything. - // Start as many containers as we want. 
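// Illustrative sketch, not from this patch: the helper invoked just below,
// like runKill, runStop, and runPause elsewhere in this diff, applies an
// operation to every named container and aggregates failures instead of
// stopping at the first error. The shared pattern (names hypothetical):
package main

import (
	"errors"
	"fmt"
	"strings"
)

func forEachContainer(names []string, op func(string) error) error {
	var errs []string
	for _, name := range names {
		if err := op(name); err != nil {
			errs = append(errs, err.Error())
			continue
		}
		fmt.Println(name) // on success the CLI echoes the container name
	}
	if len(errs) > 0 {
		return fmt.Errorf("%s", strings.Join(errs, "\n"))
	}
	return nil
}

func main() {
	err := forEachContainer([]string{"web", "db"}, func(name string) error {
		if name == "db" {
			return errors.New("No such container: db")
		}
		return nil
	})
	fmt.Println(err)
}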
- return startContainersWithoutAttachments(dockerCli, ctx, opts.containers) - } - - return nil -} - -func startContainersWithoutAttachments(dockerCli *client.DockerCli, ctx context.Context, containers []string) error { - var failedContainers []string - for _, container := range containers { - if err := dockerCli.Client().ContainerStart(ctx, container, types.ContainerStartOptions{}); err != nil { - fmt.Fprintf(dockerCli.Err(), "%s\n", err) - failedContainers = append(failedContainers, container) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", container) - } - } - - if len(failedContainers) > 0 { - return fmt.Errorf("Error: failed to start containers: %v", strings.Join(failedContainers, ", ")) - } - return nil -} diff --git a/api/client/container/stats.go b/api/client/container/stats.go deleted file mode 100644 index 46861514c..000000000 --- a/api/client/container/stats.go +++ /dev/null @@ -1,223 +0,0 @@ -package container - -import ( - "fmt" - "io" - "strings" - "sync" - "text/tabwriter" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/system" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - "github.com/spf13/cobra" -) - -type statsOptions struct { - all bool - noStream bool - - containers []string -} - -// NewStatsCommand creats a new cobra.Command for `docker stats` -func NewStatsCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts statsOptions - - cmd := &cobra.Command{ - Use: "stats [OPTIONS] [CONTAINER...]", - Short: "Display a live stream of container(s) resource usage statistics", - Args: cli.RequiresMinArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runStats(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") - flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result") - return cmd -} - -// runStats displays a live stream of resource usage statistics for one or more containers. -// This shows real-time information on CPU usage, memory usage, and network I/O. -func runStats(dockerCli *client.DockerCli, opts *statsOptions) error { - showAll := len(opts.containers) == 0 - closeChan := make(chan error) - - ctx := context.Background() - - // monitorContainerEvents watches for container creation and removal (only - // used when calling `docker stats` without arguments). - monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { - f := filters.NewArgs() - f.Add("type", "container") - options := types.EventsOptions{ - Filters: f, - } - resBody, err := dockerCli.Client().Events(ctx, options) - // Whether we successfully subscribed to events or not, we can now - // unblock the main goroutine. - close(started) - if err != nil { - closeChan <- err - return - } - defer resBody.Close() - - system.DecodeEvents(resBody, func(event events.Message, err error) error { - if err != nil { - closeChan <- err - return nil - } - c <- event - return nil - }) - } - - // waitFirst is a WaitGroup to wait first stat data's reach for each container - waitFirst := &sync.WaitGroup{} - - cStats := stats{} - // getContainerList simulates creation event for all previously existing - // containers (only used when calling `docker stats` without arguments). 
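// Illustrative sketch, not from this patch: getContainerList (defined just
// below) only runs after monitorContainerEvents has closed `started`, so the
// initial listing cannot race with the event subscription. The handshake in
// isolation (names hypothetical):
package main

import "fmt"

func monitor(started chan<- struct{}) {
	// ...attempt the event subscription here...
	close(started) // unblock the caller whether or not subscribing succeeded
	// ...then keep consuming events...
}

func main() {
	started := make(chan struct{})
	go monitor(started)
	<-started // only now is it safe to take the initial container listing
	fmt.Println("listing containers without missing a creation event")
}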
- getContainerList := func() { - options := types.ContainerListOptions{ - All: opts.all, - } - cs, err := dockerCli.Client().ContainerList(ctx, options) - if err != nil { - closeChan <- err - } - for _, container := range cs { - s := &containerStats{Name: container.ID[:12]} - if cStats.add(s) { - waitFirst.Add(1) - go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst) - } - } - } - - if showAll { - // If no names were specified, start a long running goroutine which - // monitors container events. We make sure we're subscribed before - // retrieving the list of running containers to avoid a race where we - // would "miss" a creation. - started := make(chan struct{}) - eh := system.InitEventHandler() - eh.Handle("create", func(e events.Message) { - if opts.all { - s := &containerStats{Name: e.ID[:12]} - if cStats.add(s) { - waitFirst.Add(1) - go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst) - } - } - }) - - eh.Handle("start", func(e events.Message) { - s := &containerStats{Name: e.ID[:12]} - if cStats.add(s) { - waitFirst.Add(1) - go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst) - } - }) - - eh.Handle("die", func(e events.Message) { - if !opts.all { - cStats.remove(e.ID[:12]) - } - }) - - eventChan := make(chan events.Message) - go eh.Watch(eventChan) - go monitorContainerEvents(started, eventChan) - defer close(eventChan) - <-started - - // Start a short-lived goroutine to retrieve the initial list of - // containers. - getContainerList() - } else { - // Artificially send creation events for the containers we were asked to - // monitor (same code path than we use when monitoring all containers). - for _, name := range opts.containers { - s := &containerStats{Name: name} - if cStats.add(s) { - waitFirst.Add(1) - go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst) - } - } - - // We don't expect any asynchronous errors: closeChan can be closed. - close(closeChan) - - // Do a quick pause to detect any error with the provided list of - // container names. 
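// Illustrative sketch, not from this patch: waitFirst, used throughout this
// function, gives each collector goroutine exactly one Done(), fired once its
// first sample (or first error) arrives, so rendering starts only when every
// row has data. The idiom in miniature (names hypothetical):
package main

import (
	"fmt"
	"sync"
)

func collect(name string, waitFirst *sync.WaitGroup) {
	released := false
	release := func() {
		if !released {
			released = true
			waitFirst.Done() // at most one Done per collector
		}
	}
	defer release() // also covers the error path, mirroring the code above
	// ...release() would be called after the first decoded stats sample...
}

func main() {
	var waitFirst sync.WaitGroup
	for _, name := range []string{"c1", "c2"} {
		waitFirst.Add(1)
		go collect(name, &waitFirst)
	}
	waitFirst.Wait() // safe to print the first frame now
	fmt.Println("every row has initial data")
}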
- time.Sleep(1500 * time.Millisecond) - var errs []string - cStats.mu.Lock() - for _, c := range cStats.cs { - c.mu.Lock() - if c.err != nil { - errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err)) - } - c.mu.Unlock() - } - cStats.mu.Unlock() - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, ", ")) - } - } - - // before print to screen, make sure each container get at least one valid stat data - waitFirst.Wait() - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - printHeader := func() { - if !opts.noStream { - fmt.Fprint(dockerCli.Out(), "\033[2J") - fmt.Fprint(dockerCli.Out(), "\033[H") - } - io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\tPIDS\n") - } - - for range time.Tick(500 * time.Millisecond) { - printHeader() - cStats.mu.Lock() - for _, s := range cStats.cs { - if err := s.Display(w); err != nil && !opts.noStream { - logrus.Debugf("stats: got error for %s: %v", s.Name, err) - } - } - cStats.mu.Unlock() - w.Flush() - if opts.noStream { - break - } - select { - case err, ok := <-closeChan: - if ok { - if err != nil { - // this is suppressing "unexpected EOF" in the cli when the - // daemon restarts so it shutdowns cleanly - if err == io.ErrUnexpectedEOF { - return nil - } - return err - } - } - default: - // just skip - } - } - return nil -} diff --git a/api/client/container/stats_helpers.go b/api/client/container/stats_helpers.go deleted file mode 100644 index 608366964..000000000 --- a/api/client/container/stats_helpers.go +++ /dev/null @@ -1,234 +0,0 @@ -package container - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" - "github.com/docker/go-units" - "golang.org/x/net/context" -) - -type containerStats struct { - Name string - CPUPercentage float64 - Memory float64 - MemoryLimit float64 - MemoryPercentage float64 - NetworkRx float64 - NetworkTx float64 - BlockRead float64 - BlockWrite float64 - PidsCurrent uint64 - mu sync.Mutex - err error -} - -type stats struct { - mu sync.Mutex - cs []*containerStats -} - -func (s *stats) add(cs *containerStats) bool { - s.mu.Lock() - defer s.mu.Unlock() - if _, exists := s.isKnownContainer(cs.Name); !exists { - s.cs = append(s.cs, cs) - return true - } - return false -} - -func (s *stats) remove(id string) { - s.mu.Lock() - if i, exists := s.isKnownContainer(id); exists { - s.cs = append(s.cs[:i], s.cs[i+1:]...) 
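// Illustrative sketch, not from this patch: the append call above is Go's
// standard order-preserving delete-at-index idiom, shifting the tail left
// over element i while reusing the backing array:
package main

import "fmt"

func removeAt(s []string, i int) []string {
	return append(s[:i], s[i+1:]...) // delete element i, keep order
}

func main() {
	fmt.Println(removeAt([]string{"a", "b", "c"}, 1)) // [a c]
}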
- } - s.mu.Unlock() -} - -func (s *stats) isKnownContainer(cid string) (int, bool) { - for i, c := range s.cs { - if c.Name == cid { - return i, true - } - } - return -1, false -} - -func (s *containerStats) Collect(ctx context.Context, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { - logrus.Debugf("collecting stats for %s", s.Name) - var ( - getFirst bool - previousCPU uint64 - previousSystem uint64 - u = make(chan error, 1) - ) - - defer func() { - // if error happens and we get nothing of stats, release wait group whatever - if !getFirst { - getFirst = true - waitFirst.Done() - } - }() - - responseBody, err := cli.ContainerStats(ctx, s.Name, streamStats) - if err != nil { - s.mu.Lock() - s.err = err - s.mu.Unlock() - return - } - defer responseBody.Close() - - dec := json.NewDecoder(responseBody) - go func() { - for { - var v *types.StatsJSON - - if err := dec.Decode(&v); err != nil { - dec = json.NewDecoder(io.MultiReader(dec.Buffered(), responseBody)) - u <- err - continue - } - - var memPercent = 0.0 - var cpuPercent = 0.0 - - // MemoryStats.Limit will never be 0 unless the container is not running and we haven't - // got any data from cgroup - if v.MemoryStats.Limit != 0 { - memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 - } - - previousCPU = v.PreCPUStats.CPUUsage.TotalUsage - previousSystem = v.PreCPUStats.SystemUsage - cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v) - blkRead, blkWrite := calculateBlockIO(v.BlkioStats) - s.mu.Lock() - s.CPUPercentage = cpuPercent - s.Memory = float64(v.MemoryStats.Usage) - s.MemoryLimit = float64(v.MemoryStats.Limit) - s.MemoryPercentage = memPercent - s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks) - s.BlockRead = float64(blkRead) - s.BlockWrite = float64(blkWrite) - s.PidsCurrent = v.PidsStats.Current - s.mu.Unlock() - u <- nil - if !streamStats { - return - } - } - }() - for { - select { - case <-time.After(2 * time.Second): - // zero out the values if we have not received an update within - // the specified duration. - s.mu.Lock() - s.CPUPercentage = 0 - s.Memory = 0 - s.MemoryPercentage = 0 - s.MemoryLimit = 0 - s.NetworkRx = 0 - s.NetworkTx = 0 - s.BlockRead = 0 - s.BlockWrite = 0 - s.PidsCurrent = 0 - s.err = errors.New("timeout waiting for stats") - s.mu.Unlock() - // if this is the first stat you get, release WaitGroup - if !getFirst { - getFirst = true - waitFirst.Done() - } - case err := <-u: - if err != nil { - s.mu.Lock() - s.err = err - s.mu.Unlock() - continue - } - s.err = nil - // if this is the first stat you get, release WaitGroup - if !getFirst { - getFirst = true - waitFirst.Done() - } - } - if !streamStats { - return - } - } -} - -func (s *containerStats) Display(w io.Writer) error { - s.mu.Lock() - defer s.mu.Unlock() - // NOTE: if you change this format, you must also change the err format below! 
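// Illustrative sketch, not from this patch: Display below emits tab-separated
// cells, and the text/tabwriter built in runStats (minwidth 20, padding 3)
// aligns them into columns when Flush is called. In miniature:
package main

import (
	"io"
	"os"
	"text/tabwriter"
)

func main() {
	w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
	io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\n")
	io.WriteString(w, "app\t30.00%\t100 MiB / 2 GiB\n")
	w.Flush() // alignment happens here, after all rows are buffered
}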
- format := "%s\t%.2f%%\t%s / %s\t%.2f%%\t%s / %s\t%s / %s\t%d\n" - if s.err != nil { - format = "%s\t%s\t%s / %s\t%s\t%s / %s\t%s / %s\t%s\n" - errStr := "--" - fmt.Fprintf(w, format, - s.Name, errStr, errStr, errStr, errStr, errStr, errStr, errStr, errStr, errStr, - ) - err := s.err - return err - } - fmt.Fprintf(w, format, - s.Name, - s.CPUPercentage, - units.BytesSize(s.Memory), units.BytesSize(s.MemoryLimit), - s.MemoryPercentage, - units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx), - units.HumanSize(s.BlockRead), units.HumanSize(s.BlockWrite), - s.PidsCurrent) - return nil -} - -func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { - var ( - cpuPercent = 0.0 - // calculate the change for the cpu usage of the container in between readings - cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) - // calculate the change for the entire system between readings - systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) - ) - - if systemDelta > 0.0 && cpuDelta > 0.0 { - cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 - } - return cpuPercent -} - -func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) { - for _, bioEntry := range blkio.IoServiceBytesRecursive { - switch strings.ToLower(bioEntry.Op) { - case "read": - blkRead = blkRead + bioEntry.Value - case "write": - blkWrite = blkWrite + bioEntry.Value - } - } - return -} - -func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { - var rx, tx float64 - - for _, v := range network { - rx += float64(v.RxBytes) - tx += float64(v.TxBytes) - } - return rx, tx -} diff --git a/api/client/container/stats_unit_test.go b/api/client/container/stats_unit_test.go deleted file mode 100644 index 83f24bb29..000000000 --- a/api/client/container/stats_unit_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package container - -import ( - "bytes" - "testing" - - "github.com/docker/engine-api/types" -) - -func TestDisplay(t *testing.T) { - c := &containerStats{ - Name: "app", - CPUPercentage: 30.0, - Memory: 100 * 1024 * 1024.0, - MemoryLimit: 2048 * 1024 * 1024.0, - MemoryPercentage: 100.0 / 2048.0 * 100.0, - NetworkRx: 100 * 1024 * 1024, - NetworkTx: 800 * 1024 * 1024, - BlockRead: 100 * 1024 * 1024, - BlockWrite: 800 * 1024 * 1024, - PidsCurrent: 1, - } - var b bytes.Buffer - if err := c.Display(&b); err != nil { - t.Fatalf("c.Display() gave error: %s", err) - } - got := b.String() - want := "app\t30.00%\t100 MiB / 2 GiB\t4.88%\t104.9 MB / 838.9 MB\t104.9 MB / 838.9 MB\t1\n" - if got != want { - t.Fatalf("c.Display() = %q, want %q", got, want) - } -} - -func TestCalculBlockIO(t *testing.T) { - blkio := types.BlkioStats{ - IoServiceBytesRecursive: []types.BlkioStatEntry{{8, 0, "read", 1234}, {8, 1, "read", 4567}, {8, 0, "write", 123}, {8, 1, "write", 456}}, - } - blkRead, blkWrite := calculateBlockIO(blkio) - if blkRead != 5801 { - t.Fatalf("blkRead = %d, want 5801", blkRead) - } - if blkWrite != 579 { - t.Fatalf("blkWrite = %d, want 579", blkWrite) - } -} diff --git a/api/client/container/stop.go b/api/client/container/stop.go deleted file mode 100644 index b05b0e3df..000000000 --- a/api/client/container/stop.go +++ /dev/null @@ -1,57 +0,0 @@ -package container - -import ( - "fmt" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type stopOptions struct { - time int - - containers 
[]string -} - -// NewStopCommand creats a new cobra.Command for `docker stop` -func NewStopCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts stopOptions - - cmd := &cobra.Command{ - Use: "stop [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Stop one or more running containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runStop(dockerCli, &opts) - }, - } - cmd.SetFlagErrorFunc(flagErrorFunc) - - flags := cmd.Flags() - flags.IntVarP(&opts.time, "time", "t", 10, "Seconds to wait for stop before killing it") - return cmd -} - -func runStop(dockerCli *client.DockerCli, opts *stopOptions) error { - ctx := context.Background() - - var errs []string - for _, container := range opts.containers { - timeout := time.Duration(opts.time) * time.Second - if err := dockerCli.Client().ContainerStop(ctx, container, &timeout); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", container) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/container/top.go b/api/client/container/top.go deleted file mode 100644 index e31a903ad..000000000 --- a/api/client/container/top.go +++ /dev/null @@ -1,59 +0,0 @@ -package container - -import ( - "fmt" - "strings" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type topOptions struct { - container string - - args []string -} - -// NewTopCommand creates a new cobra.Command for `docker top` -func NewTopCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts topOptions - - cmd := &cobra.Command{ - Use: "top CONTAINER [ps OPTIONS]", - Short: "Display the running processes of a container", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - opts.args = args[1:] - return runTop(dockerCli, &opts) - }, - } - cmd.SetFlagErrorFunc(flagErrorFunc) - - flags := cmd.Flags() - flags.SetInterspersed(false) - - return cmd -} - -func runTop(dockerCli *client.DockerCli, opts *topOptions) error { - ctx := context.Background() - - procList, err := dockerCli.Client().ContainerTop(ctx, opts.container, opts.args) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) - - for _, proc := range procList.Processes { - fmt.Fprintln(w, strings.Join(proc, "\t")) - } - w.Flush() - return nil -} diff --git a/api/client/container/unpause.go b/api/client/container/unpause.go deleted file mode 100644 index 3d178a006..000000000 --- a/api/client/container/unpause.go +++ /dev/null @@ -1,51 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type unpauseOptions struct { - containers []string -} - -// NewUnpauseCommand creats a new cobra.Command for `docker unpause` -func NewUnpauseCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts unpauseOptions - - cmd := &cobra.Command{ - Use: "unpause CONTAINER [CONTAINER...]", - Short: "Unpause all processes within one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runUnpause(dockerCli, &opts) - }, - } - 
cmd.SetFlagErrorFunc(flagErrorFunc) - - return cmd -} - -func runUnpause(dockerCli *client.DockerCli, opts *unpauseOptions) error { - ctx := context.Background() - - var errs []string - for _, container := range opts.containers { - if err := dockerCli.Client().ContainerUnpause(ctx, container); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", container) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/container/update.go b/api/client/container/update.go deleted file mode 100644 index 77f669d4a..000000000 --- a/api/client/container/update.go +++ /dev/null @@ -1,150 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - runconfigopts "github.com/docker/docker/runconfig/opts" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type updateOptions struct { - blkioWeight uint16 - cpuPeriod int64 - cpuQuota int64 - cpusetCpus string - cpusetMems string - cpuShares int64 - memoryString string - memoryReservation string - memorySwap string - kernelMemory string - restartPolicy string - - nFlag int - - containers []string -} - -// NewUpdateCommand creates a new cobra.Command for `docker update` -func NewUpdateCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts updateOptions - - cmd := &cobra.Command{ - Use: "update [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Update configuration of one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - opts.nFlag = cmd.Flags().NFlag() - return runUpdate(dockerCli, &opts) - }, - } - cmd.SetFlagErrorFunc(flagErrorFunc) - - flags := cmd.Flags() - flags.Uint16Var(&opts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000") - flags.Int64Var(&opts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") - flags.Int64Var(&opts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") - flags.StringVar(&opts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") - flags.StringVar(&opts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.Int64VarP(&opts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.StringVarP(&opts.memoryString, "memory", "m", "", "Memory limit") - flags.StringVar(&opts.memoryReservation, "memory-reservation", "", "Memory soft limit") - flags.StringVar(&opts.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.StringVar(&opts.kernelMemory, "kernel-memory", "", "Kernel memory limit") - flags.StringVar(&opts.restartPolicy, "restart", "", "Restart policy to apply when a container exits") - - return cmd -} - -func runUpdate(dockerCli *client.DockerCli, opts *updateOptions) error { - var err error - - if opts.nFlag == 0 { - return fmt.Errorf("You must provide one or more flags when using this command.") - } - - var memory int64 - if opts.memoryString != "" { - memory, err = units.RAMInBytes(opts.memoryString) - if err != nil { - return err - } - } - - var memoryReservation int64 - if opts.memoryReservation != "" { - memoryReservation, err = units.RAMInBytes(opts.memoryReservation) - if err != nil { - return err - } - } - - var memorySwap int64 - if 
opts.memorySwap != "" { - if opts.memorySwap == "-1" { - memorySwap = -1 - } else { - memorySwap, err = units.RAMInBytes(opts.memorySwap) - if err != nil { - return err - } - } - } - - var kernelMemory int64 - if opts.kernelMemory != "" { - kernelMemory, err = units.RAMInBytes(opts.kernelMemory) - if err != nil { - return err - } - } - - var restartPolicy containertypes.RestartPolicy - if opts.restartPolicy != "" { - restartPolicy, err = runconfigopts.ParseRestartPolicy(opts.restartPolicy) - if err != nil { - return err - } - } - - resources := containertypes.Resources{ - BlkioWeight: opts.blkioWeight, - CpusetCpus: opts.cpusetCpus, - CpusetMems: opts.cpusetMems, - CPUShares: opts.cpuShares, - Memory: memory, - MemoryReservation: memoryReservation, - MemorySwap: memorySwap, - KernelMemory: kernelMemory, - CPUPeriod: opts.cpuPeriod, - CPUQuota: opts.cpuQuota, - } - - updateConfig := containertypes.UpdateConfig{ - Resources: resources, - RestartPolicy: restartPolicy, - } - - ctx := context.Background() - - var errs []string - for _, container := range opts.containers { - if err := dockerCli.Client().ContainerUpdate(ctx, container, updateConfig); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", container) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/container/utils.go b/api/client/container/utils.go deleted file mode 100644 index 0fbfd3d52..000000000 --- a/api/client/container/utils.go +++ /dev/null @@ -1,22 +0,0 @@ -package container - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - clientapi "github.com/docker/engine-api/client" -) - -// getExitCode perform an inspect on the container. It returns -// the running state and the exit code. -func getExitCode(dockerCli *client.DockerCli, ctx context.Context, containerID string) (bool, int, error) { - c, err := dockerCli.Client().ContainerInspect(ctx, containerID) - if err != nil { - // If we can't connect, then the daemon probably died. 
- if err != clientapi.ErrConnectionFailed { - return false, -1, err - } - return false, -1, nil - } - return c.State.Running, c.State.ExitCode, nil -} diff --git a/api/client/container/wait.go b/api/client/container/wait.go deleted file mode 100644 index 4b6be886e..000000000 --- a/api/client/container/wait.go +++ /dev/null @@ -1,52 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type waitOptions struct { - containers []string -} - -// NewWaitCommand creates a new cobra.Command for `docker wait` -func NewWaitCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts waitOptions - - cmd := &cobra.Command{ - Use: "wait CONTAINER [CONTAINER...]", - Short: "Block until a container stops, then print its exit code", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runWait(dockerCli, &opts) - }, - } - cmd.SetFlagErrorFunc(flagErrorFunc) - - return cmd -} - -func runWait(dockerCli *client.DockerCli, opts *waitOptions) error { - ctx := context.Background() - - var errs []string - for _, container := range opts.containers { - status, err := dockerCli.Client().ContainerWait(ctx, container) - if err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%d\n", status) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/credentials.go b/api/client/credentials.go deleted file mode 100644 index 9f5e80c7a..000000000 --- a/api/client/credentials.go +++ /dev/null @@ -1,44 +0,0 @@ -package client - -import ( - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/docker/cliconfig/credentials" - "github.com/docker/engine-api/types" -) - -// GetCredentials loads the user credentials from a credentials store. -// The store is determined by the config file settings. -func GetCredentials(c *configfile.ConfigFile, serverAddress string) (types.AuthConfig, error) { - s := LoadCredentialsStore(c) - return s.Get(serverAddress) -} - -// GetAllCredentials loads all credentials from a credentials store. -// The store is determined by the config file settings. -func GetAllCredentials(c *configfile.ConfigFile) (map[string]types.AuthConfig, error) { - s := LoadCredentialsStore(c) - return s.GetAll() -} - -// StoreCredentials saves the user credentials in a credentials store. -// The store is determined by the config file settings. -func StoreCredentials(c *configfile.ConfigFile, auth types.AuthConfig) error { - s := LoadCredentialsStore(c) - return s.Store(auth) -} - -// EraseCredentials removes the user credentials from a credentials store. -// The store is determined by the config file settings. -func EraseCredentials(c *configfile.ConfigFile, serverAddress string) error { - s := LoadCredentialsStore(c) - return s.Erase(serverAddress) -} - -// LoadCredentialsStore initializes a new credentials store based -// on the settings provided in the configuration file. 
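-// A native helper (for example the operating system keychain) is used when CredentialsStore is set; otherwise credentials stay in the JSON config file.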
-func LoadCredentialsStore(c *configfile.ConfigFile) credentials.Store { - if c.CredentialsStore != "" { - return credentials.NewNativeStore(c) - } - return credentials.NewFileStore(c) -} diff --git a/api/client/exec.go b/api/client/exec.go deleted file mode 100644 index 2aac0d606..000000000 --- a/api/client/exec.go +++ /dev/null @@ -1,160 +0,0 @@ -package client - -import ( - "fmt" - "io" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/promise" - "github.com/docker/engine-api/types" -) - -// CmdExec runs a command in a running container. -// -// Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] -func (cli *DockerCli) CmdExec(args ...string) error { - cmd := Cli.Subcmd("exec", []string{"CONTAINER COMMAND [ARG...]"}, Cli.DockerCommands["exec"].Description, true) - detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") - - execConfig, err := ParseExec(cmd, args) - container := cmd.Arg(0) - // just in case the ParseExec does not exit - if container == "" || err != nil { - return Cli.StatusError{StatusCode: 1} - } - - if *detachKeys != "" { - cli.configFile.DetachKeys = *detachKeys - } - - // Send client escape keys - execConfig.DetachKeys = cli.configFile.DetachKeys - - ctx := context.Background() - - response, err := cli.client.ContainerExecCreate(ctx, container, *execConfig) - if err != nil { - return err - } - - execID := response.ID - if execID == "" { - fmt.Fprintf(cli.out, "exec ID empty") - return nil - } - - // Temp struct for execStart so that we don't need to transfer all the execConfig - if !execConfig.Detach { - if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { - return err - } - } else { - execStartCheck := types.ExecStartCheck{ - Detach: execConfig.Detach, - Tty: execConfig.Tty, - } - - if err := cli.client.ContainerExecStart(ctx, execID, execStartCheck); err != nil { - return err - } - // For now don't print this - wait for when we support exec wait() - // fmt.Fprintf(cli.out, "%s\n", execID) - return nil - } - - // Interactive exec requested. - var ( - out, stderr io.Writer - in io.ReadCloser - errCh chan error - ) - - if execConfig.AttachStdin { - in = cli.in - } - if execConfig.AttachStdout { - out = cli.out - } - if execConfig.AttachStderr { - if execConfig.Tty { - stderr = cli.out - } else { - stderr = cli.err - } - } - - resp, err := cli.client.ContainerExecAttach(ctx, execID, *execConfig) - if err != nil { - return err - } - defer resp.Close() - errCh = promise.Go(func() error { - return cli.HoldHijackedConnection(ctx, execConfig.Tty, in, out, stderr, resp) - }) - - if execConfig.Tty && cli.isTerminalIn { - if err := cli.MonitorTtySize(ctx, execID, true); err != nil { - fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) - } - } - - if err := <-errCh; err != nil { - logrus.Debugf("Error hijack: %s", err) - return err - } - - var status int - if _, status, err = cli.getExecExitCode(ctx, execID); err != nil { - return err - } - - if status != 0 { - return Cli.StatusError{StatusCode: status} - } - - return nil -} - -// ParseExec parses the specified args for the specified command and generates -// an ExecConfig from it. -// If fewer than the minimum number of args is supplied, or if the args are -// not valid, it returns an error. 
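-// For example, given a container named "mycontainer", `docker exec -it -u root mycontainer sh` yields an ExecConfig with User "root", Tty set, all three streams attached, and Cmd []string{"sh"}.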
-func ParseExec(cmd *flag.FlagSet, args []string) (*types.ExecConfig, error) { - var ( - flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") - flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") - flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") - flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: <name|uid>[:<group|gid>])") - flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to the command") - execCmd []string - ) - cmd.Require(flag.Min, 2) - if err := cmd.ParseFlags(args, true); err != nil { - return nil, err - } - parsedArgs := cmd.Args() - execCmd = parsedArgs[1:] - - execConfig := &types.ExecConfig{ - User: *flUser, - Privileged: *flPrivileged, - Tty: *flTty, - Cmd: execCmd, - Detach: *flDetach, - } - - // If -d is not set, attach to everything by default - if !*flDetach { - execConfig.AttachStdout = true - execConfig.AttachStderr = true - if *flStdin { - execConfig.AttachStdin = true - } - } - - return execConfig, nil -} diff --git a/api/client/exec_test.go b/api/client/exec_test.go deleted file mode 100644 index 8b1a3674e..000000000 --- a/api/client/exec_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package client - -import ( - "fmt" - "io/ioutil" - "testing" - - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/types" -) - -type arguments struct { - args []string -} - -func TestParseExec(t *testing.T) { - invalids := map[*arguments]error{ - &arguments{[]string{"-unknown"}}: fmt.Errorf("flag provided but not defined: -unknown"), - &arguments{[]string{"-u"}}: fmt.Errorf("flag needs an argument: -u"), - &arguments{[]string{"--user"}}: fmt.Errorf("flag needs an argument: --user"), - } - valids := map[*arguments]*types.ExecConfig{ - &arguments{ - []string{"container", "command"}, - }: { - Cmd: []string{"command"}, - AttachStdout: true, - AttachStderr: true, - }, - &arguments{ - []string{"container", "command1", "command2"}, - }: { - Cmd: []string{"command1", "command2"}, - AttachStdout: true, - AttachStderr: true, - }, - &arguments{ - []string{"-i", "-t", "-u", "uid", "container", "command"}, - }: { - User: "uid", - AttachStdin: true, - AttachStdout: true, - AttachStderr: true, - Tty: true, - Cmd: []string{"command"}, - }, - &arguments{ - []string{"-d", "container", "command"}, - }: { - AttachStdin: false, - AttachStdout: false, - AttachStderr: false, - Detach: true, - Cmd: []string{"command"}, - }, - &arguments{ - []string{"-t", "-i", "-d", "container", "command"}, - }: { - AttachStdin: false, - AttachStdout: false, - AttachStderr: false, - Detach: true, - Tty: true, - Cmd: []string{"command"}, - }, - } - for invalid, expectedError := range invalids { - cmd := flag.NewFlagSet("exec", flag.ContinueOnError) - cmd.ShortUsage = func() {} - cmd.SetOutput(ioutil.Discard) - _, err := ParseExec(cmd, invalid.args) - if err == nil || err.Error() != expectedError.Error() { - t.Fatalf("Expected an error [%v] for %v, got %v", expectedError, invalid, err) - } - - } - for valid, expectedExecConfig := range valids { - cmd := flag.NewFlagSet("exec", flag.ContinueOnError) - cmd.ShortUsage = func() {} - cmd.SetOutput(ioutil.Discard) - execConfig, err := ParseExec(cmd, valid.args) - if err != nil { - t.Fatal(err) - } - if !compareExecConfig(expectedExecConfig, execConfig) { - t.Fatalf("Expected [%v] for %v, got [%v]", expectedExecConfig, valid, execConfig) - } - } -} - -func compareExecConfig(config1 *types.ExecConfig, config2 
*types.ExecConfig) bool { - if config1.AttachStderr != config2.AttachStderr { - return false - } - if config1.AttachStdin != config2.AttachStdin { - return false - } - if config1.AttachStdout != config2.AttachStdout { - return false - } - if config1.Detach != config2.Detach { - return false - } - if config1.Privileged != config2.Privileged { - return false - } - if config1.Tty != config2.Tty { - return false - } - if config1.User != config2.User { - return false - } - if len(config1.Cmd) != len(config2.Cmd) { - return false - } - for index, value := range config1.Cmd { - if value != config2.Cmd[index] { - return false - } - } - return true -} diff --git a/api/client/formatter/custom.go b/api/client/formatter/custom.go deleted file mode 100644 index 079a71cff..000000000 --- a/api/client/formatter/custom.go +++ /dev/null @@ -1,243 +0,0 @@ -package formatter - -import ( - "fmt" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/engine-api/types" - "github.com/docker/go-units" -) - -const ( - tableKey = "table" - - containerIDHeader = "CONTAINER ID" - imageHeader = "IMAGE" - namesHeader = "NAMES" - commandHeader = "COMMAND" - createdSinceHeader = "CREATED" - createdAtHeader = "CREATED AT" - runningForHeader = "CREATED" - statusHeader = "STATUS" - portsHeader = "PORTS" - sizeHeader = "SIZE" - labelsHeader = "LABELS" - imageIDHeader = "IMAGE ID" - repositoryHeader = "REPOSITORY" - tagHeader = "TAG" - digestHeader = "DIGEST" - mountsHeader = "MOUNTS" -) - -type containerContext struct { - baseSubContext - trunc bool - c types.Container -} - -func (c *containerContext) ID() string { - c.addHeader(containerIDHeader) - if c.trunc { - return stringid.TruncateID(c.c.ID) - } - return c.c.ID -} - -func (c *containerContext) Names() string { - c.addHeader(namesHeader) - names := stripNamePrefix(c.c.Names) - if c.trunc { - for _, name := range names { - if len(strings.Split(name, "/")) == 1 { - names = []string{name} - break - } - } - } - return strings.Join(names, ",") -} - -func (c *containerContext) Image() string { - c.addHeader(imageHeader) - if c.c.Image == "" { - return "" - } - if c.trunc { - if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { - return trunc - } - } - return c.c.Image -} - -func (c *containerContext) Command() string { - c.addHeader(commandHeader) - command := c.c.Command - if c.trunc { - command = stringutils.Truncate(command, 20) - } - return strconv.Quote(command) -} - -func (c *containerContext) CreatedAt() string { - c.addHeader(createdAtHeader) - return time.Unix(int64(c.c.Created), 0).String() -} - -func (c *containerContext) RunningFor() string { - c.addHeader(runningForHeader) - createdAt := time.Unix(int64(c.c.Created), 0) - return units.HumanDuration(time.Now().UTC().Sub(createdAt)) -} - -func (c *containerContext) Ports() string { - c.addHeader(portsHeader) - return api.DisplayablePorts(c.c.Ports) -} - -func (c *containerContext) Status() string { - c.addHeader(statusHeader) - return c.c.Status -} - -func (c *containerContext) Size() string { - c.addHeader(sizeHeader) - srw := units.HumanSize(float64(c.c.SizeRw)) - sv := units.HumanSize(float64(c.c.SizeRootFs)) - - sf := srw - if c.c.SizeRootFs > 0 { - sf = fmt.Sprintf("%s (virtual %s)", srw, sv) - } - return sf -} - -func (c *containerContext) Labels() string { - c.addHeader(labelsHeader) - if c.c.Labels == nil { - return "" - } - - var joinLabels []string - 
for k, v := range c.c.Labels { - joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) - } - return strings.Join(joinLabels, ",") -} - -func (c *containerContext) Label(name string) string { - n := strings.Split(name, ".") - r := strings.NewReplacer("-", " ", "_", " ") - h := r.Replace(n[len(n)-1]) - - c.addHeader(h) - - if c.c.Labels == nil { - return "" - } - return c.c.Labels[name] -} - -func (c *containerContext) Mounts() string { - c.addHeader(mountsHeader) - - var name string - var mounts []string - for _, m := range c.c.Mounts { - if m.Name == "" { - name = m.Source - } else { - name = m.Name - } - if c.trunc { - name = stringutils.Truncate(name, 15) - } - mounts = append(mounts, name) - } - return strings.Join(mounts, ",") -} - -type imageContext struct { - baseSubContext - trunc bool - i types.Image - repo string - tag string - digest string -} - -func (c *imageContext) ID() string { - c.addHeader(imageIDHeader) - if c.trunc { - return stringid.TruncateID(c.i.ID) - } - return c.i.ID -} - -func (c *imageContext) Repository() string { - c.addHeader(repositoryHeader) - return c.repo -} - -func (c *imageContext) Tag() string { - c.addHeader(tagHeader) - return c.tag -} - -func (c *imageContext) Digest() string { - c.addHeader(digestHeader) - return c.digest -} - -func (c *imageContext) CreatedSince() string { - c.addHeader(createdSinceHeader) - createdAt := time.Unix(int64(c.i.Created), 0) - return units.HumanDuration(time.Now().UTC().Sub(createdAt)) -} - -func (c *imageContext) CreatedAt() string { - c.addHeader(createdAtHeader) - return time.Unix(int64(c.i.Created), 0).String() -} - -func (c *imageContext) Size() string { - c.addHeader(sizeHeader) - return units.HumanSize(float64(c.i.Size)) -} - -type subContext interface { - fullHeader() string - addHeader(header string) -} - -type baseSubContext struct { - header []string -} - -func (c *baseSubContext) fullHeader() string { - if c.header == nil { - return "" - } - return strings.Join(c.header, "\t") -} - -func (c *baseSubContext) addHeader(header string) { - if c.header == nil { - c.header = []string{} - } - c.header = append(c.header, strings.ToUpper(header)) -} - -func stripNamePrefix(ss []string) []string { - sss := make([]string, len(ss)) - for i, s := range ss { - sss[i] = s[1:] - } - - return sss -} diff --git a/api/client/formatter/custom_test.go b/api/client/formatter/custom_test.go deleted file mode 100644 index 6a21f2bcd..000000000 --- a/api/client/formatter/custom_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package formatter - -import ( - "reflect" - "strings" - "testing" - "time" - - "github.com/docker/docker/pkg/stringid" - "github.com/docker/engine-api/types" -) - -func TestContainerPsContext(t *testing.T) { - containerID := stringid.GenerateRandomID() - unix := time.Now().Add(-65 * time.Second).Unix() - - var ctx containerContext - cases := []struct { - container types.Container - trunc bool - expValue string - expHeader string - call func() string - }{ - {types.Container{ID: containerID}, true, stringid.TruncateID(containerID), containerIDHeader, ctx.ID}, - {types.Container{ID: containerID}, false, containerID, containerIDHeader, ctx.ID}, - {types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", namesHeader, ctx.Names}, - {types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image}, - {types.Container{Image: "verylongimagename"}, true, "verylongimagename", imageHeader, ctx.Image}, - {types.Container{Image: "verylongimagename"}, false, "verylongimagename", imageHeader, ctx.Image}, - 
{types.Container{ - Image: "a5a665ff33eced1e0803148700880edab4", - ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", - }, - true, - "a5a665ff33ec", - imageHeader, - ctx.Image, - }, - {types.Container{ - Image: "a5a665ff33eced1e0803148700880edab4", - ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", - }, - false, - "a5a665ff33eced1e0803148700880edab4", - imageHeader, - ctx.Image, - }, - {types.Container{Image: ""}, true, "", imageHeader, ctx.Image}, - {types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command}, - {types.Container{Created: unix}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, - {types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", portsHeader, ctx.Ports}, - {types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status}, - {types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size}, - {types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10 B (virtual 20 B)", sizeHeader, ctx.Size}, - {types.Container{}, true, "", labelsHeader, ctx.Labels}, - {types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", labelsHeader, ctx.Labels}, - {types.Container{Created: unix}, true, "About a minute", runningForHeader, ctx.RunningFor}, - } - - for _, c := range cases { - ctx = containerContext{c: c.container, trunc: c.trunc} - v := c.call() - if strings.Contains(v, ",") { - compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - - h := ctx.fullHeader() - if h != c.expHeader { - t.Fatalf("Expected %s, was %s\n", c.expHeader, h) - } - } - - c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} - ctx = containerContext{c: c1, trunc: true} - - sid := ctx.Label("com.docker.swarm.swarm-id") - node := ctx.Label("com.docker.swarm.node_name") - if sid != "33" { - t.Fatalf("Expected 33, was %s\n", sid) - } - - if node != "ubuntu" { - t.Fatalf("Expected ubuntu, was %s\n", node) - } - - h := ctx.fullHeader() - if h != "SWARM ID\tNODE NAME" { - t.Fatalf("Expected %s, was %s\n", "SWARM ID\tNODE NAME", h) - - } - - c2 := types.Container{} - ctx = containerContext{c: c2, trunc: true} - - label := ctx.Label("anything.really") - if label != "" { - t.Fatalf("Expected an empty string, was %s", label) - } - - ctx = containerContext{c: c2, trunc: true} - fullHeader := ctx.fullHeader() - if fullHeader != "" { - t.Fatalf("Expected fullHeader to be empty, was %s", fullHeader) - } - -} - -func TestImagesContext(t *testing.T) { - imageID := stringid.GenerateRandomID() - unix := time.Now().Unix() - - var ctx imageContext - cases := []struct { - imageCtx imageContext - expValue string - expHeader string - call func() string - }{ - {imageContext{ - i: types.Image{ID: imageID}, - trunc: true, - }, stringid.TruncateID(imageID), imageIDHeader, ctx.ID}, - {imageContext{ - i: types.Image{ID: imageID}, - trunc: false, - }, imageID, imageIDHeader, ctx.ID}, - {imageContext{ - i: types.Image{Size: 10}, - trunc: true, - }, "10 B", sizeHeader, ctx.Size}, - {imageContext{ - i: types.Image{Created: unix}, - trunc: true, - }, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, - // FIXME - // {imageContext{ - // i: types.Image{Created: unix}, - // trunc: true, - // }, units.HumanDuration(time.Unix(unix, 0)), createdSinceHeader, ctx.CreatedSince}, - 
{imageContext{ - i: types.Image{}, - repo: "busybox", - }, "busybox", repositoryHeader, ctx.Repository}, - {imageContext{ - i: types.Image{}, - tag: "latest", - }, "latest", tagHeader, ctx.Tag}, - {imageContext{ - i: types.Image{}, - digest: "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", - }, "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", digestHeader, ctx.Digest}, - } - - for _, c := range cases { - ctx = c.imageCtx - v := c.call() - if strings.Contains(v, ",") { - compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - - h := ctx.fullHeader() - if h != c.expHeader { - t.Fatalf("Expected %s, was %s\n", c.expHeader, h) - } - } -} - -func compareMultipleValues(t *testing.T, value, expected string) { - // comma-separated values usually mean a map input, whose iteration - // order is not guaranteed to match our expected value, so we build - // maps and compare them with reflect.DeepEqual instead: - entriesMap := make(map[string]string) - expMap := make(map[string]string) - entries := strings.Split(value, ",") - expectedEntries := strings.Split(expected, ",") - for _, entry := range entries { - keyval := strings.Split(entry, "=") - entriesMap[keyval[0]] = keyval[1] - } - for _, expected := range expectedEntries { - keyval := strings.Split(expected, "=") - expMap[keyval[0]] = keyval[1] - } - if !reflect.DeepEqual(expMap, entriesMap) { - t.Fatalf("Expected entries: %v, got: %v", expected, value) - } -} diff --git a/api/client/formatter/formatter.go b/api/client/formatter/formatter.go deleted file mode 100644 index 1e250a252..000000000 --- a/api/client/formatter/formatter.go +++ /dev/null @@ -1,307 +0,0 @@ -package formatter - -import ( - "bytes" - "fmt" - "io" - "strings" - "text/tabwriter" - "text/template" - - "github.com/docker/docker/reference" - "github.com/docker/docker/utils/templates" - "github.com/docker/engine-api/types" -) - -const ( - tableFormatKey = "table" - rawFormatKey = "raw" - - defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" - defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" - defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" - defaultQuietFormat = "{{.ID}}" -) - -// Context contains information required by the formatter to print the output as desired. -type Context struct { - // Output is the output stream to which the formatted string is written. - Output io.Writer - // Format is used to choose raw, table or custom format for the output. - Format string - // Quiet when set to true will simply print minimal information. - Quiet bool - // Trunc when set to true will truncate the output of certain fields such as Container ID. 
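-// Truncation relies on stringid.TruncateID for IDs and stringutils.Truncate for long commands and mount names (see custom.go above).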
- Trunc bool - - // internal element - table bool - finalFormat string - header string - buffer *bytes.Buffer -} - -func (c *Context) preformat() { - c.finalFormat = c.Format - - if strings.HasPrefix(c.Format, tableKey) { - c.table = true - c.finalFormat = c.finalFormat[len(tableKey):] - } - - c.finalFormat = strings.Trim(c.finalFormat, " ") - r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") - c.finalFormat = r.Replace(c.finalFormat) -} - -func (c *Context) parseFormat() (*template.Template, error) { - tmpl, err := templates.Parse(c.finalFormat) - if err != nil { - c.buffer.WriteString(fmt.Sprintf("Template parsing error: %v\n", err)) - c.buffer.WriteTo(c.Output) - } - return tmpl, err -} - -func (c *Context) postformat(tmpl *template.Template, subContext subContext) { - if c.table { - if len(c.header) == 0 { - // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template - tmpl.Execute(bytes.NewBufferString(""), subContext) - c.header = subContext.fullHeader() - } - - t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) - t.Write([]byte(c.header)) - t.Write([]byte("\n")) - c.buffer.WriteTo(t) - t.Flush() - } else { - c.buffer.WriteTo(c.Output) - } -} - -func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error { - if err := tmpl.Execute(c.buffer, subContext); err != nil { - c.buffer = bytes.NewBufferString(fmt.Sprintf("Template parsing error: %v\n", err)) - c.buffer.WriteTo(c.Output) - return err - } - if c.table && len(c.header) == 0 { - c.header = subContext.fullHeader() - } - c.buffer.WriteString("\n") - return nil -} - -// ContainerContext contains container-specific information required by the formatter; it encapsulates a Context struct. -type ContainerContext struct { - Context - // Size when set to true will display the size of the output. - Size bool - // Containers - Containers []types.Container -} - -// ImageContext contains image-specific information required by the formatter; it encapsulates a Context struct. 
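-// Digest when set to true adds a DIGEST column to table output and per-digest rows to raw output.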
-type ImageContext struct { - Context - Digest bool - // Images - Images []types.Image -} - -func (ctx ContainerContext) Write() { - switch ctx.Format { - case tableFormatKey: - if ctx.Quiet { - ctx.Format = defaultQuietFormat - } else { - ctx.Format = defaultContainerTableFormat - if ctx.Size { - ctx.Format += `\t{{.Size}}` - } - } - case rawFormatKey: - if ctx.Quiet { - ctx.Format = `container_id: {{.ID}}` - } else { - ctx.Format = `container_id: {{.ID}}\nimage: {{.Image}}\ncommand: {{.Command}}\ncreated_at: {{.CreatedAt}}\nstatus: {{.Status}}\nnames: {{.Names}}\nlabels: {{.Labels}}\nports: {{.Ports}}\n` - if ctx.Size { - ctx.Format += `size: {{.Size}}\n` - } - } - } - - ctx.buffer = bytes.NewBufferString("") - ctx.preformat() - - tmpl, err := ctx.parseFormat() - if err != nil { - return - } - - for _, container := range ctx.Containers { - containerCtx := &containerContext{ - trunc: ctx.Trunc, - c: container, - } - err = ctx.contextFormat(tmpl, containerCtx) - if err != nil { - return - } - } - - ctx.postformat(tmpl, &containerContext{}) -} - -func isDangling(image types.Image) bool { - return len(image.RepoTags) == 1 && image.RepoTags[0] == ":" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "@" -} - -func (ctx ImageContext) Write() { - switch ctx.Format { - case tableFormatKey: - ctx.Format = defaultImageTableFormat - if ctx.Digest { - ctx.Format = defaultImageTableFormatWithDigest - } - if ctx.Quiet { - ctx.Format = defaultQuietFormat - } - case rawFormatKey: - if ctx.Quiet { - ctx.Format = `image_id: {{.ID}}` - } else { - if ctx.Digest { - ctx.Format = `repository: {{ .Repository }} -tag: {{.Tag}} -digest: {{.Digest}} -image_id: {{.ID}} -created_at: {{.CreatedAt}} -virtual_size: {{.Size}} -` - } else { - ctx.Format = `repository: {{ .Repository }} -tag: {{.Tag}} -image_id: {{.ID}} -created_at: {{.CreatedAt}} -virtual_size: {{.Size}} -` - } - } - } - - ctx.buffer = bytes.NewBufferString("") - ctx.preformat() - if ctx.table && ctx.Digest && !strings.Contains(ctx.Format, "{{.Digest}}") { - ctx.finalFormat += "\t{{.Digest}}" - } - - tmpl, err := ctx.parseFormat() - if err != nil { - return - } - - for _, image := range ctx.Images { - images := []*imageContext{} - if isDangling(image) { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: "", - tag: "", - digest: "", - }) - } else { - repoTags := map[string][]string{} - repoDigests := map[string][]string{} - - for _, refString := range append(image.RepoTags) { - ref, err := reference.ParseNamed(refString) - if err != nil { - continue - } - if nt, ok := ref.(reference.NamedTagged); ok { - repoTags[ref.Name()] = append(repoTags[ref.Name()], nt.Tag()) - } - } - for _, refString := range append(image.RepoDigests) { - ref, err := reference.ParseNamed(refString) - if err != nil { - continue - } - if c, ok := ref.(reference.Canonical); ok { - repoDigests[ref.Name()] = append(repoDigests[ref.Name()], c.Digest().String()) - } - } - - for repo, tags := range repoTags { - digests := repoDigests[repo] - - // Do not display digests as their own row - delete(repoDigests, repo) - - if !ctx.Digest { - // Ignore digest references, just show tag once - digests = nil - } - - for _, tag := range tags { - if len(digests) == 0 { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: tag, - digest: "", - }) - continue - } - // Display the digests for each tag - for _, dgst := range digests { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: 
tag, - digest: dgst, - }) - } - - } - } - - // Show rows for remaining digest-only references - for repo, digests := range repoDigests { - // If digests are displayed, show row per digest - if ctx.Digest { - for _, dgst := range digests { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: "", - digest: dgst, - }) - } - } else { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: "", - }) - } - } - } - for _, imageCtx := range images { - err = ctx.contextFormat(tmpl, imageCtx) - if err != nil { - return - } - } - } - - ctx.postformat(tmpl, &imageContext{}) -} diff --git a/api/client/formatter/formatter_test.go b/api/client/formatter/formatter_test.go deleted file mode 100644 index 07cde63f9..000000000 --- a/api/client/formatter/formatter_test.go +++ /dev/null @@ -1,537 +0,0 @@ -package formatter - -import ( - "bytes" - "fmt" - "testing" - "time" - - "github.com/docker/engine-api/types" -) - -func TestContainerContextWrite(t *testing.T) { - unixTime := time.Now().AddDate(0, 0, -1).Unix() - expectedTime := time.Unix(unixTime, 0).String() - - contexts := []struct { - context ContainerContext - expected string - }{ - // Errors - { - ContainerContext{ - Context: Context{ - Format: "{{InvalidFunction}}", - }, - }, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - ContainerContext{ - Context: Context{ - Format: "{{nil}}", - }, - }, - `Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command -`, - }, - // Table Format - { - ContainerContext{ - Context: Context{ - Format: "table", - }, - }, - `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -containerID1 ubuntu "" 24 hours ago foobar_baz -containerID2 ubuntu "" 24 hours ago foobar_bar -`, - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}", - }, - }, - "IMAGE\nubuntu\nubuntu\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}", - }, - Size: true, - }, - "IMAGE\nubuntu\nubuntu\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}", - Quiet: true, - }, - }, - "IMAGE\nubuntu\nubuntu\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "table", - Quiet: true, - }, - }, - "containerID1\ncontainerID2\n", - }, - // Raw Format - { - ContainerContext{ - Context: Context{ - Format: "raw", - }, - }, - fmt.Sprintf(`container_id: containerID1 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_baz -labels: -ports: - -container_id: containerID2 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_bar -labels: -ports: - -`, expectedTime, expectedTime), - }, - { - ContainerContext{ - Context: Context{ - Format: "raw", - }, - Size: true, - }, - fmt.Sprintf(`container_id: containerID1 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_baz -labels: -ports: -size: 0 B - -container_id: containerID2 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_bar -labels: -ports: -size: 0 B - -`, expectedTime, expectedTime), - }, - { - ContainerContext{ - Context: Context{ - Format: "raw", - Quiet: true, - }, - }, - "container_id: containerID1\ncontainer_id: containerID2\n", - }, - // Custom Format - { - ContainerContext{ - Context: Context{ - Format: "{{.Image}}", - }, - }, - "ubuntu\nubuntu\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "{{.Image}}", - }, - Size: true, - }, - "ubuntu\nubuntu\n", - }, - } - - for _, context := range contexts { - 
containers := []types.Container{ - {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unixTime}, - {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unixTime}, - } - out := bytes.NewBufferString("") - context.context.Output = out - context.context.Containers = containers - context.context.Write() - actual := out.String() - if actual != context.expected { - t.Fatalf("Expected \n%s, got \n%s", context.expected, actual) - } - // Clean buffer - out.Reset() - } -} - -func TestContainerContextWriteWithNoContainers(t *testing.T) { - out := bytes.NewBufferString("") - containers := []types.Container{} - - contexts := []struct { - context ContainerContext - expected string - }{ - { - ContainerContext{ - Context: Context{ - Format: "{{.Image}}", - Output: out, - }, - }, - "", - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}", - Output: out, - }, - }, - "IMAGE\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "{{.Image}}", - Output: out, - }, - Size: true, - }, - "", - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}", - Output: out, - }, - Size: true, - }, - "IMAGE\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}\t{{.Size}}", - Output: out, - }, - }, - "IMAGE SIZE\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}\t{{.Size}}", - Output: out, - }, - Size: true, - }, - "IMAGE SIZE\n", - }, - } - - for _, context := range contexts { - context.context.Containers = containers - context.context.Write() - actual := out.String() - if actual != context.expected { - t.Fatalf("Expected \n%s, got \n%s", context.expected, actual) - } - // Clean buffer - out.Reset() - } -} - -func TestImageContextWrite(t *testing.T) { - unixTime := time.Now().AddDate(0, 0, -1).Unix() - expectedTime := time.Unix(unixTime, 0).String() - - contexts := []struct { - context ImageContext - expected string - }{ - // Errors - { - ImageContext{ - Context: Context{ - Format: "{{InvalidFunction}}", - }, - }, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - ImageContext{ - Context: Context{ - Format: "{{nil}}", - }, - }, - `Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command -`, - }, - // Table Format - { - ImageContext{ - Context: Context{ - Format: "table", - }, - }, - `REPOSITORY TAG IMAGE ID CREATED SIZE -image tag1 imageID1 24 hours ago 0 B -image tag2 imageID2 24 hours ago 0 B - imageID3 24 hours ago 0 B -`, - }, - { - ImageContext{ - Context: Context{ - Format: "table {{.Repository}}", - }, - }, - "REPOSITORY\nimage\nimage\n\n", - }, - { - ImageContext{ - Context: Context{ - Format: "table {{.Repository}}", - }, - Digest: true, - }, - `REPOSITORY DIGEST -image sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf -image - -`, - }, - { - ImageContext{ - Context: Context{ - Format: "table {{.Repository}}", - Quiet: true, - }, - }, - "REPOSITORY\nimage\nimage\n\n", - }, - { - ImageContext{ - Context: Context{ - Format: "table", - Quiet: true, - }, - }, - "imageID1\nimageID2\nimageID3\n", - }, - { - ImageContext{ - Context: Context{ - Format: "table", - Quiet: false, - }, - Digest: true, - }, - `REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE -image tag1 sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf imageID1 24 hours ago 0 B -image tag2 imageID2 24 hours ago 0 B - imageID3 24 hours ago 0 B -`, - }, - { - ImageContext{ - Context: Context{ - 
Format: "table", - Quiet: true, - }, - Digest: true, - }, - "imageID1\nimageID2\nimageID3\n", - }, - // Raw Format - { - ImageContext{ - Context: Context{ - Format: "raw", - }, - }, - fmt.Sprintf(`repository: image -tag: tag1 -image_id: imageID1 -created_at: %s -virtual_size: 0 B - -repository: image -tag: tag2 -image_id: imageID2 -created_at: %s -virtual_size: 0 B - -repository: -tag: -image_id: imageID3 -created_at: %s -virtual_size: 0 B - -`, expectedTime, expectedTime, expectedTime), - }, - { - ImageContext{ - Context: Context{ - Format: "raw", - }, - Digest: true, - }, - fmt.Sprintf(`repository: image -tag: tag1 -digest: sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf -image_id: imageID1 -created_at: %s -virtual_size: 0 B - -repository: image -tag: tag2 -digest: -image_id: imageID2 -created_at: %s -virtual_size: 0 B - -repository: -tag: -digest: -image_id: imageID3 -created_at: %s -virtual_size: 0 B - -`, expectedTime, expectedTime, expectedTime), - }, - { - ImageContext{ - Context: Context{ - Format: "raw", - Quiet: true, - }, - }, - `image_id: imageID1 -image_id: imageID2 -image_id: imageID3 -`, - }, - // Custom Format - { - ImageContext{ - Context: Context{ - Format: "{{.Repository}}", - }, - }, - "image\nimage\n\n", - }, - { - ImageContext{ - Context: Context{ - Format: "{{.Repository}}", - }, - Digest: true, - }, - "image\nimage\n\n", - }, - } - - for _, context := range contexts { - images := []types.Image{ - {ID: "imageID1", RepoTags: []string{"image:tag1"}, RepoDigests: []string{"image@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"}, Created: unixTime}, - {ID: "imageID2", RepoTags: []string{"image:tag2"}, Created: unixTime}, - {ID: "imageID3", RepoTags: []string{":"}, RepoDigests: []string{"@"}, Created: unixTime}, - } - out := bytes.NewBufferString("") - context.context.Output = out - context.context.Images = images - context.context.Write() - actual := out.String() - if actual != context.expected { - t.Fatalf("Expected \n%s, got \n%s", context.expected, actual) - } - // Clean buffer - out.Reset() - } -} - -func TestImageContextWriteWithNoImage(t *testing.T) { - out := bytes.NewBufferString("") - images := []types.Image{} - - contexts := []struct { - context ImageContext - expected string - }{ - { - ImageContext{ - Context: Context{ - Format: "{{.Repository}}", - Output: out, - }, - }, - "", - }, - { - ImageContext{ - Context: Context{ - Format: "table {{.Repository}}", - Output: out, - }, - }, - "REPOSITORY\n", - }, - { - ImageContext{ - Context: Context{ - Format: "{{.Repository}}", - Output: out, - }, - Digest: true, - }, - "", - }, - { - ImageContext{ - Context: Context{ - Format: "table {{.Repository}}", - Output: out, - }, - Digest: true, - }, - "REPOSITORY DIGEST\n", - }, - } - - for _, context := range contexts { - context.context.Images = images - context.context.Write() - actual := out.String() - if actual != context.expected { - t.Fatalf("Expected \n%s, got \n%s", context.expected, actual) - } - // Clean buffer - out.Reset() - } -} diff --git a/api/client/hijack.go b/api/client/hijack.go deleted file mode 100644 index 294078e44..000000000 --- a/api/client/hijack.go +++ /dev/null @@ -1,95 +0,0 @@ -package client - -import ( - "io" - "sync" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/engine-api/types" -) - -// HoldHijackedConnection handles copying input to and output from streams to the -// connection -func (cli *DockerCli) 
HoldHijackedConnection(ctx context.Context, tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { - var ( - err error - restoreOnce sync.Once - ) - if inputStream != nil && tty { - if err := cli.setRawTerminal(); err != nil { - return err - } - defer func() { - restoreOnce.Do(func() { - cli.restoreTerminal(inputStream) - }) - }() - } - - receiveStdout := make(chan error, 1) - if outputStream != nil || errorStream != nil { - go func() { - // When TTY is ON, use regular copy - if tty && outputStream != nil { - _, err = io.Copy(outputStream, resp.Reader) - // we should restore the terminal as soon as possible once the connection ends - // so that any following messages print as normal text. - if inputStream != nil { - restoreOnce.Do(func() { - cli.restoreTerminal(inputStream) - }) - } - } else { - _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader) - } - - logrus.Debug("[hijack] End of stdout") - receiveStdout <- err - }() - } - - stdinDone := make(chan struct{}) - go func() { - if inputStream != nil { - io.Copy(resp.Conn, inputStream) - // we should restore the terminal as soon as possible once the connection ends - // so that any following messages print as normal text. - if tty { - restoreOnce.Do(func() { - cli.restoreTerminal(inputStream) - }) - } - logrus.Debug("[hijack] End of stdin") - } - - if err := resp.CloseWrite(); err != nil { - logrus.Debugf("Couldn't send EOF: %s", err) - } - close(stdinDone) - }() - - select { - case err := <-receiveStdout: - if err != nil { - logrus.Debugf("Error receiveStdout: %s", err) - return err - } - case <-stdinDone: - if outputStream != nil || errorStream != nil { - select { - case err := <-receiveStdout: - if err != nil { - logrus.Debugf("Error receiveStdout: %s", err) - return err - } - case <-ctx.Done(): - } - } - case <-ctx.Done(): - } - - return nil -} diff --git a/api/client/idresolver/idresolver.go b/api/client/idresolver/idresolver.go deleted file mode 100644 index 9b38d151b..000000000 --- a/api/client/idresolver/idresolver.go +++ /dev/null @@ -1,70 +0,0 @@ -package idresolver - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types/swarm" -) - -// IDResolver provides ID to Name resolution. -type IDResolver struct { - client client.APIClient - noResolve bool - cache map[string]string -} - -// New creates a new IDResolver. -func New(client client.APIClient, noResolve bool) *IDResolver { - return &IDResolver{ - client: client, - noResolve: noResolve, - cache: make(map[string]string), - } -} - -func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) { - switch t.(type) { - case swarm.Node: - node, _, err := r.client.NodeInspectWithRaw(ctx, id) - if err != nil { - return id, nil - } - if node.Spec.Annotations.Name != "" { - return node.Spec.Annotations.Name, nil - } - if node.Description.Hostname != "" { - return node.Description.Hostname, nil - } - return id, nil - case swarm.Service: - service, _, err := r.client.ServiceInspectWithRaw(ctx, id) - if err != nil { - return id, nil - } - return service.Spec.Annotations.Name, nil - default: - return "", fmt.Errorf("unsupported type") - } - -} - -// Resolve will attempt to resolve an ID to a Name by querying the manager. -// Results are stored into a cache. -// If the `-n` flag is used in the command-line, resolution is disabled. 
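-// Inspect failures inside the lookup fall back to returning the raw ID, so callers always get a printable value; only an unsupported type yields an error.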
-func (r *IDResolver) Resolve(ctx context.Context, t interface{}, id string) (string, error) { - if r.noResolve { - return id, nil - } - if name, ok := r.cache[id]; ok { - return name, nil - } - name, err := r.get(ctx, t, id) - if err != nil { - return "", err - } - r.cache[id] = name - return name, nil -} diff --git a/api/client/image/build.go b/api/client/image/build.go deleted file mode 100644 index 38f4bcd43..000000000 --- a/api/client/image/build.go +++ /dev/null @@ -1,432 +0,0 @@ -package image - -import ( - "archive/tar" - "bufio" - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "runtime" - - "golang.org/x/net/context" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/client" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerignore" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/docker/reference" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type buildOptions struct { - context string - dockerfileName string - tags opts.ListOpts - labels []string - buildArgs opts.ListOpts - ulimits *runconfigopts.UlimitOpt - memory string - memorySwap string - shmSize string - cpuShares int64 - cpuPeriod int64 - cpuQuota int64 - cpuSetCpus string - cpuSetMems string - cgroupParent string - isolation string - quiet bool - noCache bool - rm bool - forceRm bool - pull bool -} - -// NewBuildCommand creates a new `docker build` command -func NewBuildCommand(dockerCli *client.DockerCli) *cobra.Command { - ulimits := make(map[string]*units.Ulimit) - options := buildOptions{ - tags: opts.NewListOpts(validateTag), - buildArgs: opts.NewListOpts(runconfigopts.ValidateEnv), - ulimits: runconfigopts.NewUlimitOpt(&ulimits), - } - - cmd := &cobra.Command{ - Use: "build [OPTIONS] PATH | URL | -", - Short: "Build an image from a Dockerfile", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - options.context = args[0] - return runBuild(dockerCli, options) - }, - } - - flags := cmd.Flags() - - flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format") - flags.Var(&options.buildArgs, "build-arg", "Set build-time variables") - flags.Var(options.ulimits, "ulimit", "Ulimit options") - flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") - flags.StringVarP(&options.memory, "memory", "m", "", "Memory limit") - flags.StringVar(&options.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.StringVar(&options.shmSize, "shm-size", "", "Size of /dev/shm, default value is 64MB") - flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period") - flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") - flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") - flags.StringVar(&options.cpuSetMems, 
"cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") - flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology") - flags.StringSliceVar(&options.labels, "label", []string{}, "Set metadata for an image") - flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image") - flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build") - flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers") - flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success") - flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image") - - client.AddTrustedFlags(flags, true) - - return cmd -} - -func runBuild(dockerCli *client.DockerCli, options buildOptions) error { - - var ( - buildCtx io.ReadCloser - err error - ) - - specifiedContext := options.context - - var ( - contextDir string - tempDir string - relDockerfile string - progBuff io.Writer - buildBuff io.Writer - ) - - progBuff = dockerCli.Out() - buildBuff = dockerCli.Out() - if options.quiet { - progBuff = bytes.NewBuffer(nil) - buildBuff = bytes.NewBuffer(nil) - } - - switch { - case specifiedContext == "-": - buildCtx, relDockerfile, err = builder.GetContextFromReader(dockerCli.In(), options.dockerfileName) - case urlutil.IsGitURL(specifiedContext): - tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, options.dockerfileName) - case urlutil.IsURL(specifiedContext): - buildCtx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName) - default: - contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, options.dockerfileName) - } - - if err != nil { - if options.quiet && urlutil.IsURL(specifiedContext) { - fmt.Fprintln(dockerCli.Err(), progBuff) - } - return fmt.Errorf("unable to prepare context: %s", err) - } - - if tempDir != "" { - defer os.RemoveAll(tempDir) - contextDir = tempDir - } - - if buildCtx == nil { - // And canonicalize dockerfile name to a platform-independent one - relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) - if err != nil { - return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) - } - - f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) - if err != nil && !os.IsNotExist(err) { - return err - } - - var excludes []string - if err == nil { - excludes, err = dockerignore.ReadAll(f) - if err != nil { - return err - } - } - - if err := builder.ValidateContextDirectory(contextDir, excludes); err != nil { - return fmt.Errorf("Error checking context: '%s'.", err) - } - - // If .dockerignore mentions .dockerignore or the Dockerfile - // then make sure we send both files over to the daemon - // because Dockerfile is, obviously, needed no matter what, and - // .dockerignore is needed to know if either one needs to be - // removed. The daemon will remove them for us, if needed, after it - // parses the Dockerfile. Ignore errors here, as they will have been - // caught by validateContextDirectory above. 
- var includes = []string{"."} - keepThem1, _ := fileutils.Matches(".dockerignore", excludes) - keepThem2, _ := fileutils.Matches(relDockerfile, excludes) - if keepThem1 || keepThem2 { - includes = append(includes, ".dockerignore", relDockerfile) - } - - buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ - Compression: archive.Uncompressed, - ExcludePatterns: excludes, - IncludeFiles: includes, - }) - if err != nil { - return err - } - } - - ctx := context.Background() - - var resolvedTags []*resolvedTag - if client.IsTrusted() { - // Wrap the tar archive to replace the Dockerfile entry with the rewritten - // Dockerfile which uses trusted pulls. - buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, dockerCli.TrustedReference, &resolvedTags) - } - - // Setup an upload progress bar - progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) - - var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon") - - var memory int64 - if options.memory != "" { - parsedMemory, err := units.RAMInBytes(options.memory) - if err != nil { - return err - } - memory = parsedMemory - } - - var memorySwap int64 - if options.memorySwap != "" { - if options.memorySwap == "-1" { - memorySwap = -1 - } else { - parsedMemorySwap, err := units.RAMInBytes(options.memorySwap) - if err != nil { - return err - } - memorySwap = parsedMemorySwap - } - } - - var shmSize int64 - if options.shmSize != "" { - shmSize, err = units.RAMInBytes(options.shmSize) - if err != nil { - return err - } - } - - buildOptions := types.ImageBuildOptions{ - Memory: memory, - MemorySwap: memorySwap, - Tags: options.tags.GetAll(), - SuppressOutput: options.quiet, - NoCache: options.noCache, - Remove: options.rm, - ForceRemove: options.forceRm, - PullParent: options.pull, - Isolation: container.Isolation(options.isolation), - CPUSetCPUs: options.cpuSetCpus, - CPUSetMems: options.cpuSetMems, - CPUShares: options.cpuShares, - CPUQuota: options.cpuQuota, - CPUPeriod: options.cpuPeriod, - CgroupParent: options.cgroupParent, - Dockerfile: relDockerfile, - ShmSize: shmSize, - Ulimits: options.ulimits.GetList(), - BuildArgs: runconfigopts.ConvertKVStringsToMap(options.buildArgs.GetAll()), - AuthConfigs: dockerCli.RetrieveAuthConfigs(), - Labels: runconfigopts.ConvertKVStringsToMap(options.labels), - } - - response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions) - if err != nil { - return err - } - defer response.Body.Close() - - err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.OutFd(), dockerCli.IsTerminalOut(), nil) - if err != nil { - if jerr, ok := err.(*jsonmessage.JSONError); ok { - // If no error code is set, default to 1 - if jerr.Code == 0 { - jerr.Code = 1 - } - if options.quiet { - fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff) - } - return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} - } - } - - // Windows: show error message about modified file permissions if the - // daemon isn't running Windows. - if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet { - fmt.Fprintln(dockerCli.Err(), `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. 
It is recommended to double check and reset permissions for sensitive files and directories.`) - } - - // Everything worked so if -q was provided the output from the daemon - // should be just the image ID and we'll print that to stdout. - if options.quiet { - fmt.Fprintf(dockerCli.Out(), "%s", buildBuff) - } - - if client.IsTrusted() { - // Since the build was successful, now we must tag any of the resolved - // images from the above Dockerfile rewrite. - for _, resolved := range resolvedTags { - if err := dockerCli.TagTrusted(ctx, resolved.digestRef, resolved.tagRef); err != nil { - return err - } - } - } - - return nil -} - -type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error) - -// validateTag checks if the given image name can be resolved. -func validateTag(rawRepo string) (string, error) { - _, err := reference.ParseNamed(rawRepo) - if err != nil { - return "", err - } - - return rawRepo, nil -} - -var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`) - -// resolvedTag records the repository, tag, and resolved digest reference -// from a Dockerfile rewrite. -type resolvedTag struct { - digestRef reference.Canonical - tagRef reference.NamedTagged -} - -// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in -// "FROM <image>" instructions to a digest reference. `translator` is a -// function that takes a repository name and tag reference and returns a -// trusted digest reference. -func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { - scanner := bufio.NewScanner(dockerfile) - buf := bytes.NewBuffer(nil) - - // Scan the lines of the Dockerfile, looking for a "FROM" line. - for scanner.Scan() { - line := scanner.Text() - - matches := dockerfileFromLinePattern.FindStringSubmatch(line) - if matches != nil && matches[1] != api.NoBaseImageSpecifier { - // Replace the line with a resolved "FROM repo@digest" - ref, err := reference.ParseNamed(matches[1]) - if err != nil { - return nil, nil, err - } - ref = reference.WithDefaultTag(ref) - if ref, ok := ref.(reference.NamedTagged); ok && client.IsTrusted() { - trustedRef, err := translator(ctx, ref) - if err != nil { - return nil, nil, err - } - - line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.String())) - resolvedTags = append(resolvedTags, &resolvedTag{ - digestRef: trustedRef, - tagRef: ref, - }) - } - } - - _, err := fmt.Fprintln(buf, line) - if err != nil { - return nil, nil, err - } - } - - return buf.Bytes(), resolvedTags, scanner.Err() -} - -// replaceDockerfileTarWrapper wraps the given input tar archive stream and -// replaces the entry with the given Dockerfile name with the contents of the -// new Dockerfile. Returns a new tar archive stream with the replaced -// Dockerfile. -func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - - defer inputTarStream.Close() - - for { - hdr, err := tarReader.Next() - if err == io.EOF { - // Signals end of archive.
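// tar.Reader.Next returns io.EOF once the final entry has been consumed.
// Both writers must then be closed, in this order: closing the tar.Writer
// flushes the archive's trailing zero-filled blocks into the pipe, and
// closing the PipeWriter afterwards lets the reader of pipeReader observe
// a clean io.EOF instead of blocking forever.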
- tarWriter.Close() - pipeWriter.Close() - return - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - var content io.Reader = tarReader - if hdr.Name == dockerfileName { - // This entry is the Dockerfile. Since the tar archive was - // generated from a directory on the local filesystem, the - // Dockerfile will only appear once in the archive. - var newDockerfile []byte - newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator) - if err != nil { - pipeWriter.CloseWithError(err) - return - } - hdr.Size = int64(len(newDockerfile)) - content = bytes.NewBuffer(newDockerfile) - } - - if err := tarWriter.WriteHeader(hdr); err != nil { - pipeWriter.CloseWithError(err) - return - } - - if _, err := io.Copy(tarWriter, content); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - }() - - return pipeReader -} diff --git a/api/client/image/history.go b/api/client/image/history.go deleted file mode 100644 index abf2a0bb3..000000000 --- a/api/client/image/history.go +++ /dev/null @@ -1,99 +0,0 @@ -package image - -import ( - "fmt" - "strconv" - "strings" - "text/tabwriter" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type historyOptions struct { - image string - - human bool - quiet bool - noTrunc bool -} - -// NewHistoryCommand create a new `docker history` command -func NewHistoryCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts historyOptions - - cmd := &cobra.Command{ - Use: "history [OPTIONS] IMAGE", - Short: "Show the history of an image", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.image = args[0] - return runHistory(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format") - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - - return cmd -} - -func runHistory(dockerCli *client.DockerCli, opts historyOptions) error { - ctx := context.Background() - - history, err := dockerCli.Client().ImageHistory(ctx, opts.image) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - - if opts.quiet { - for _, entry := range history { - if opts.noTrunc { - fmt.Fprintf(w, "%s\n", entry.ID) - } else { - fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID)) - } - } - w.Flush() - return nil - } - - var imageID string - var createdBy string - var created string - var size string - - fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") - for _, entry := range history { - imageID = entry.ID - createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1) - if opts.noTrunc == false { - createdBy = stringutils.Truncate(createdBy, 45) - imageID = stringid.TruncateID(entry.ID) - } - - if opts.human { - created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago" - size = units.HumanSize(float64(entry.Size)) - } else { - created = time.Unix(entry.Created, 0).Format(time.RFC3339) - size = strconv.FormatInt(entry.Size, 10) - } - - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment) - } - w.Flush() - return nil -} diff --git a/api/client/image/images.go b/api/client/image/images.go 
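replaceDockerfileTarWrapper above is the heart of client-side content trust for builds: the context is rewritten in flight through an io.Pipe, never buffered to disk. Before the next deleted file, a self-contained sketch of the same pipe-and-rewrite pattern; the hard-coded all-zero digest stands in for the notary lookup that TrustedReference performs, and the image names are placeholders, not real references:

package main

import (
	"archive/tar"
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
	"regexp"
	"strings"
)

var fromLine = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`)

// pinFromLines rewrites every FROM line to a digest reference; the fixed
// digest is a placeholder for a real trust lookup.
func pinFromLines(r io.Reader) ([]byte, error) {
	var buf bytes.Buffer
	sc := bufio.NewScanner(r)
	for sc.Scan() {
		line := sc.Text()
		if m := fromLine.FindStringSubmatch(line); m != nil {
			line = "FROM " + m[1] + "@sha256:" + strings.Repeat("0", 64)
		}
		fmt.Fprintln(&buf, line)
	}
	return buf.Bytes(), sc.Err()
}

// rewriteTar copies in to the returned stream, swapping the body (and Size)
// of the "Dockerfile" entry for its pinned rewrite in a single pass.
func rewriteTar(in io.Reader) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		tr := tar.NewReader(in)
		tw := tar.NewWriter(pw)
		for {
			hdr, err := tr.Next()
			if err == io.EOF {
				tw.Close()
				pw.Close()
				return
			}
			if err != nil {
				pw.CloseWithError(err)
				return
			}
			var content io.Reader = tr
			if hdr.Name == "Dockerfile" {
				pinned, err := pinFromLines(tr)
				if err != nil {
					pw.CloseWithError(err)
					return
				}
				hdr.Size = int64(len(pinned))
				content = bytes.NewReader(pinned)
			}
			if err := tw.WriteHeader(hdr); err != nil {
				pw.CloseWithError(err)
				return
			}
			if _, err := io.Copy(tw, content); err != nil {
				pw.CloseWithError(err)
				return
			}
		}
	}()
	return pr
}

func main() {
	// Build a one-entry context in memory and run it through the rewriter.
	var in bytes.Buffer
	tw := tar.NewWriter(&in)
	df := []byte("FROM busybox\nRUN echo hi\n")
	tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0644, Size: int64(len(df))})
	tw.Write(df)
	tw.Close()

	tr := tar.NewReader(rewriteTar(&in))
	for {
		if _, err := tr.Next(); err != nil {
			break
		}
		io.Copy(os.Stdout, tr)
	}
}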
deleted file mode 100644 index 460fb7980..000000000 --- a/api/client/image/images.go +++ /dev/null @@ -1,103 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/formatter" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/spf13/cobra" -) - -type imagesOptions struct { - matchName string - - quiet bool - all bool - noTrunc bool - showDigests bool - format string - filter []string -} - -// NewImagesCommand create a new `docker images` command -func NewImagesCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts imagesOptions - - cmd := &cobra.Command{ - Use: "images [OPTIONS] [REPOSITORY[:TAG]]", - Short: "List images", - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) > 0 { - opts.matchName = args[0] - } - return runImages(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") - flags.BoolVarP(&opts.all, "all", "a", false, "Show all images (default hides intermediate images)") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - flags.BoolVar(&opts.showDigests, "digests", false, "Show digests") - flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") - flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Filter output based on conditions provided") - - return cmd -} - -func runImages(dockerCli *client.DockerCli, opts imagesOptions) error { - ctx := context.Background() - - // Consolidate all filter flags, and sanity check them early. - // They'll get process in the daemon/server. 
- imageFilterArgs := filters.NewArgs() - for _, f := range opts.filter { - var err error - imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) - if err != nil { - return err - } - } - - matchName := opts.matchName - - options := types.ImageListOptions{ - MatchName: matchName, - All: opts.all, - Filters: imageFilterArgs, - } - - images, err := dockerCli.Client().ImageList(ctx, options) - if err != nil { - return err - } - - f := opts.format - if len(f) == 0 { - if len(dockerCli.ImagesFormat()) > 0 && !opts.quiet { - f = dockerCli.ImagesFormat() - } else { - f = "table" - } - } - - imagesCtx := formatter.ImageContext{ - Context: formatter.Context{ - Output: dockerCli.Out(), - Format: f, - Quiet: opts.quiet, - Trunc: !opts.noTrunc, - }, - Digest: opts.showDigests, - Images: images, - } - - imagesCtx.Write() - - return nil -} diff --git a/api/client/image/import.go b/api/client/image/import.go deleted file mode 100644 index 2a7c37b90..000000000 --- a/api/client/image/import.go +++ /dev/null @@ -1,86 +0,0 @@ -package image - -import ( - "io" - "os" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type importOptions struct { - source string - reference string - changes []string - message string -} - -// NewImportCommand creates a new `docker import` command -func NewImportCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts importOptions - - cmd := &cobra.Command{ - Use: "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]", - Short: "Import the contents from a tarball to create a filesystem image", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.source = args[0] - if len(args) > 1 { - opts.reference = args[1] - } - return runImport(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringSliceVarP(&opts.changes, "change", "c", []string{}, "Apply Dockerfile instruction to the created image") - flags.StringVarP(&opts.message, "message", "m", "", "Set commit message for imported image") - - return cmd -} - -func runImport(dockerCli *client.DockerCli, opts importOptions) error { - var ( - in io.Reader - srcName = opts.source - ) - - if opts.source == "-" { - in = dockerCli.In() - } else if !urlutil.IsURL(opts.source) { - srcName = "-" - file, err := os.Open(opts.source) - if err != nil { - return err - } - defer file.Close() - in = file - } - - source := types.ImageImportSource{ - Source: in, - SourceName: srcName, - } - - options := types.ImageImportOptions{ - Message: opts.message, - Changes: opts.changes, - } - - clnt := dockerCli.Client() - - responseBody, err := clnt.ImageImport(context.Background(), source, opts.reference, options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream(responseBody, dockerCli.Out(), dockerCli.OutFd(), dockerCli.IsTerminalOut(), nil) -} diff --git a/api/client/image/load.go b/api/client/image/load.go deleted file mode 100644 index 240362d1e..000000000 --- a/api/client/image/load.go +++ /dev/null @@ -1,67 +0,0 @@ -package image - -import ( - "io" - "os" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/spf13/cobra" -) - -type loadOptions struct { - input string - quiet bool -} - -// NewLoadCommand creates a 
new `docker load` command -func NewLoadCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts loadOptions - - cmd := &cobra.Command{ - Use: "load [OPTIONS]", - Short: "Load an image from a tar archive or STDIN", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runLoad(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN") - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output") - - return cmd -} - -func runLoad(dockerCli *client.DockerCli, opts loadOptions) error { - - var input io.Reader = dockerCli.In() - if opts.input != "" { - file, err := os.Open(opts.input) - if err != nil { - return err - } - defer file.Close() - input = file - } - if !dockerCli.IsTerminalOut() { - opts.quiet = true - } - response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet) - if err != nil { - return err - } - defer response.Body.Close() - - if response.Body != nil && response.JSON { - return jsonmessage.DisplayJSONMessagesStream(response.Body, dockerCli.Out(), dockerCli.OutFd(), dockerCli.IsTerminalOut(), nil) - } - - _, err = io.Copy(dockerCli.Out(), response.Body) - return err -} diff --git a/api/client/image/pull.go b/api/client/image/pull.go deleted file mode 100644 index e5968db26..000000000 --- a/api/client/image/pull.go +++ /dev/null @@ -1,85 +0,0 @@ -package image - -import ( - "errors" - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -type pullOptions struct { - remote string - all bool -} - -// NewPullCommand creates a new `docker pull` command -func NewPullCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts pullOptions - - cmd := &cobra.Command{ - Use: "pull [OPTIONS] NAME[:TAG|@DIGEST]", - Short: "Pull an image or a repository from a registry", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.remote = args[0] - return runPull(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository") - client.AddTrustedFlags(flags, true) - - return cmd -} - -func runPull(dockerCli *client.DockerCli, opts pullOptions) error { - distributionRef, err := reference.ParseNamed(opts.remote) - if err != nil { - return err - } - if opts.all && !reference.IsNameOnly(distributionRef) { - return errors.New("tag can't be used with --all-tags/-a") - } - - if !opts.all && reference.IsNameOnly(distributionRef) { - distributionRef = reference.WithDefaultTag(distributionRef) - fmt.Fprintf(dockerCli.Out(), "Using default tag: %s\n", reference.DefaultTag) - } - - var tag string - switch x := distributionRef.(type) { - case reference.Canonical: - tag = x.Digest().String() - case reference.NamedTagged: - tag = x.Tag() - } - - registryRef := registry.ParseReference(tag) - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(distributionRef) - if err != nil { - return err - } - - ctx := context.Background() - - authConfig := dockerCli.ResolveAuthConfig(ctx, repoInfo.Index) - requestPrivilege := dockerCli.RegistryAuthenticationPrivilegedFunc(repoInfo.Index, "pull") - - if client.IsTrusted() && !registryRef.HasDigest() { - // Check if tag is digest - return 
dockerCli.TrustedPull(ctx, repoInfo, registryRef, authConfig, requestPrivilege) - } - - return dockerCli.ImagePullPrivileged(ctx, authConfig, distributionRef.String(), requestPrivilege, opts.all) - -} diff --git a/api/client/image/push.go b/api/client/image/push.go deleted file mode 100644 index 1526b2a26..000000000 --- a/api/client/image/push.go +++ /dev/null @@ -1,62 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -// NewPushCommand creates a new `docker push` command -func NewPushCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "push [OPTIONS] NAME[:TAG]", - Short: "Push an image or a repository to a registry", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runPush(dockerCli, args[0]) - }, - } - - flags := cmd.Flags() - - client.AddTrustedFlags(flags, true) - - return cmd -} - -func runPush(dockerCli *client.DockerCli, remote string) error { - ref, err := reference.ParseNamed(remote) - if err != nil { - return err - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return err - } - - ctx := context.Background() - - // Resolve the Auth config relevant for this server - authConfig := dockerCli.ResolveAuthConfig(ctx, repoInfo.Index) - requestPrivilege := dockerCli.RegistryAuthenticationPrivilegedFunc(repoInfo.Index, "push") - - if client.IsTrusted() { - return dockerCli.TrustedPush(ctx, repoInfo, ref, authConfig, requestPrivilege) - } - - responseBody, err := dockerCli.ImagePushPrivileged(ctx, authConfig, ref.String(), requestPrivilege) - if err != nil { - return err - } - - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream(responseBody, dockerCli.Out(), dockerCli.OutFd(), dockerCli.IsTerminalOut(), nil) -} diff --git a/api/client/image/remove.go b/api/client/image/remove.go deleted file mode 100644 index c7c8322b8..000000000 --- a/api/client/image/remove.go +++ /dev/null @@ -1,70 +0,0 @@ -package image - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type removeOptions struct { - force bool - noPrune bool -} - -// NewRemoveCommand create a new `docker remove` command -func NewRemoveCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts removeOptions - - cmd := &cobra.Command{ - Use: "rmi [OPTIONS] IMAGE [IMAGE...]", - Short: "Remove one or more images", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, opts, args) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image") - flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents") - - return cmd -} - -func runRemove(dockerCli *client.DockerCli, opts removeOptions, images []string) error { - client := dockerCli.Client() - ctx := context.Background() - - options := types.ImageRemoveOptions{ - Force: opts.force, - PruneChildren: !opts.noPrune, - } - - var errs []string - for _, image := range images { - dels, err := client.ImageRemove(ctx, image, options) - if err != nil { - errs = append(errs, 
err.Error()) - } else { - for _, del := range dels { - if del.Deleted != "" { - fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted) - } else { - fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged) - } - } - } - } - - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/image/save.go b/api/client/image/save.go deleted file mode 100644 index c5ea849a3..000000000 --- a/api/client/image/save.go +++ /dev/null @@ -1,57 +0,0 @@ -package image - -import ( - "errors" - "io" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type saveOptions struct { - images []string - output string -} - -// NewSaveCommand creates a new `docker save` command -func NewSaveCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts saveOptions - - cmd := &cobra.Command{ - Use: "save [OPTIONS] IMAGE [IMAGE...]", - Short: "Save one or more images to a tar archive (streamed to STDOUT by default)", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.images = args - return runSave(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") - - return cmd -} - -func runSave(dockerCli *client.DockerCli, opts saveOptions) error { - if opts.output == "" && dockerCli.IsTerminalOut() { - return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") - } - - responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images) - if err != nil { - return err - } - defer responseBody.Close() - - if opts.output == "" { - _, err := io.Copy(dockerCli.Out(), responseBody) - return err - } - - return client.CopyToFile(opts.output, responseBody) -} diff --git a/api/client/image/search.go b/api/client/image/search.go deleted file mode 100644 index d42b8aaf6..000000000 --- a/api/client/image/search.go +++ /dev/null @@ -1,135 +0,0 @@ -package image - -import ( - "fmt" - "sort" - "strings" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - registrytypes "github.com/docker/engine-api/types/registry" - "github.com/spf13/cobra" -) - -type searchOptions struct { - term string - noTrunc bool - limit int - filter []string - - // Deprecated - stars uint - automated bool -} - -// NewSearchCommand create a new `docker search` command -func NewSearchCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts searchOptions - - cmd := &cobra.Command{ - Use: "search [OPTIONS] TERM", - Short: "Search the Docker Hub for images", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.term = args[0] - return runSearch(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Filter output based on conditions provided") - flags.IntVar(&opts.limit, "limit", registry.DefaultSearchLimit, "Max number of search results") - - flags.BoolVar(&opts.automated, "automated", false, "Only show automated builds") - flags.UintVarP(&opts.stars, "stars", "s", 0, "Only displays with at least x stars") - - 
flags.MarkDeprecated("automated", "use --filter=automated=true instead") - flags.MarkDeprecated("stars", "use --filter=stars=3 instead") - - return cmd -} - -func runSearch(dockerCli *client.DockerCli, opts searchOptions) error { - indexInfo, err := registry.ParseSearchIndexInfo(opts.term) - if err != nil { - return err - } - - ctx := context.Background() - - authConfig := dockerCli.ResolveAuthConfig(ctx, indexInfo) - requestPrivilege := dockerCli.RegistryAuthenticationPrivilegedFunc(indexInfo, "search") - - encodedAuth, err := client.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - - searchFilters := filters.NewArgs() - for _, f := range opts.filter { - var err error - searchFilters, err = filters.ParseFlag(f, searchFilters) - if err != nil { - return err - } - } - - options := types.ImageSearchOptions{ - RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - Filters: searchFilters, - Limit: opts.limit, - } - - clnt := dockerCli.Client() - - unorderedResults, err := clnt.ImageSearch(ctx, opts.term, options) - if err != nil { - return err - } - - results := searchResultsByStars(unorderedResults) - sort.Sort(results) - - w := tabwriter.NewWriter(dockerCli.Out(), 10, 1, 3, ' ', 0) - fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") - for _, res := range results { - // --automated and -s, --stars are deprecated since Docker 1.12 - if (opts.automated && !res.IsAutomated) || (int(opts.stars) > res.StarCount) { - continue - } - desc := strings.Replace(res.Description, "\n", " ", -1) - desc = strings.Replace(desc, "\r", " ", -1) - if !opts.noTrunc && len(desc) > 45 { - desc = stringutils.Truncate(desc, 42) + "..." - } - fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) - if res.IsOfficial { - fmt.Fprint(w, "[OK]") - - } - fmt.Fprint(w, "\t") - if res.IsAutomated { - fmt.Fprint(w, "[OK]") - } - fmt.Fprint(w, "\n") - } - w.Flush() - return nil -} - -// SearchResultsByStars sorts search results in descending order by number of stars. 
-type searchResultsByStars []registrytypes.SearchResult - -func (r searchResultsByStars) Len() int { return len(r) } -func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/api/client/image/tag.go b/api/client/image/tag.go deleted file mode 100644 index 665e3430c..000000000 --- a/api/client/image/tag.go +++ /dev/null @@ -1,41 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type tagOptions struct { - image string - name string -} - -// NewTagCommand create a new `docker tag` command -func NewTagCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts tagOptions - - cmd := &cobra.Command{ - Use: "tag IMAGE[:TAG] IMAGE[:TAG]", - Short: "Tag an image into a repository", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.image = args[0] - opts.name = args[1] - return runTag(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - return cmd -} - -func runTag(dockerCli *client.DockerCli, opts tagOptions) error { - ctx := context.Background() - - return dockerCli.Client().ImageTag(ctx, opts.image, opts.name) -} diff --git a/api/client/inspect.go b/api/client/inspect.go deleted file mode 100644 index e8c0de64e..000000000 --- a/api/client/inspect.go +++ /dev/null @@ -1,95 +0,0 @@ -package client - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client/inspect" - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/client" -) - -// CmdInspect displays low-level information on one or more containers, images or tasks. -// -// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE|TASK [CONTAINER|IMAGE|TASK...] 
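searchResultsByStars above and byNetworkName further down in this patch follow the same three-method sort.Interface recipe; only the field and the direction change. A standalone rendering with a hypothetical record type (note how Less compares j against i to sort descending, exactly as the search code does):

package main

import (
	"fmt"
	"sort"
)

type result struct {
	Name  string
	Stars int
}

type byStars []result

func (r byStars) Len() int           { return len(r) }
func (r byStars) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
func (r byStars) Less(i, j int) bool { return r[j].Stars < r[i].Stars } // descending

func main() {
	rs := []result{{"a", 1}, {"b", 5}, {"c", 3}}
	sort.Sort(byStars(rs))
	fmt.Println(rs) // [{b 5} {c 3} {a 1}]
}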
-func (cli *DockerCli) CmdInspect(args ...string) error { - cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE|TASK [CONTAINER|IMAGE|TASK...]"}, Cli.DockerCommands["inspect"].Description, true) - tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") - inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type (e.g. image, container or task)") - size := cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes if the type is container") - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - if *inspectType != "" && *inspectType != "container" && *inspectType != "image" && *inspectType != "task" { - return fmt.Errorf("%q is not a valid value for --type", *inspectType) - } - - ctx := context.Background() - - var elementSearcher inspect.GetRefFunc - switch *inspectType { - case "container": - elementSearcher = cli.inspectContainers(ctx, *size) - case "image": - elementSearcher = cli.inspectImages(ctx, *size) - case "task": - if *size { - fmt.Fprintln(cli.err, "WARNING: --size ignored for tasks") - } - elementSearcher = cli.inspectTasks(ctx) - default: - elementSearcher = cli.inspectAll(ctx, *size) - } - - return inspect.Inspect(cli.out, cmd.Args(), *tmplStr, elementSearcher) -} - -func (cli *DockerCli) inspectContainers(ctx context.Context, getSize bool) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return cli.client.ContainerInspectWithRaw(ctx, ref, getSize) - } -} - -func (cli *DockerCli) inspectImages(ctx context.Context, getSize bool) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return cli.client.ImageInspectWithRaw(ctx, ref, getSize) - } -} - -func (cli *DockerCli) inspectTasks(ctx context.Context) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return cli.client.TaskInspectWithRaw(ctx, ref) - } -} - -func (cli *DockerCli) inspectAll(ctx context.Context, getSize bool) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - c, rawContainer, err := cli.client.ContainerInspectWithRaw(ctx, ref, getSize) - if err != nil { - // Search for image with that id if a container doesn't exist. - if client.IsErrContainerNotFound(err) { - i, rawImage, err := cli.client.ImageInspectWithRaw(ctx, ref, getSize) - if err != nil { - if client.IsErrImageNotFound(err) { - // Search for task with that id if an image doesn't exist. - t, rawTask, err := cli.client.TaskInspectWithRaw(ctx, ref) - if err != nil { - return nil, nil, fmt.Errorf("Error: No such image, container or task: %s", ref) - } - if getSize { - fmt.Fprintln(cli.err, "WARNING: --size ignored for tasks") - } - return t, rawTask, nil - } - return nil, nil, err - } - return i, rawImage, nil - } - return nil, nil, err - } - return c, rawContainer, nil - } -} diff --git a/api/client/inspect/inspector.go b/api/client/inspect/inspector.go deleted file mode 100644 index b0537e846..000000000 --- a/api/client/inspect/inspector.go +++ /dev/null @@ -1,195 +0,0 @@ -package inspect - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "text/template" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/cli" - "github.com/docker/docker/utils/templates" -) - -// Inspector defines an interface to implement to process elements -type Inspector interface { - Inspect(typedElement interface{}, rawElement []byte) error - Flush() error -} - -// TemplateInspector uses a text template to inspect elements.
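inspectAll above encodes a try-each-type chain keyed on typed not-found errors (IsErrContainerNotFound and IsErrImageNotFound from engine-api): fall through only when the reference simply was not that kind of object, and surface every other failure immediately. The shape of that chain, reduced to stdlib errors and hypothetical lookup functions:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// The lookups are stand-ins for the engine-api inspect calls.
func findContainer(ref string) (string, error) { return "", errNotFound }
func findImage(ref string) (string, error)     { return "image:" + ref, nil }
func findTask(ref string) (string, error)      { return "", errNotFound }

// inspectAny tries container, then image, then task, falling through only
// on not-found and returning any other error as-is.
func inspectAny(ref string) (string, error) {
	if c, err := findContainer(ref); err == nil {
		return c, nil
	} else if !errors.Is(err, errNotFound) {
		return "", err
	}
	if i, err := findImage(ref); err == nil {
		return i, nil
	} else if !errors.Is(err, errNotFound) {
		return "", err
	}
	if t, err := findTask(ref); err == nil {
		return t, nil
	}
	return "", fmt.Errorf("no such image, container or task: %s", ref)
}

func main() {
	fmt.Println(inspectAny("web")) // image:web <nil>
}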
-type TemplateInspector struct { - outputStream io.Writer - buffer *bytes.Buffer - tmpl *template.Template -} - -// NewTemplateInspector creates a new inspector with a template. -func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { - return &TemplateInspector{ - outputStream: outputStream, - buffer: new(bytes.Buffer), - tmpl: tmpl, - } -} - -// NewTemplateInspectorFromString creates a new TemplateInspector from a string -// which is compiled into a template. -func NewTemplateInspectorFromString(out io.Writer, tmplStr string) (Inspector, error) { - if tmplStr == "" { - return NewIndentedInspector(out), nil - } - - tmpl, err := templates.Parse(tmplStr) - if err != nil { - return nil, fmt.Errorf("Template parsing error: %s", err) - } - return NewTemplateInspector(out, tmpl), nil -} - -// GetRefFunc is a function which is used by Inspect to fetch an object from a -// reference -type GetRefFunc func(ref string) (interface{}, []byte, error) - -// Inspect fetches objects by reference using GetRefFunc and writes the json -// representation to the output writer. -func Inspect(out io.Writer, references []string, tmplStr string, getRef GetRefFunc) error { - inspector, err := NewTemplateInspectorFromString(out, tmplStr) - if err != nil { - return cli.StatusError{StatusCode: 64, Status: err.Error()} - } - - var inspectErr error - for _, ref := range references { - element, raw, err := getRef(ref) - if err != nil { - inspectErr = err - break - } - - if err := inspector.Inspect(element, raw); err != nil { - inspectErr = err - break - } - } - - if err := inspector.Flush(); err != nil { - logrus.Errorf("%s\n", err) - } - - if inspectErr != nil { - return cli.StatusError{StatusCode: 1, Status: inspectErr.Error()} - } - return nil -} - -// Inspect executes the inspect template. -// It decodes the raw element into a map if the initial execution fails. -// This allows docker cli to parse inspect structs injected with Swarm fields. -func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { - buffer := new(bytes.Buffer) - if err := i.tmpl.Execute(buffer, typedElement); err != nil { - if rawElement == nil { - return fmt.Errorf("Template parsing error: %v", err) - } - return i.tryRawInspectFallback(rawElement) - } - i.buffer.Write(buffer.Bytes()) - i.buffer.WriteByte('\n') - return nil -} - -// tryRawInspectFallback executes the inspect template with a raw interface. -// This allows docker cli to parse inspect structs injected with Swarm fields. -func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte) error { - var raw interface{} - buffer := new(bytes.Buffer) - rdr := bytes.NewReader(rawElement) - dec := json.NewDecoder(rdr) - - if rawErr := dec.Decode(&raw); rawErr != nil { - return fmt.Errorf("unable to read inspect data: %v", rawErr) - } - - tmplMissingKey := i.tmpl.Option("missingkey=error") - if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil { - return fmt.Errorf("Template parsing error: %v", rawErr) - } - - i.buffer.Write(buffer.Bytes()) - i.buffer.WriteByte('\n') - return nil -} - -// Flush writes the result of inspecting all elements into the output stream. -func (i *TemplateInspector) Flush() error { - if i.buffer.Len() == 0 { - _, err := io.WriteString(i.outputStream, "\n") - return err - } - _, err := io.Copy(i.outputStream, i.buffer) - return err -} - -// IndentedInspector uses a buffer to store the indented representation of an element.
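The fallback in tryRawInspectFallback above hinges on Option("missingkey=error"): executed against the typed struct, a field the struct lacks is a hard error, so the inspector retries against the raw JSON decoded into generic maps, where fields injected by Swarm are still reachable. The trick in isolation:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"text/template"
)

type typed struct{ Name string }

func main() {
	tmpl := template.Must(template.New("t").Parse("{{.Swarm}}"))

	// Pass 1: the typed struct has no Swarm field, so execution fails.
	var out bytes.Buffer
	if err := tmpl.Execute(&out, typed{Name: "x"}); err != nil {
		// Pass 2: retry against the raw JSON as generic maps, with
		// missingkey=error so genuinely absent keys still fail loudly.
		raw := []byte(`{"Name":"x","Swarm":"injected"}`)
		var v interface{}
		if err := json.Unmarshal(raw, &v); err != nil {
			panic(err)
		}
		out.Reset()
		if err := tmpl.Option("missingkey=error").Execute(&out, v); err != nil {
			panic(err)
		}
	}
	fmt.Println(out.String()) // injected
}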
-type IndentedInspector struct { - outputStream io.Writer - elements []interface{} - rawElements [][]byte -} - -// NewIndentedInspector generates a new IndentedInspector. -func NewIndentedInspector(outputStream io.Writer) Inspector { - return &IndentedInspector{ - outputStream: outputStream, - } -} - -// Inspect writes the raw element with an indented json format. -func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error { - if rawElement != nil { - i.rawElements = append(i.rawElements, rawElement) - } else { - i.elements = append(i.elements, typedElement) - } - return nil -} - -// Flush write the result of inspecting all elements into the output stream. -func (i *IndentedInspector) Flush() error { - if len(i.elements) == 0 && len(i.rawElements) == 0 { - _, err := io.WriteString(i.outputStream, "[]\n") - return err - } - - var buffer io.Reader - if len(i.rawElements) > 0 { - bytesBuffer := new(bytes.Buffer) - bytesBuffer.WriteString("[") - for idx, r := range i.rawElements { - bytesBuffer.Write(r) - if idx < len(i.rawElements)-1 { - bytesBuffer.WriteString(",") - } - } - bytesBuffer.WriteString("]") - indented := new(bytes.Buffer) - if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil { - return err - } - buffer = indented - } else { - b, err := json.MarshalIndent(i.elements, "", " ") - if err != nil { - return err - } - buffer = bytes.NewReader(b) - } - - if _, err := io.Copy(i.outputStream, buffer); err != nil { - return err - } - _, err := io.WriteString(i.outputStream, "\n") - return err -} diff --git a/api/client/inspect/inspector_test.go b/api/client/inspect/inspector_test.go deleted file mode 100644 index 1ce1593ab..000000000 --- a/api/client/inspect/inspector_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package inspect - -import ( - "bytes" - "strings" - "testing" - - "github.com/docker/docker/utils/templates" -) - -type testElement struct { - DNS string `json:"Dns"` -} - -func TestTemplateInspectorDefault(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.DNS}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "0.0.0.0\n" { - t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) - } -} - -func TestTemplateInspectorEmpty(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.DNS}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "\n" { - t.Fatalf("Expected `\\n`, got `%s`", b.String()) - } -} - -func TestTemplateInspectorTemplateError(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.Foo}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - - err = i.Inspect(testElement{"0.0.0.0"}, nil) - if err == nil { - t.Fatal("Expected error got nil") - } - - if !strings.HasPrefix(err.Error(), "Template parsing error") { - t.Fatalf("Expected template error, got %v", err) - } -} - -func TestTemplateInspectorRawFallback(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.Dns}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0"}`)); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "0.0.0.0\n" 
{ - t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) - } -} - -func TestTemplateInspectorRawFallbackError(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.Dns}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - err = i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Foo": "0.0.0.0"}`)) - if err == nil { - t.Fatal("Expected error got nil") - } - - if !strings.HasPrefix(err.Error(), "Template parsing error") { - t.Fatalf("Expected template error, got %v", err) - } -} - -func TestTemplateInspectorMultiple(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.DNS}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "0.0.0.0\n1.1.1.1\n" { - t.Fatalf("Expected `0.0.0.0\\n1.1.1.1\\n`, got `%s`", b.String()) - } -} - -func TestIndentedInspectorDefault(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := `[ - { - "Dns": "0.0.0.0" - } -] -` - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} - -func TestIndentedInspectorMultiple(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := `[ - { - "Dns": "0.0.0.0" - }, - { - "Dns": "1.1.1.1" - } -] -` - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} - -func TestIndentedInspectorEmpty(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := "[]\n" - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} - -func TestIndentedInspectorRawElements(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0", "Node": "0"}`)); err != nil { - t.Fatal(err) - } - - if err := i.Inspect(testElement{"1.1.1.1"}, []byte(`{"Dns": "1.1.1.1", "Node": "1"}`)); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := `[ - { - "Dns": "0.0.0.0", - "Node": "0" - }, - { - "Dns": "1.1.1.1", - "Node": "1" - } -] -` - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} diff --git a/api/client/network/cmd.go b/api/client/network/cmd.go deleted file mode 100644 index f616129f1..000000000 --- a/api/client/network/cmd.go +++ /dev/null @@ -1,31 +0,0 @@ -package network - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" -) - -// NewNetworkCommand returns a cobra command for `network` subcommands -func NewNetworkCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "network", - Short: "Manage Docker networks", - Args: cli.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - fmt.Fprintf(dockerCli.Err(), 
"\n"+cmd.UsageString()) - }, - } - cmd.AddCommand( - newConnectCommand(dockerCli), - newCreateCommand(dockerCli), - newDisconnectCommand(dockerCli), - newInspectCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - ) - return cmd -} diff --git a/api/client/network/connect.go b/api/client/network/connect.go deleted file mode 100644 index 57a7299c1..000000000 --- a/api/client/network/connect.go +++ /dev/null @@ -1,64 +0,0 @@ -package network - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types/network" - "github.com/spf13/cobra" -) - -type connectOptions struct { - network string - container string - ipaddress string - ipv6address string - links opts.ListOpts - aliases []string - linklocalips []string -} - -func newConnectCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := connectOptions{ - links: opts.NewListOpts(runconfigopts.ValidateLink), - } - - cmd := &cobra.Command{ - Use: "connect [OPTIONS] NETWORK CONTAINER", - Short: "Connect a container to a network", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.network = args[0] - opts.container = args[1] - return runConnect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVar(&opts.ipaddress, "ip", "", "IP Address") - flags.StringVar(&opts.ipv6address, "ip6", "", "IPv6 Address") - flags.Var(&opts.links, "link", "Add link to another container") - flags.StringSliceVar(&opts.aliases, "alias", []string{}, "Add network-scoped alias for the container") - flags.StringSliceVar(&opts.linklocalips, "link-local-ip", []string{}, "Add a link-local address for the container") - - return cmd -} - -func runConnect(dockerCli *client.DockerCli, opts connectOptions) error { - client := dockerCli.Client() - - epConfig := &network.EndpointSettings{ - IPAMConfig: &network.EndpointIPAMConfig{ - IPv4Address: opts.ipaddress, - IPv6Address: opts.ipv6address, - LinkLocalIPs: opts.linklocalips, - }, - Links: opts.links.GetAll(), - Aliases: opts.aliases, - } - - return client.NetworkConnect(context.Background(), opts.network, opts.container, epConfig) -} diff --git a/api/client/network/create.go b/api/client/network/create.go deleted file mode 100644 index b7460fbcc..000000000 --- a/api/client/network/create.go +++ /dev/null @@ -1,222 +0,0 @@ -package network - -import ( - "fmt" - "net" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/network" - "github.com/spf13/cobra" -) - -type createOptions struct { - name string - driver string - driverOpts opts.MapOpts - labels []string - internal bool - ipv6 bool - - ipamDriver string - ipamSubnet []string - ipamIPRange []string - ipamGateway []string - ipamAux opts.MapOpts - ipamOpt opts.MapOpts -} - -func newCreateCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := createOptions{ - driverOpts: *opts.NewMapOpts(nil, nil), - ipamAux: *opts.NewMapOpts(nil, nil), - ipamOpt: *opts.NewMapOpts(nil, nil), - } - - cmd := &cobra.Command{ - Use: "create [OPTIONS] NETWORK", - Short: "Create a network", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.name = args[0] - 
return runCreate(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.driver, "driver", "d", "bridge", "Driver to manage the Network") - flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") - flags.StringSliceVar(&opts.labels, "label", []string{}, "Set metadata on a network") - flags.BoolVar(&opts.internal, "internal", false, "Restrict external access to the network") - flags.BoolVar(&opts.ipv6, "ipv6", false, "Enable IPv6 networking") - - flags.StringVar(&opts.ipamDriver, "ipam-driver", "default", "IP Address Management Driver") - flags.StringSliceVar(&opts.ipamSubnet, "subnet", []string{}, "Subnet in CIDR format that represents a network segment") - flags.StringSliceVar(&opts.ipamIPRange, "ip-range", []string{}, "Allocate container IP from a sub-range") - flags.StringSliceVar(&opts.ipamGateway, "gateway", []string{}, "IPv4 or IPv6 gateway for the master subnet") - - flags.Var(&opts.ipamAux, "aux-address", "Auxiliary IPv4 or IPv6 addresses used by Network driver") - flags.Var(&opts.ipamOpt, "ipam-opt", "Set IPAM driver specific options") - - return cmd -} - -func runCreate(dockerCli *client.DockerCli, opts createOptions) error { - client := dockerCli.Client() - - ipamCfg, err := consolidateIpam(opts.ipamSubnet, opts.ipamIPRange, opts.ipamGateway, opts.ipamAux.GetAll()) - if err != nil { - return err - } - - // Construct network create request body - nc := types.NetworkCreate{ - Driver: opts.driver, - Options: opts.driverOpts.GetAll(), - IPAM: network.IPAM{ - Driver: opts.ipamDriver, - Config: ipamCfg, - Options: opts.ipamOpt.GetAll(), - }, - CheckDuplicate: true, - Internal: opts.internal, - EnableIPv6: opts.ipv6, - Labels: runconfigopts.ConvertKVStringsToMap(opts.labels), - } - - resp, err := client.NetworkCreate(context.Background(), opts.name, nc) - if err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "%s\n", resp.ID) - return nil -} - -// consolidateIpam consolidates the IPAM configuration as a group from the different -// related flags: a user can configure a network with multiple non-overlapping subnets, -// and hence the various related parameters can be correlated and consolidated. It folds -// subnets, ip-ranges, gateways and auxiliary addresses into -// structured ipam data.
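consolidateIpam follows in full below; the primitive everything hangs on is CIDR containment, i.e. "does this range, gateway or aux-address belong to that subnet". That reduces to net.ParseCIDR plus IPNet.Contains:

package main

import (
	"fmt"
	"net"
)

// contains reports whether addr (a bare IP or a CIDR) falls inside subnet,
// the same question subnetMatches below answers for ranges and gateways.
func contains(subnet, addr string) (bool, error) {
	_, ipnet, err := net.ParseCIDR(subnet)
	if err != nil {
		return false, err
	}
	ip := net.ParseIP(addr)
	if ip == nil {
		if ip, _, err = net.ParseCIDR(addr); err != nil {
			return false, err
		}
	}
	return ipnet.Contains(ip), nil
}

func main() {
	fmt.Println(contains("10.0.0.0/16", "10.0.5.1"))    // true <nil>
	fmt.Println(contains("10.0.0.0/16", "10.1.0.0/24")) // false <nil>
}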
-func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { - if len(subnets) < len(ranges) || len(subnets) < len(gateways) { - return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet") - } - iData := map[string]*network.IPAMConfig{} - - // Populate non-overlapping subnets into consolidation map - for _, s := range subnets { - for k := range iData { - ok1, err := subnetMatches(s, k) - if err != nil { - return nil, err - } - ok2, err := subnetMatches(k, s) - if err != nil { - return nil, err - } - if ok1 || ok2 { - return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported") - } - } - iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} - } - - // Validate and add valid ip ranges - for _, r := range ranges { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, r) - if err != nil { - return nil, err - } - if !ok { - continue - } - if iData[s].IPRange != "" { - return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) - } - d := iData[s] - d.IPRange = r - match = true - } - if !match { - return nil, fmt.Errorf("no matching subnet for range %s", r) - } - } - - // Validate and add valid gateways - for _, g := range gateways { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, g) - if err != nil { - return nil, err - } - if !ok { - continue - } - if iData[s].Gateway != "" { - return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) - } - d := iData[s] - d.Gateway = g - match = true - } - if !match { - return nil, fmt.Errorf("no matching subnet for gateway %s", g) - } - } - - // Validate and add aux-addresses - for key, aa := range auxaddrs { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, aa) - if err != nil { - return nil, err - } - if !ok { - continue - } - iData[s].AuxAddress[key] = aa - match = true - } - if !match { - return nil, fmt.Errorf("no matching subnet for aux-address %s", aa) - } - } - - idl := []network.IPAMConfig{} - for _, v := range iData { - idl = append(idl, *v) - } - return idl, nil -} - -func subnetMatches(subnet, data string) (bool, error) { - var ( - ip net.IP - ) - - _, s, err := net.ParseCIDR(subnet) - if err != nil { - return false, fmt.Errorf("Invalid subnet %s : %v", s, err) - } - - if strings.Contains(data, "/") { - ip, _, err = net.ParseCIDR(data) - if err != nil { - return false, fmt.Errorf("Invalid cidr %s : %v", data, err) - } - } else { - ip = net.ParseIP(data) - } - - return s.Contains(ip), nil -} diff --git a/api/client/network/disconnect.go b/api/client/network/disconnect.go deleted file mode 100644 index 7d5111ba6..000000000 --- a/api/client/network/disconnect.go +++ /dev/null @@ -1,41 +0,0 @@ -package network - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type disconnectOptions struct { - network string - container string - force bool -} - -func newDisconnectCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := disconnectOptions{} - - cmd := &cobra.Command{ - Use: "disconnect [OPTIONS] NETWORK CONTAINER", - Short: "Disconnect a container from a network", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.network = args[0] - opts.container = args[1] - return runDisconnect(dockerCli, 
opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Force the container to disconnect from a network") - - return cmd -} - -func runDisconnect(dockerCli *client.DockerCli, opts disconnectOptions) error { - client := dockerCli.Client() - - return client.NetworkDisconnect(context.Background(), opts.network, opts.container, opts.force) -} diff --git a/api/client/network/inspect.go b/api/client/network/inspect.go deleted file mode 100644 index 3a503e14d..000000000 --- a/api/client/network/inspect.go +++ /dev/null @@ -1,45 +0,0 @@ -package network - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/inspect" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - format string - names []string -} - -func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] NETWORK [NETWORK...]", - Short: "Display detailed information on one or more networks", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.names = args - return runInspect(dockerCli, opts) - }, - } - - cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template") - - return cmd -} - -func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - - ctx := context.Background() - - getNetFunc := func(name string) (interface{}, []byte, error) { - return client.NetworkInspectWithRaw(ctx, name) - } - - return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getNetFunc) -} diff --git a/api/client/network/list.go b/api/client/network/list.go deleted file mode 100644 index 292b4f62b..000000000 --- a/api/client/network/list.go +++ /dev/null @@ -1,100 +0,0 @@ -package network - -import ( - "fmt" - "sort" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/spf13/cobra" -) - -type byNetworkName []types.NetworkResource - -func (r byNetworkName) Len() int { return len(r) } - func (r byNetworkName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byNetworkName) Less(i, j int) bool { return r[i].Name < r[j].Name } - -type listOptions struct { - quiet bool - noTrunc bool - filter []string -} - -func newListCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts listOptions - - cmd := &cobra.Command{ - Use: "ls", - Aliases: []string{"list"}, - Short: "List networks", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display network IDs") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate the output") - flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Provide filter values (e.g.
'dangling=true')") - - return cmd -} - -func runList(dockerCli *client.DockerCli, opts listOptions) error { - client := dockerCli.Client() - - netFilterArgs := filters.NewArgs() - for _, f := range opts.filter { - var err error - netFilterArgs, err = filters.ParseFlag(f, netFilterArgs) - if err != nil { - return err - } - } - - options := types.NetworkListOptions{ - Filters: netFilterArgs, - } - - networkResources, err := client.NetworkList(context.Background(), options) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - if !opts.quiet { - fmt.Fprintf(w, "NETWORK ID\tNAME\tDRIVER\tSCOPE") - fmt.Fprintf(w, "\n") - } - - sort.Sort(byNetworkName(networkResources)) - for _, networkResource := range networkResources { - ID := networkResource.ID - netName := networkResource.Name - driver := networkResource.Driver - scope := networkResource.Scope - if !opts.noTrunc { - ID = stringid.TruncateID(ID) - } - if opts.quiet { - fmt.Fprintln(w, ID) - continue - } - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t", - ID, - netName, - driver, - scope) - fmt.Fprint(w, "\n") - } - w.Flush() - return nil -} diff --git a/api/client/network/remove.go b/api/client/network/remove.go deleted file mode 100644 index f5b0196d9..000000000 --- a/api/client/network/remove.go +++ /dev/null @@ -1,43 +0,0 @@ -package network - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "rm NETWORK [NETWORK]...", - Aliases: []string{"remove"}, - Short: "Remove a network", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args) - }, - } -} - -func runRemove(dockerCli *client.DockerCli, networks []string) error { - client := dockerCli.Client() - ctx := context.Background() - status := 0 - - for _, name := range networks { - if err := client.NetworkRemove(ctx, name); err != nil { - fmt.Fprintf(dockerCli.Err(), "%s\n", err) - status = 1 - continue - } - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - - if status != 0 { - return cli.StatusError{StatusCode: status} - } - return nil -} diff --git a/api/client/node/accept.go b/api/client/node/accept.go deleted file mode 100644 index e0b8c86a8..000000000 --- a/api/client/node/accept.go +++ /dev/null @@ -1,31 +0,0 @@ -package node - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" -) - -func newAcceptCommand(dockerCli *client.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "accept NODE [NODE...]", - Short: "Accept a node in the swarm", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runAccept(dockerCli, args) - }, - } -} - -func runAccept(dockerCli *client.DockerCli, nodes []string) error { - accept := func(node *swarm.Node) { - node.Spec.Membership = swarm.NodeMembershipAccepted - } - success := func(nodeID string) { - fmt.Fprintf(dockerCli.Out(), "Node %s accepted in the swarm.\n", nodeID) - } - return updateNodes(dockerCli, nodes, accept, success) -} diff --git a/api/client/node/cmd.go b/api/client/node/cmd.go deleted file mode 100644 index 96c8e5bc0..000000000 --- a/api/client/node/cmd.go +++ /dev/null @@ -1,50 +0,0 @@ -package node - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/spf13/cobra" 
- - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - apiclient "github.com/docker/engine-api/client" -) - -// NewNodeCommand returns a cobra command for `node` subcommands -func NewNodeCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "node", - Short: "Manage Docker Swarm nodes", - Args: cli.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) - }, - } - cmd.AddCommand( - newAcceptCommand(dockerCli), - newDemoteCommand(dockerCli), - newInspectCommand(dockerCli), - newListCommand(dockerCli), - newPromoteCommand(dockerCli), - newRemoveCommand(dockerCli), - newTasksCommand(dockerCli), - newUpdateCommand(dockerCli), - ) - return cmd -} - -// Reference returns the reference of a node. The special value "self" for a node -// reference is mapped to the current node, hence the node ID is retrieved using -// the `/info` endpoint. -func Reference(client apiclient.APIClient, ctx context.Context, ref string) (string, error) { - if ref == "self" { - info, err := client.Info(ctx) - if err != nil { - return "", err - } - return info.Swarm.NodeID, nil - } - return ref, nil -} diff --git a/api/client/node/demote.go b/api/client/node/demote.go deleted file mode 100644 index af6c4c30e..000000000 --- a/api/client/node/demote.go +++ /dev/null @@ -1,31 +0,0 @@ -package node - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" -) - -func newDemoteCommand(dockerCli *client.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "demote NODE [NODE...]", - Short: "Demote a node from manager in the swarm", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runDemote(dockerCli, args) - }, - } -} - -func runDemote(dockerCli *client.DockerCli, nodes []string) error { - demote := func(node *swarm.Node) { - node.Spec.Role = swarm.NodeRoleWorker - } - success := func(nodeID string) { - fmt.Fprintf(dockerCli.Out(), "Manager %s demoted in the swarm.\n", nodeID) - } - return updateNodes(dockerCli, nodes, demote, success) -} diff --git a/api/client/node/inspect.go b/api/client/node/inspect.go deleted file mode 100644 index 60f4683b2..000000000 --- a/api/client/node/inspect.go +++ /dev/null @@ -1,141 +0,0 @@ -package node - -import ( - "fmt" - "io" - "sort" - "strings" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/inspect" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/engine-api/types/swarm" - "github.com/docker/go-units" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type inspectOptions struct { - nodeIds []string - format string - pretty bool -} - -func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] self|NODE [NODE...]", - Short: "Display detailed information on one or more nodes", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.nodeIds = args - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template") - flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format.") - return cmd -} - -func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error { - client 
-	client := dockerCli.Client()
-	ctx := context.Background()
-	getRef := func(ref string) (interface{}, []byte, error) {
-		nodeRef, err := Reference(client, ctx, ref)
-		if err != nil {
-			return nil, nil, err
-		}
-		node, _, err := client.NodeInspectWithRaw(ctx, nodeRef)
-		return node, nil, err
-	}
-
-	if !opts.pretty {
-		return inspect.Inspect(dockerCli.Out(), opts.nodeIds, opts.format, getRef)
-	}
-	return printHumanFriendly(dockerCli.Out(), opts.nodeIds, getRef)
-}
-
-func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc) error {
-	for idx, ref := range refs {
-		obj, _, err := getRef(ref)
-		if err != nil {
-			return err
-		}
-		printNode(out, obj.(swarm.Node))
-
-		// TODO: better way to do this?
-		// print extra space between objects, but not after the last one
-		if idx+1 != len(refs) {
-			fmt.Fprintf(out, "\n\n")
-		}
-	}
-	return nil
-}
-
-// TODO: use a template
-func printNode(out io.Writer, node swarm.Node) {
-	fmt.Fprintf(out, "ID:\t\t\t%s\n", node.ID)
-	ioutils.FprintfIfNotEmpty(out, "Name:\t\t\t%s\n", node.Spec.Name)
-	if node.Spec.Labels != nil {
-		fmt.Fprintln(out, "Labels:")
-		for k, v := range node.Spec.Labels {
-			fmt.Fprintf(out, " - %s = %s\n", k, v)
-		}
-	}
-
-	ioutils.FprintfIfNotEmpty(out, "Hostname:\t\t%s\n", node.Description.Hostname)
-	fmt.Fprintln(out, "Status:")
-	fmt.Fprintf(out, " State:\t\t\t%s\n", client.PrettyPrint(node.Status.State))
-	ioutils.FprintfIfNotEmpty(out, " Message:\t\t%s\n", client.PrettyPrint(node.Status.Message))
-	fmt.Fprintf(out, " Availability:\t\t%s\n", client.PrettyPrint(node.Spec.Availability))
-
-	if node.ManagerStatus != nil {
-		fmt.Fprintln(out, "Manager Status:")
-		fmt.Fprintf(out, " Address:\t\t%s\n", node.ManagerStatus.Addr)
-		fmt.Fprintf(out, " Raft Status:\t\t%s\n", client.PrettyPrint(node.ManagerStatus.Reachability))
-		leader := "No"
-		if node.ManagerStatus.Leader {
-			leader = "Yes"
-		}
-		fmt.Fprintf(out, " Leader:\t\t%s\n", leader)
-	}
-
-	fmt.Fprintln(out, "Platform:")
-	fmt.Fprintf(out, " Operating System:\t%s\n", node.Description.Platform.OS)
-	fmt.Fprintf(out, " Architecture:\t\t%s\n", node.Description.Platform.Architecture)
-
-	fmt.Fprintln(out, "Resources:")
-	fmt.Fprintf(out, " CPUs:\t\t\t%d\n", node.Description.Resources.NanoCPUs/1e9)
-	fmt.Fprintf(out, " Memory:\t\t%s\n", units.BytesSize(float64(node.Description.Resources.MemoryBytes)))
-
-	var pluginTypes []string
-	pluginNamesByType := map[string][]string{}
-	for _, p := range node.Description.Engine.Plugins {
-		// append to pluginTypes only if not done previously
-		if _, ok := pluginNamesByType[p.Type]; !ok {
-			pluginTypes = append(pluginTypes, p.Type)
-		}
-		pluginNamesByType[p.Type] = append(pluginNamesByType[p.Type], p.Name)
-	}
-
-	if len(pluginTypes) > 0 {
-		fmt.Fprintln(out, "Plugins:")
-		sort.Strings(pluginTypes) // ensure stable output
-		for _, pluginType := range pluginTypes {
-			fmt.Fprintf(out, " %s:\t\t%s\n", pluginType, strings.Join(pluginNamesByType[pluginType], ", "))
-		}
-	}
-	fmt.Fprintf(out, "Engine Version:\t\t%s\n", node.Description.Engine.EngineVersion)
-
-	if len(node.Description.Engine.Labels) != 0 {
-		fmt.Fprintln(out, "Engine Labels:")
-		for k, v := range node.Description.Engine.Labels {
-			fmt.Fprintf(out, " - %s = %s", k, v)
-		}
-	}
-
-}
diff --git a/api/client/node/list.go b/api/client/node/list.go
deleted file mode 100644
index 16370056c..000000000
--- a/api/client/node/list.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package node
-
-import (
-	"fmt"
-	"io"
-	"text/tabwriter"
-
-	"golang.org/x/net/context"
-
"github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" -) - -const ( - listItemFmt = "%s\t%s\t%s\t%s\t%s\t%s\n" -) - -type listOptions struct { - quiet bool - filter opts.FilterOpt -} - -func newListCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls", - Aliases: []string{"list"}, - Short: "List nodes in the swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runList(dockerCli *client.DockerCli, opts listOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - nodes, err := client.NodeList( - ctx, - types.NodeListOptions{Filter: opts.filter.Value()}) - if err != nil { - return err - } - - info, err := client.Info(ctx) - if err != nil { - return err - } - - out := dockerCli.Out() - if opts.quiet { - printQuiet(out, nodes) - } else { - printTable(out, nodes, info) - } - return nil -} - -func printTable(out io.Writer, nodes []swarm.Node, info types.Info) { - writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) - - // Ignore flushing errors - defer writer.Flush() - - fmt.Fprintf(writer, listItemFmt, "ID", "HOSTNAME", "MEMBERSHIP", "STATUS", "AVAILABILITY", "MANAGER STATUS") - for _, node := range nodes { - name := node.Description.Hostname - availability := string(node.Spec.Availability) - membership := string(node.Spec.Membership) - - reachability := "" - if node.ManagerStatus != nil { - if node.ManagerStatus.Leader { - reachability = "Leader" - } else { - reachability = string(node.ManagerStatus.Reachability) - } - } - - ID := node.ID - if node.ID == info.Swarm.NodeID { - ID = ID + " *" - } - - fmt.Fprintf( - writer, - listItemFmt, - ID, - name, - client.PrettyPrint(membership), - client.PrettyPrint(string(node.Status.State)), - client.PrettyPrint(availability), - client.PrettyPrint(reachability)) - } -} - -func printQuiet(out io.Writer, nodes []swarm.Node) { - for _, node := range nodes { - fmt.Fprintln(out, node.ID) - } -} diff --git a/api/client/node/opts.go b/api/client/node/opts.go deleted file mode 100644 index cd160252d..000000000 --- a/api/client/node/opts.go +++ /dev/null @@ -1,50 +0,0 @@ -package node - -import ( - "fmt" - "strings" - - "github.com/docker/engine-api/types/swarm" -) - -type nodeOptions struct { - role string - membership string - availability string -} - -func (opts *nodeOptions) ToNodeSpec() (swarm.NodeSpec, error) { - var spec swarm.NodeSpec - - switch swarm.NodeRole(strings.ToLower(opts.role)) { - case swarm.NodeRoleWorker: - spec.Role = swarm.NodeRoleWorker - case swarm.NodeRoleManager: - spec.Role = swarm.NodeRoleManager - case "": - default: - return swarm.NodeSpec{}, fmt.Errorf("invalid role %q, only worker and manager are supported", opts.role) - } - - switch swarm.NodeMembership(strings.ToLower(opts.membership)) { - case swarm.NodeMembershipAccepted: - spec.Membership = swarm.NodeMembershipAccepted - case "": - default: - return swarm.NodeSpec{}, fmt.Errorf("invalid membership %q, only accepted is supported", opts.membership) - } - - switch swarm.NodeAvailability(strings.ToLower(opts.availability)) { - case 
-	case swarm.NodeAvailabilityActive:
-		spec.Availability = swarm.NodeAvailabilityActive
-	case swarm.NodeAvailabilityPause:
-		spec.Availability = swarm.NodeAvailabilityPause
-	case swarm.NodeAvailabilityDrain:
-		spec.Availability = swarm.NodeAvailabilityDrain
-	case "":
-	default:
-		return swarm.NodeSpec{}, fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability)
-	}
-
-	return spec, nil
-}
diff --git a/api/client/node/promote.go b/api/client/node/promote.go
deleted file mode 100644
index b3e0f4d7e..000000000
--- a/api/client/node/promote.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package node
-
-import (
-	"fmt"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/cli"
-	"github.com/docker/engine-api/types/swarm"
-	"github.com/spf13/cobra"
-)
-
-func newPromoteCommand(dockerCli *client.DockerCli) *cobra.Command {
-	return &cobra.Command{
-		Use:   "promote NODE [NODE...]",
-		Short: "Promote a node to a manager in the swarm",
-		Args:  cli.RequiresMinArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return runPromote(dockerCli, args)
-		},
-	}
-}
-
-func runPromote(dockerCli *client.DockerCli, nodes []string) error {
-	promote := func(node *swarm.Node) {
-		node.Spec.Role = swarm.NodeRoleManager
-	}
-	success := func(nodeID string) {
-		fmt.Fprintf(dockerCli.Out(), "Node %s promoted to a manager in the swarm.\n", nodeID)
-	}
-	return updateNodes(dockerCli, nodes, promote, success)
-}
diff --git a/api/client/node/remove.go b/api/client/node/remove.go
deleted file mode 100644
index 540194062..000000000
--- a/api/client/node/remove.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package node
-
-import (
-	"fmt"
-
-	"golang.org/x/net/context"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/cli"
-	"github.com/spf13/cobra"
-)
-
-func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command {
-	return &cobra.Command{
-		Use:     "rm NODE [NODE...]",
-		Aliases: []string{"remove"},
-		Short:   "Remove a node from the swarm",
-		Args:    cli.RequiresMinArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return runRemove(dockerCli, args)
-		},
-	}
-}
-
-func runRemove(dockerCli *client.DockerCli, args []string) error {
-	client := dockerCli.Client()
-	ctx := context.Background()
-	for _, nodeID := range args {
-		err := client.NodeRemove(ctx, nodeID)
-		if err != nil {
-			return err
-		}
-		fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID)
-	}
-	return nil
-}
diff --git a/api/client/node/tasks.go b/api/client/node/tasks.go
deleted file mode 100644
index 87b53e3f8..000000000
--- a/api/client/node/tasks.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package node
-
-import (
-	"golang.org/x/net/context"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/api/client/idresolver"
-	"github.com/docker/docker/api/client/task"
-	"github.com/docker/docker/cli"
-	"github.com/docker/docker/opts"
-	"github.com/docker/engine-api/types"
-	"github.com/docker/engine-api/types/swarm"
-	"github.com/spf13/cobra"
-)
-
-type tasksOptions struct {
-	nodeID    string
-	all       bool
-	noResolve bool
-	filter    opts.FilterOpt
-}
-
-func newTasksCommand(dockerCli *client.DockerCli) *cobra.Command {
-	opts := tasksOptions{filter: opts.NewFilterOpt()}
-
-	cmd := &cobra.Command{
-		Use:   "tasks [OPTIONS] self|NODE",
-		Short: "List tasks running on a node",
-		Args:  cli.ExactArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			opts.nodeID = args[0]
-			return runTasks(dockerCli, opts)
-		},
-	}
-	flags := cmd.Flags()
"Display all instances") - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - nodeRef, err := Reference(client, ctx, opts.nodeID) - if err != nil { - return nil - } - node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) - if err != nil { - return err - } - - filter := opts.filter.Value() - filter.Add("node", node.ID) - if !opts.all && !filter.Include("desired-state") { - filter.Add("desired-state", string(swarm.TaskStateRunning)) - filter.Add("desired-state", string(swarm.TaskStateAccepted)) - } - - tasks, err := client.TaskList( - ctx, - types.TaskListOptions{Filter: filter}) - if err != nil { - return err - } - - return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve)) -} diff --git a/api/client/node/update.go b/api/client/node/update.go deleted file mode 100644 index 840182de0..000000000 --- a/api/client/node/update.go +++ /dev/null @@ -1,83 +0,0 @@ -package node - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "golang.org/x/net/context" -) - -func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts nodeOptions - - cmd := &cobra.Command{ - Use: "update [OPTIONS] NODE", - Short: "Update a node", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runUpdate(dockerCli, cmd.Flags(), args[0]) - }, - } - - flags := cmd.Flags() - flags.StringVar(&opts.role, flagRole, "", "Role of the node (worker/manager)") - flags.StringVar(&opts.membership, flagMembership, "", "Membership of the node (accepted/rejected)") - flags.StringVar(&opts.availability, flagAvailability, "", "Availability of the node (active/pause/drain)") - return cmd -} - -func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, nodeID string) error { - success := func(_ string) { - fmt.Fprintln(dockerCli.Out(), nodeID) - } - return updateNodes(dockerCli, []string{nodeID}, mergeNodeUpdate(flags), success) -} - -func updateNodes(dockerCli *client.DockerCli, nodes []string, mergeNode func(node *swarm.Node), success func(nodeID string)) error { - client := dockerCli.Client() - ctx := context.Background() - - for _, nodeID := range nodes { - node, _, err := client.NodeInspectWithRaw(ctx, nodeID) - if err != nil { - return err - } - - mergeNode(&node) - err = client.NodeUpdate(ctx, node.ID, node.Version, node.Spec) - if err != nil { - return err - } - success(nodeID) - } - return nil -} - -func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) { - return func(node *swarm.Node) { - spec := &node.Spec - - if flags.Changed(flagRole) { - str, _ := flags.GetString(flagRole) - spec.Role = swarm.NodeRole(str) - } - if flags.Changed(flagMembership) { - str, _ := flags.GetString(flagMembership) - spec.Membership = swarm.NodeMembership(str) - } - if flags.Changed(flagAvailability) { - str, _ := flags.GetString(flagAvailability) - spec.Availability = swarm.NodeAvailability(str) - } - } -} - -const ( - flagRole = "role" - flagMembership = "membership" - flagAvailability = "availability" -) diff --git a/api/client/plugin/cmd.go b/api/client/plugin/cmd.go deleted file mode 100644 index f9ecc519f..000000000 --- a/api/client/plugin/cmd.go +++ 
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !experimental
-
-package plugin
-
-import (
-	"github.com/docker/docker/api/client"
-	"github.com/spf13/cobra"
-)
-
-// NewPluginCommand returns a cobra command for `plugin` subcommands
-func NewPluginCommand(cmd *cobra.Command, dockerCli *client.DockerCli) {
-}
diff --git a/api/client/plugin/cmd_experimental.go b/api/client/plugin/cmd_experimental.go
deleted file mode 100644
index 6c991937f..000000000
--- a/api/client/plugin/cmd_experimental.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// +build experimental
-
-package plugin
-
-import (
-	"fmt"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/cli"
-	"github.com/spf13/cobra"
-)
-
-// NewPluginCommand returns a cobra command for `plugin` subcommands
-func NewPluginCommand(rootCmd *cobra.Command, dockerCli *client.DockerCli) {
-	cmd := &cobra.Command{
-		Use:   "plugin",
-		Short: "Manage Docker plugins",
-		Args:  cli.NoArgs,
-		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
-		},
-	}
-
-	cmd.AddCommand(
-		newDisableCommand(dockerCli),
-		newEnableCommand(dockerCli),
-		newInspectCommand(dockerCli),
-		newInstallCommand(dockerCli),
-		newListCommand(dockerCli),
-		newRemoveCommand(dockerCli),
-		newSetCommand(dockerCli),
-		newPushCommand(dockerCli),
-	)
-
-	rootCmd.AddCommand(cmd)
-}
diff --git a/api/client/plugin/disable.go b/api/client/plugin/disable.go
deleted file mode 100644
index 058c6889c..000000000
--- a/api/client/plugin/disable.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// +build experimental
-
-package plugin
-
-import (
-	"fmt"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/cli"
-	"github.com/docker/docker/reference"
-	"github.com/spf13/cobra"
-	"golang.org/x/net/context"
-)
-
-func newDisableCommand(dockerCli *client.DockerCli) *cobra.Command {
-	cmd := &cobra.Command{
-		Use:   "disable PLUGIN",
-		Short: "Disable a plugin",
-		Args:  cli.ExactArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return runDisable(dockerCli, args[0])
-		},
-	}
-
-	return cmd
-}
-
-func runDisable(dockerCli *client.DockerCli, name string) error {
-	named, err := reference.ParseNamed(name) // FIXME: validate
-	if err != nil {
-		return err
-	}
-	if reference.IsNameOnly(named) {
-		named = reference.WithDefaultTag(named)
-	}
-	ref, ok := named.(reference.NamedTagged)
-	if !ok {
-		return fmt.Errorf("invalid name: %s", named.String())
-	}
-	return dockerCli.Client().PluginDisable(context.Background(), ref.String())
-}
diff --git a/api/client/plugin/enable.go b/api/client/plugin/enable.go
deleted file mode 100644
index cc2488bce..000000000
--- a/api/client/plugin/enable.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// +build experimental
-
-package plugin
-
-import (
-	"fmt"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/cli"
-	"github.com/docker/docker/reference"
-	"github.com/spf13/cobra"
-	"golang.org/x/net/context"
-)
-
-func newEnableCommand(dockerCli *client.DockerCli) *cobra.Command {
-	cmd := &cobra.Command{
-		Use:   "enable PLUGIN",
-		Short: "Enable a plugin",
-		Args:  cli.ExactArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return runEnable(dockerCli, args[0])
-		},
-	}
-
-	return cmd
-}
-
-func runEnable(dockerCli *client.DockerCli, name string) error {
-	named, err := reference.ParseNamed(name) // FIXME: validate
-	if err != nil {
-		return err
-	}
-	if reference.IsNameOnly(named) {
-		named = reference.WithDefaultTag(named)
-	}
-	ref, ok := named.(reference.NamedTagged)
-	if !ok {
fmt.Errorf("invalid name: %s", named.String()) - } - return dockerCli.Client().PluginEnable(context.Background(), ref.String()) -} diff --git a/api/client/plugin/inspect.go b/api/client/plugin/inspect.go deleted file mode 100644 index 8f7e98d44..000000000 --- a/api/client/plugin/inspect.go +++ /dev/null @@ -1,52 +0,0 @@ -// +build experimental - -package plugin - -import ( - "encoding/json" - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/reference" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "inspect PLUGIN", - Short: "Inspect a plugin", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runInspect(dockerCli, args[0]) - }, - } - - return cmd -} - -func runInspect(dockerCli *client.DockerCli, name string) error { - named, err := reference.ParseNamed(name) // FIXME: validate - if err != nil { - return err - } - if reference.IsNameOnly(named) { - named = reference.WithDefaultTag(named) - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid name: %s", named.String()) - } - p, err := dockerCli.Client().PluginInspect(context.Background(), ref.String()) - if err != nil { - return err - } - - b, err := json.MarshalIndent(p, "", "\t") - if err != nil { - return err - } - _, err = dockerCli.Out().Write(b) - return err -} diff --git a/api/client/plugin/install.go b/api/client/plugin/install.go deleted file mode 100644 index 310e7229d..000000000 --- a/api/client/plugin/install.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build experimental - -package plugin - -import ( - "bufio" - "fmt" - "strings" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type pluginOptions struct { - name string - grantPerms bool - disable bool -} - -func newInstallCommand(dockerCli *client.DockerCli) *cobra.Command { - var options pluginOptions - cmd := &cobra.Command{ - Use: "install PLUGIN", - Short: "Install a plugin", - Args: cli.RequiresMinArgs(1), // TODO: allow for set args - RunE: func(cmd *cobra.Command, args []string) error { - options.name = args[0] - return runInstall(dockerCli, options) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&options.grantPerms, "grant-all-permissions", false, "grant all permissions necessary to run the plugin") - flags.BoolVar(&options.disable, "disable", false, "do not enable the plugin on install") - - return cmd -} - -func runInstall(dockerCli *client.DockerCli, opts pluginOptions) error { - named, err := reference.ParseNamed(opts.name) // FIXME: validate - if err != nil { - return err - } - if reference.IsNameOnly(named) { - named = reference.WithDefaultTag(named) - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid name: %s", named.String()) - } - - ctx := context.Background() - - repoInfo, err := registry.ParseRepositoryInfo(named) - if err != nil { - return err - } - - authConfig := dockerCli.ResolveAuthConfig(ctx, repoInfo.Index) - - encodedAuth, err := client.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - - registryAuthFunc := dockerCli.RegistryAuthenticationPrivilegedFunc(repoInfo.Index, "plugin install") - - options := types.PluginInstallOptions{ - RegistryAuth: encodedAuth, 
-		Disabled:              opts.disable,
-		AcceptAllPermissions:  opts.grantPerms,
-		AcceptPermissionsFunc: acceptPrivileges(dockerCli, opts.name),
-		// TODO: Rename PrivilegeFunc, it has nothing to do with privileges
-		PrivilegeFunc: registryAuthFunc,
-	}
-
-	return dockerCli.Client().PluginInstall(ctx, ref.String(), options)
-}
-
-func acceptPrivileges(dockerCli *client.DockerCli, name string) func(privileges types.PluginPrivileges) (bool, error) {
-	return func(privileges types.PluginPrivileges) (bool, error) {
-		fmt.Fprintf(dockerCli.Out(), "Plugin %q is requesting the following privileges:\n", name)
-		for _, privilege := range privileges {
-			fmt.Fprintf(dockerCli.Out(), " - %s: %v\n", privilege.Name, privilege.Value)
-		}
-
-		fmt.Fprint(dockerCli.Out(), "Do you grant the above permissions? [y/N] ")
-		reader := bufio.NewReader(dockerCli.In())
-		line, _, err := reader.ReadLine()
-		if err != nil {
-			return false, err
-		}
-		return strings.ToLower(string(line)) == "y", nil
-	}
-}
diff --git a/api/client/plugin/list.go b/api/client/plugin/list.go
deleted file mode 100644
index d6c788ca7..000000000
--- a/api/client/plugin/list.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build experimental
-
-package plugin
-
-import (
-	"fmt"
-	"text/tabwriter"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/cli"
-	"github.com/spf13/cobra"
-	"golang.org/x/net/context"
-)
-
-func newListCommand(dockerCli *client.DockerCli) *cobra.Command {
-	cmd := &cobra.Command{
-		Use:     "ls",
-		Short:   "List plugins",
-		Aliases: []string{"list"},
-		Args:    cli.ExactArgs(0),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return runList(dockerCli)
-		},
-	}
-
-	return cmd
-}
-
-func runList(dockerCli *client.DockerCli) error {
-	plugins, err := dockerCli.Client().PluginList(context.Background())
-	if err != nil {
-		return err
-	}
-
-	w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0)
-	fmt.Fprintf(w, "NAME \tTAG \tACTIVE")
-	fmt.Fprintf(w, "\n")
-
-	for _, p := range plugins {
-		fmt.Fprintf(w, "%s\t%s\t%v\n", p.Name, p.Tag, p.Active)
-	}
-	w.Flush()
-	return nil
-}
diff --git a/api/client/plugin/push.go b/api/client/plugin/push.go
deleted file mode 100644
index 9ef490796..000000000
--- a/api/client/plugin/push.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// +build experimental
-
-package plugin
-
-import (
-	"fmt"
-
-	"golang.org/x/net/context"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/cli"
-	"github.com/docker/docker/reference"
-	"github.com/docker/docker/registry"
-	"github.com/spf13/cobra"
-)
-
-func newPushCommand(dockerCli *client.DockerCli) *cobra.Command {
-	cmd := &cobra.Command{
-		Use:   "push PLUGIN",
-		Short: "Push a plugin",
-		Args:  cli.ExactArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return runPush(dockerCli, args[0])
-		},
-	}
-	return cmd
-}
-
-func runPush(dockerCli *client.DockerCli, name string) error {
-	named, err := reference.ParseNamed(name) // FIXME: validate
-	if err != nil {
-		return err
-	}
-	if reference.IsNameOnly(named) {
-		named = reference.WithDefaultTag(named)
-	}
-	ref, ok := named.(reference.NamedTagged)
-	if !ok {
-		return fmt.Errorf("invalid name: %s", named.String())
-	}
-
-	ctx := context.Background()
-
-	repoInfo, err := registry.ParseRepositoryInfo(named)
-	if err != nil {
-		return err
-	}
-	authConfig := dockerCli.ResolveAuthConfig(ctx, repoInfo.Index)
-
-	encodedAuth, err := client.EncodeAuthToBase64(authConfig)
-	if err != nil {
-		return err
-	}
-	return dockerCli.Client().PluginPush(ctx, ref.String(), encodedAuth)
-}
diff --git a/api/client/plugin/remove.go b/api/client/plugin/remove.go
deleted file mode 100644
index a8e52a4f2..000000000
--- a/api/client/plugin/remove.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// +build experimental
-
-package plugin
-
-import (
-	"fmt"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/cli"
-	"github.com/docker/docker/reference"
-	"github.com/spf13/cobra"
-	"golang.org/x/net/context"
-)
-
-func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command {
-	cmd := &cobra.Command{
-		Use:     "rm PLUGIN",
-		Short:   "Remove a plugin",
-		Aliases: []string{"remove"},
-		Args:    cli.RequiresMinArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return runRemove(dockerCli, args)
-		},
-	}
-
-	return cmd
-}
-
-func runRemove(dockerCli *client.DockerCli, names []string) error {
-	var errs cli.Errors
-	for _, name := range names {
-		named, err := reference.ParseNamed(name) // FIXME: validate
-		if err != nil {
-			return err
-		}
-		if reference.IsNameOnly(named) {
-			named = reference.WithDefaultTag(named)
-		}
-		ref, ok := named.(reference.NamedTagged)
-		if !ok {
-			return fmt.Errorf("invalid name: %s", named.String())
-		}
-		// TODO: pass names to api instead of making multiple api calls
-		if err := dockerCli.Client().PluginRemove(context.Background(), ref.String()); err != nil {
-			errs = append(errs, err)
-			continue
-		}
-		fmt.Fprintln(dockerCli.Out(), name)
-	}
-	// Do not simplify to `return errs` because even if errs == nil, it is not a nil-error interface value.
-	if errs != nil {
-		return errs
-	}
-	return nil
-}
diff --git a/api/client/plugin/set.go b/api/client/plugin/set.go
deleted file mode 100644
index 188bd63cc..000000000
--- a/api/client/plugin/set.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// +build experimental
-
-package plugin
-
-import (
-	"fmt"
-
-	"golang.org/x/net/context"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/cli"
-	"github.com/docker/docker/reference"
-	"github.com/spf13/cobra"
-)
-
-func newSetCommand(dockerCli *client.DockerCli) *cobra.Command {
-	cmd := &cobra.Command{
-		Use:   "set PLUGIN key1=value1 [key2=value2...]",
-		Short: "Change settings for a plugin",
-		Args:  cli.RequiresMinArgs(2),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return runSet(dockerCli, args[0], args[1:])
-		},
-	}
-
-	return cmd
-}
-
-func runSet(dockerCli *client.DockerCli, name string, args []string) error {
-	named, err := reference.ParseNamed(name) // FIXME: validate
-	if err != nil {
-		return err
-	}
-	if reference.IsNameOnly(named) {
-		named = reference.WithDefaultTag(named)
-	}
-	ref, ok := named.(reference.NamedTagged)
-	if !ok {
-		return fmt.Errorf("invalid name: %s", named.String())
-	}
-	return dockerCli.Client().PluginSet(context.Background(), ref.String(), args)
-}
diff --git a/api/client/registry.go b/api/client/registry.go
deleted file mode 100644
index e67ea7d9c..000000000
--- a/api/client/registry.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package client
-
-import (
-	"bufio"
-	"encoding/base64"
-	"encoding/json"
-	"fmt"
-	"io"
-	"os"
-	"runtime"
-	"strings"
-
-	"golang.org/x/net/context"
-
-	"github.com/docker/docker/pkg/term"
-	"github.com/docker/docker/reference"
-	"github.com/docker/docker/registry"
-	"github.com/docker/engine-api/types"
-	registrytypes "github.com/docker/engine-api/types/registry"
-)
-
-// ElectAuthServer returns the default registry to use (by asking the daemon)
-func (cli *DockerCli) ElectAuthServer(ctx context.Context) string {
-	// The daemon `/info` endpoint informs us of the default registry being
-	// used. This is essential in cross-platforms environment, where for
-	// example a Linux client might be interacting with a Windows daemon, hence
-	// the default registry URL might be Windows specific.
-	serverAddress := registry.IndexServer
-	if info, err := cli.client.Info(ctx); err != nil {
-		fmt.Fprintf(cli.out, "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress)
-	} else {
-		serverAddress = info.IndexServerAddress
-	}
-	return serverAddress
-}
-
-// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload
-func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) {
-	buf, err := json.Marshal(authConfig)
-	if err != nil {
-		return "", err
-	}
-	return base64.URLEncoding.EncodeToString(buf), nil
-}
-
-// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info
-// for the given command.
-func (cli *DockerCli) RegistryAuthenticationPrivilegedFunc(index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc {
-	return func() (string, error) {
-		fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName)
-		indexServer := registry.GetAuthConfigKey(index)
-		authConfig, err := cli.ConfigureAuth("", "", indexServer, false)
-		if err != nil {
-			return "", err
-		}
-		return EncodeAuthToBase64(authConfig)
-	}
-}
-
-func (cli *DockerCli) promptWithDefault(prompt string, configDefault string) {
-	if configDefault == "" {
-		fmt.Fprintf(cli.out, "%s: ", prompt)
-	} else {
-		fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault)
-	}
-}
-
-// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the
-// default index, it uses the default index name for the daemon's platform,
-// not the client's platform.
-func (cli *DockerCli) ResolveAuthConfig(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
-	configKey := index.Name
-	if index.Official {
-		configKey = cli.ElectAuthServer(ctx)
-	}
-
-	a, _ := GetCredentials(cli.configFile, configKey)
-	return a
-}
-
-// RetrieveAuthConfigs return all credentials.
-func (cli *DockerCli) RetrieveAuthConfigs() map[string]types.AuthConfig {
-	acs, _ := GetAllCredentials(cli.configFile)
-	return acs
-}
-
-// ConfigureAuth returns an AuthConfig from the specified user, password and server.
-func (cli *DockerCli) ConfigureAuth(flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) {
-	// On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210
-	if runtime.GOOS == "windows" {
-		cli.in = os.Stdin
-	}
-
-	authconfig, err := GetCredentials(cli.configFile, serverAddress)
-	if err != nil {
-		return authconfig, err
-	}
-
-	// Some links documenting this:
-	// - https://code.google.com/archive/p/mintty/issues/56
-	// - https://github.com/docker/docker/issues/15272
-	// - https://mintty.github.io/ (compatibility)
-	// Linux will hit this if you attempt `cat | docker login`, and Windows
-	// will hit this if you attempt docker login from mintty where stdin
-	// is a pipe, not a character based console.
-	if flPassword == "" && !cli.isTerminalIn {
-		return authconfig, fmt.Errorf("Error: Cannot perform an interactive login from a non TTY device")
-	}
-
-	authconfig.Username = strings.TrimSpace(authconfig.Username)
-
-	if flUser = strings.TrimSpace(flUser); flUser == "" {
-		if isDefaultRegistry {
-			// if this is a default registry (docker hub), then display the following message.
- fmt.Fprintln(cli.out, "Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.") - } - cli.promptWithDefault("Username", authconfig.Username) - flUser = readInput(cli.in, cli.out) - flUser = strings.TrimSpace(flUser) - if flUser == "" { - flUser = authconfig.Username - } - } - if flUser == "" { - return authconfig, fmt.Errorf("Error: Non-null Username Required") - } - if flPassword == "" { - oldState, err := term.SaveState(cli.inFd) - if err != nil { - return authconfig, err - } - fmt.Fprintf(cli.out, "Password: ") - term.DisableEcho(cli.inFd, oldState) - - flPassword = readInput(cli.in, cli.out) - fmt.Fprint(cli.out, "\n") - - term.RestoreTerminal(cli.inFd, oldState) - if flPassword == "" { - return authconfig, fmt.Errorf("Error: Password Required") - } - } - - authconfig.Username = flUser - authconfig.Password = flPassword - authconfig.ServerAddress = serverAddress - authconfig.IdentityToken = "" - - return authconfig, nil -} - -// resolveAuthConfigFromImage retrieves that AuthConfig using the image string -func (cli *DockerCli) resolveAuthConfigFromImage(ctx context.Context, image string) (types.AuthConfig, error) { - registryRef, err := reference.ParseNamed(image) - if err != nil { - return types.AuthConfig{}, err - } - repoInfo, err := registry.ParseRepositoryInfo(registryRef) - if err != nil { - return types.AuthConfig{}, err - } - authConfig := cli.ResolveAuthConfig(ctx, repoInfo.Index) - return authConfig, nil -} - -// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image -func (cli *DockerCli) RetrieveAuthTokenFromImage(ctx context.Context, image string) (string, error) { - // Retrieve encoded auth token from the image reference - authConfig, err := cli.resolveAuthConfigFromImage(ctx, image) - if err != nil { - return "", err - } - encodedAuth, err := EncodeAuthToBase64(authConfig) - if err != nil { - return "", err - } - return encodedAuth, nil -} - -func readInput(in io.Reader, out io.Writer) string { - reader := bufio.NewReader(in) - line, _, err := reader.ReadLine() - if err != nil { - fmt.Fprintln(out, err.Error()) - os.Exit(1) - } - return string(line) -} diff --git a/api/client/registry/login.go b/api/client/registry/login.go deleted file mode 100644 index 452ac7151..000000000 --- a/api/client/registry/login.go +++ /dev/null @@ -1,81 +0,0 @@ -package registry - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type loginOptions struct { - serverAddress string - user string - password string - email string -} - -// NewLoginCommand creates a new `docker login` command -func NewLoginCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts loginOptions - - cmd := &cobra.Command{ - Use: "login [OPTIONS] [SERVER]", - Short: "Log in to a Docker registry.", - Long: "Log in to a Docker registry.\nIf no server is specified, the default is defined by the daemon.", - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) > 0 { - opts.serverAddress = args[0] - } - return runLogin(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.user, "username", "u", "", "Username") - flags.StringVarP(&opts.password, "password", "p", "", "Password") - - // Deprecated in 1.11: Should be removed in docker 1.13 - flags.StringVarP(&opts.email, "email", "e", "", "Email") - 
flags.MarkDeprecated("email", "will be removed in 1.13.") - - return cmd -} - -func runLogin(dockerCli *client.DockerCli, opts loginOptions) error { - ctx := context.Background() - clnt := dockerCli.Client() - - var serverAddress string - var isDefaultRegistry bool - if opts.serverAddress != "" { - serverAddress = opts.serverAddress - } else { - serverAddress = dockerCli.ElectAuthServer(ctx) - isDefaultRegistry = true - } - authConfig, err := dockerCli.ConfigureAuth(opts.user, opts.password, serverAddress, isDefaultRegistry) - if err != nil { - return err - } - response, err := clnt.RegistryLogin(ctx, authConfig) - if err != nil { - return err - } - if response.IdentityToken != "" { - authConfig.Password = "" - authConfig.IdentityToken = response.IdentityToken - } - if err := client.StoreCredentials(dockerCli.ConfigFile(), authConfig); err != nil { - return fmt.Errorf("Error saving credentials: %v", err) - } - - if response.Status != "" { - fmt.Fprintln(dockerCli.Out(), response.Status) - } - return nil -} diff --git a/api/client/registry/logout.go b/api/client/registry/logout.go deleted file mode 100644 index dad27644a..000000000 --- a/api/client/registry/logout.go +++ /dev/null @@ -1,52 +0,0 @@ -package registry - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -// NewLogoutCommand creates a new `docker login` command -func NewLogoutCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "logout [SERVER]", - Short: "Log out from a Docker registry.", - Long: "Log out from a Docker registry.\nIf no server is specified, the default is defined by the daemon.", - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - var serverAddress string - if len(args) > 0 { - serverAddress = args[0] - } - return runLogout(dockerCli, serverAddress) - }, - } - - return cmd -} - -func runLogout(dockerCli *client.DockerCli, serverAddress string) error { - ctx := context.Background() - - if serverAddress == "" { - serverAddress = dockerCli.ElectAuthServer(ctx) - } - - // check if we're logged in based on the records in the config file - // which means it couldn't have user/pass cause they may be in the creds store - if _, ok := dockerCli.ConfigFile().AuthConfigs[serverAddress]; !ok { - fmt.Fprintf(dockerCli.Out(), "Not logged in to %s\n", serverAddress) - return nil - } - - fmt.Fprintf(dockerCli.Out(), "Remove login credentials for %s\n", serverAddress) - if err := client.EraseCredentials(dockerCli.ConfigFile(), serverAddress); err != nil { - fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err) - } - - return nil -} diff --git a/api/client/service/cmd.go b/api/client/service/cmd.go deleted file mode 100644 index 87c4a3e09..000000000 --- a/api/client/service/cmd.go +++ /dev/null @@ -1,32 +0,0 @@ -package service - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" -) - -// NewServiceCommand returns a cobra command for `service` subcommands -func NewServiceCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "service", - Short: "Manage Docker services", - Args: cli.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) - }, - } - cmd.AddCommand( - newCreateCommand(dockerCli), - newInspectCommand(dockerCli), - newTasksCommand(dockerCli), - 
-		newListCommand(dockerCli),
-		newRemoveCommand(dockerCli),
-		newScaleCommand(dockerCli),
-		newUpdateCommand(dockerCli),
-	)
-	return cmd
-}
diff --git a/api/client/service/create.go b/api/client/service/create.go
deleted file mode 100644
index f6210be28..000000000
--- a/api/client/service/create.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package service
-
-import (
-	"fmt"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/cli"
-	"github.com/docker/engine-api/types"
-	"github.com/spf13/cobra"
-	"golang.org/x/net/context"
-)
-
-func newCreateCommand(dockerCli *client.DockerCli) *cobra.Command {
-	opts := newServiceOptions()
-
-	cmd := &cobra.Command{
-		Use:   "create [OPTIONS] IMAGE [COMMAND] [ARG...]",
-		Short: "Create a new service",
-		Args:  cli.RequiresMinArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			opts.image = args[0]
-			if len(args) > 1 {
-				opts.args = args[1:]
-			}
-			return runCreate(dockerCli, opts)
-		},
-	}
-	flags := cmd.Flags()
-	flags.StringVar(&opts.mode, flagMode, "replicated", "Service mode (replicated or global)")
-	addServiceFlags(cmd, opts)
-
-	flags.VarP(&opts.labels, flagLabel, "l", "Service labels")
-	flags.VarP(&opts.env, flagEnv, "e", "Set environment variables")
-	flags.Var(&opts.mounts, flagMount, "Attach a mount to the service")
-	flags.StringSliceVar(&opts.constraints, flagConstraint, []string{}, "Placement constraints")
-	flags.StringSliceVar(&opts.networks, flagNetwork, []string{}, "Network attachments")
-	flags.VarP(&opts.endpoint.ports, flagPublish, "p", "Publish a port as a node port")
-
-	flags.SetInterspersed(false)
-	return cmd
-}
-
-func runCreate(dockerCli *client.DockerCli, opts *serviceOptions) error {
-	apiClient := dockerCli.Client()
-	createOpts := types.ServiceCreateOptions{}
-
-	service, err := opts.ToService()
-	if err != nil {
-		return err
-	}
-
-	ctx := context.Background()
-
-	// only send auth if flag was set
-	if opts.registryAuth {
-		// Retrieve encoded auth token from the image reference
-		encodedAuth, err := dockerCli.RetrieveAuthTokenFromImage(ctx, opts.image)
-		if err != nil {
-			return err
-		}
-		createOpts.EncodedRegistryAuth = encodedAuth
-	}
-
-	response, err := apiClient.ServiceCreate(ctx, service, createOpts)
-	if err != nil {
-		return err
-	}
-
-	fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID)
-	return nil
-}
diff --git a/api/client/service/inspect.go b/api/client/service/inspect.go
deleted file mode 100644
index dc23ae617..000000000
--- a/api/client/service/inspect.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package service
-
-import (
-	"fmt"
-	"io"
-	"strings"
-
-	"golang.org/x/net/context"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/api/client/inspect"
-	"github.com/docker/docker/cli"
-	"github.com/docker/docker/pkg/ioutils"
-	apiclient "github.com/docker/engine-api/client"
-	"github.com/docker/engine-api/types/swarm"
-	"github.com/docker/go-units"
-	"github.com/spf13/cobra"
-)
-
-type inspectOptions struct {
-	refs   []string
-	format string
-	pretty bool
-}
-
-func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command {
-	var opts inspectOptions
-
-	cmd := &cobra.Command{
-		Use:   "inspect [OPTIONS] SERVICE [SERVICE...]",
-		Short: "Display detailed information on one or more services",
-		Args:  cli.RequiresMinArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			opts.refs = args
-
-			if opts.pretty && len(opts.format) > 0 {
-				return fmt.Errorf("--format is incompatible with human friendly format")
-			}
-			return runInspect(dockerCli, opts)
-		},
-	}
-
-	flags := cmd.Flags()
-	flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template")
-	flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format.")
-	return cmd
-}
-
-func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error {
-	client := dockerCli.Client()
-	ctx := context.Background()
-
-	getRef := func(ref string) (interface{}, []byte, error) {
-		service, _, err := client.ServiceInspectWithRaw(ctx, ref)
-		if err == nil || !apiclient.IsErrServiceNotFound(err) {
-			return service, nil, err
-		}
-		return nil, nil, fmt.Errorf("Error: no such service: %s", ref)
-	}
-
-	if !opts.pretty {
-		return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRef)
-	}
-
-	return printHumanFriendly(dockerCli.Out(), opts.refs, getRef)
-}
-
-func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc) error {
-	for idx, ref := range refs {
-		obj, _, err := getRef(ref)
-		if err != nil {
-			return err
-		}
-		printService(out, obj.(swarm.Service))
-
-		// TODO: better way to do this?
-		// print extra space between objects, but not after the last one
-		if idx+1 != len(refs) {
-			fmt.Fprintf(out, "\n\n")
-		}
-	}
-	return nil
-}
-
-// TODO: use a template
-func printService(out io.Writer, service swarm.Service) {
-	fmt.Fprintf(out, "ID:\t\t%s\n", service.ID)
-	fmt.Fprintf(out, "Name:\t\t%s\n", service.Spec.Name)
-	if service.Spec.Labels != nil {
-		fmt.Fprintln(out, "Labels:")
-		for k, v := range service.Spec.Labels {
-			fmt.Fprintf(out, " - %s=%s\n", k, v)
-		}
-	}
-
-	if service.Spec.Mode.Global != nil {
-		fmt.Fprintln(out, "Mode:\t\tGlobal")
-	} else {
-		fmt.Fprintln(out, "Mode:\t\tReplicated")
-		if service.Spec.Mode.Replicated.Replicas != nil {
-			fmt.Fprintf(out, " Replicas:\t%d\n", *service.Spec.Mode.Replicated.Replicas)
-		}
-	}
-	fmt.Fprintln(out, "Placement:")
-	if service.Spec.TaskTemplate.Placement != nil && len(service.Spec.TaskTemplate.Placement.Constraints) > 0 {
-		ioutils.FprintfIfNotEmpty(out, " Constraints\t: %s\n", strings.Join(service.Spec.TaskTemplate.Placement.Constraints, ", "))
-	}
-	fmt.Fprintf(out, "UpdateConfig:\n")
-	fmt.Fprintf(out, " Parallelism:\t%d\n", service.Spec.UpdateConfig.Parallelism)
-	if service.Spec.UpdateConfig.Delay.Nanoseconds() > 0 {
-		fmt.Fprintf(out, " Delay:\t\t%s\n", service.Spec.UpdateConfig.Delay)
-	}
-	fmt.Fprintf(out, "ContainerSpec:\n")
-	printContainerSpec(out, service.Spec.TaskTemplate.ContainerSpec)
-
-	resources := service.Spec.TaskTemplate.Resources
-	if resources != nil {
-		fmt.Fprintln(out, "Resources:")
-		printResources := func(out io.Writer, requirement string, r *swarm.Resources) {
-			if r == nil || (r.MemoryBytes == 0 && r.NanoCPUs == 0) {
-				return
-			}
-			fmt.Fprintf(out, " %s:\n", requirement)
-			if r.NanoCPUs != 0 {
-				fmt.Fprintf(out, " CPU:\t\t%g\n", float64(r.NanoCPUs)/1e9)
-			}
-			if r.MemoryBytes != 0 {
-				fmt.Fprintf(out, " Memory:\t%s\n", units.BytesSize(float64(r.MemoryBytes)))
-			}
-		}
-		printResources(out, "Reservations", resources.Reservations)
-		printResources(out, "Limits", resources.Limits)
-	}
-	if len(service.Spec.Networks) > 0 {
-		fmt.Fprintf(out, "Networks:")
-		for _, n := range service.Spec.Networks {
-			fmt.Fprintf(out, " %s", n.Target)
-		}
-	}
-
-	if len(service.Endpoint.Ports) > 0 {
-		fmt.Fprintln(out, "Ports:")
-		for _, port := range service.Endpoint.Ports {
-			fmt.Fprintf(out, " Name = %s\n", port.Name)
-			fmt.Fprintf(out, " Protocol = %s\n", port.Protocol)
-			fmt.Fprintf(out, " TargetPort = %d\n", port.TargetPort)
PublishedPort = %d\n", port.PublishedPort) - } - } -} - -func printContainerSpec(out io.Writer, containerSpec swarm.ContainerSpec) { - fmt.Fprintf(out, " Image:\t\t%s\n", containerSpec.Image) - if len(containerSpec.Args) > 0 { - fmt.Fprintf(out, " Args:\t\t%s\n", strings.Join(containerSpec.Args, " ")) - } - if len(containerSpec.Env) > 0 { - fmt.Fprintf(out, " Env:\t\t%s\n", strings.Join(containerSpec.Env, " ")) - } - ioutils.FprintfIfNotEmpty(out, " Dir\t\t%s\n", containerSpec.Dir) - ioutils.FprintfIfNotEmpty(out, " User\t\t%s\n", containerSpec.User) - if len(containerSpec.Mounts) > 0 { - fmt.Fprintln(out, " Mounts:") - for _, v := range containerSpec.Mounts { - fmt.Fprintf(out, " Target = %s\n", v.Target) - fmt.Fprintf(out, " Source = %s\n", v.Source) - fmt.Fprintf(out, " ReadOnly = %v\n", v.ReadOnly) - fmt.Fprintf(out, " Type = %v\n", v.Type) - } - } -} diff --git a/api/client/service/list.go b/api/client/service/list.go deleted file mode 100644 index ab4d4ee80..000000000 --- a/api/client/service/list.go +++ /dev/null @@ -1,124 +0,0 @@ -package service - -import ( - "fmt" - "io" - "strings" - "text/tabwriter" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -const ( - listItemFmt = "%s\t%s\t%s\t%s\t%s\n" -) - -type listOptions struct { - quiet bool - filter opts.FilterOpt -} - -func newListCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls", - Aliases: []string{"list"}, - Short: "List services", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runList(dockerCli *client.DockerCli, opts listOptions) error { - ctx := context.Background() - client := dockerCli.Client() - - services, err := client.ServiceList(ctx, types.ServiceListOptions{Filter: opts.filter.Value()}) - if err != nil { - return err - } - - out := dockerCli.Out() - if opts.quiet { - printQuiet(out, services) - } else { - taskFilter := filters.NewArgs() - for _, service := range services { - taskFilter.Add("service", service.ID) - } - - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: taskFilter}) - if err != nil { - return err - } - - nodes, err := client.NodeList(ctx, types.NodeListOptions{}) - if err != nil { - return err - } - activeNodes := make(map[string]struct{}) - for _, n := range nodes { - if n.Status.State == swarm.NodeStateReady { - activeNodes[n.ID] = struct{}{} - } - } - - running := map[string]int{} - for _, task := range tasks { - if _, nodeActive := activeNodes[task.NodeID]; nodeActive && task.Status.State == "running" { - running[task.ServiceID]++ - } - } - - printTable(out, services, running) - } - return nil -} - -func printTable(out io.Writer, services []swarm.Service, running map[string]int) { - writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) - - // Ignore flushing errors - defer writer.Flush() - - fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "REPLICAS", "IMAGE", "COMMAND") - for _, service := range services { - replicas := "" - 
-		if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
-			replicas = fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas)
-		} else if service.Spec.Mode.Global != nil {
-			replicas = "global"
-		}
-		fmt.Fprintf(
-			writer,
-			listItemFmt,
-			stringid.TruncateID(service.ID),
-			service.Spec.Name,
-			replicas,
-			service.Spec.TaskTemplate.ContainerSpec.Image,
-			strings.Join(service.Spec.TaskTemplate.ContainerSpec.Args, " "))
-	}
-}
-
-func printQuiet(out io.Writer, services []swarm.Service) {
-	for _, service := range services {
-		fmt.Fprintln(out, service.ID)
-	}
-}
diff --git a/api/client/service/opts.go b/api/client/service/opts.go
deleted file mode 100644
index 7c0b0761c..000000000
--- a/api/client/service/opts.go
+++ /dev/null
@@ -1,523 +0,0 @@
-package service
-
-import (
-	"encoding/csv"
-	"fmt"
-	"math/big"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/docker/docker/opts"
-	runconfigopts "github.com/docker/docker/runconfig/opts"
-	"github.com/docker/engine-api/types/swarm"
-	"github.com/docker/go-connections/nat"
-	units "github.com/docker/go-units"
-	"github.com/spf13/cobra"
-)
-
-var (
-	// DefaultReplicas is the default replicas to use for a replicated service
-	DefaultReplicas uint64 = 1
-)
-
-type int64Value interface {
-	Value() int64
-}
-
-type memBytes int64
-
-func (m *memBytes) String() string {
-	return units.BytesSize(float64(m.Value()))
-}
-
-func (m *memBytes) Set(value string) error {
-	val, err := units.RAMInBytes(value)
-	*m = memBytes(val)
-	return err
-}
-
-func (m *memBytes) Type() string {
-	return "MemoryBytes"
-}
-
-func (m *memBytes) Value() int64 {
-	return int64(*m)
-}
-
-type nanoCPUs int64
-
-func (c *nanoCPUs) String() string {
-	return big.NewRat(c.Value(), 1e9).FloatString(3)
-}
-
-func (c *nanoCPUs) Set(value string) error {
-	cpu, ok := new(big.Rat).SetString(value)
-	if !ok {
-		return fmt.Errorf("Failed to parse %v as a rational number", value)
-	}
-	nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
-	if !nano.IsInt() {
-		return fmt.Errorf("value is too precise")
-	}
-	*c = nanoCPUs(nano.Num().Int64())
-	return nil
-}
-
-func (c *nanoCPUs) Type() string {
-	return "NanoCPUs"
-}
-
-func (c *nanoCPUs) Value() int64 {
-	return int64(*c)
-}
-
-// DurationOpt is an option type for time.Duration that uses a pointer. This
-// allows us to get nil values outside, instead of defaulting to 0
-type DurationOpt struct {
-	value *time.Duration
-}
-
-// Set a new value on the option
-func (d *DurationOpt) Set(s string) error {
-	v, err := time.ParseDuration(s)
-	d.value = &v
-	return err
-}
-
-// Type returns the type of this option
-func (d *DurationOpt) Type() string {
-	return "duration-ptr"
-}
-
-// String returns a string repr of this option
-func (d *DurationOpt) String() string {
-	if d.value != nil {
-		return d.value.String()
-	}
-	return "none"
-}
-
-// Value returns the time.Duration
-func (d *DurationOpt) Value() *time.Duration {
-	return d.value
-}
-
-// Uint64Opt represents a uint64.
-type Uint64Opt struct {
-	value *uint64
-}
-
-// Set a new value on the option
-func (i *Uint64Opt) Set(s string) error {
-	v, err := strconv.ParseUint(s, 0, 64)
-	i.value = &v
-	return err
-}
-
-// Type returns the type of this option
-func (i *Uint64Opt) Type() string {
-	return "uint64-ptr"
-}
-
-// String returns a string repr of this option
-func (i *Uint64Opt) String() string {
-	if i.value != nil {
-		return fmt.Sprintf("%v", *i.value)
-	}
-	return "none"
-}
-
-// Value returns the uint64
-func (i *Uint64Opt) Value() *uint64 {
-	return i.value
-}
-
-// MountOpt is a Value type for parsing mounts
-type MountOpt struct {
-	values []swarm.Mount
-}
-
-// Set a new mount value
-func (m *MountOpt) Set(value string) error {
-	csvReader := csv.NewReader(strings.NewReader(value))
-	fields, err := csvReader.Read()
-	if err != nil {
-		return err
-	}
-
-	mount := swarm.Mount{}
-
-	volumeOptions := func() *swarm.VolumeOptions {
-		if mount.VolumeOptions == nil {
-			mount.VolumeOptions = &swarm.VolumeOptions{
-				Labels: make(map[string]string),
-			}
-		}
-		if mount.VolumeOptions.DriverConfig == nil {
-			mount.VolumeOptions.DriverConfig = &swarm.Driver{}
-		}
-		return mount.VolumeOptions
-	}
-
-	bindOptions := func() *swarm.BindOptions {
-		if mount.BindOptions == nil {
-			mount.BindOptions = new(swarm.BindOptions)
-		}
-		return mount.BindOptions
-	}
-
-	setValueOnMap := func(target map[string]string, value string) {
-		parts := strings.SplitN(value, "=", 2)
-		if len(parts) == 1 {
-			target[value] = ""
-		} else {
-			target[parts[0]] = parts[1]
-		}
-	}
-
-	// Set writable as the default
-	for _, field := range fields {
-		parts := strings.SplitN(field, "=", 2)
-		if len(parts) == 1 && strings.ToLower(parts[0]) == "readonly" {
-			mount.ReadOnly = true
-			continue
-		}
-
-		if len(parts) == 1 && strings.ToLower(parts[0]) == "volume-nocopy" {
-			volumeOptions().NoCopy = true
-			continue
-		}
-
-		if len(parts) != 2 {
-			return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
-		}
-
-		key, value := parts[0], parts[1]
-		switch strings.ToLower(key) {
-		case "type":
-			mount.Type = swarm.MountType(strings.ToUpper(value))
-		case "source":
-			mount.Source = value
-		case "target":
-			mount.Target = value
-		case "readonly":
-			ro, err := strconv.ParseBool(value)
-			if err != nil {
-				return fmt.Errorf("invalid value for readonly: %s", value)
-			}
-			mount.ReadOnly = ro
-		case "bind-propagation":
-			bindOptions().Propagation = swarm.MountPropagation(strings.ToUpper(value))
-		case "volume-nocopy":
-			volumeOptions().NoCopy, err = strconv.ParseBool(value)
-			if err != nil {
-				return fmt.Errorf("invalid value for populate: %s", value)
-			}
-		case "volume-label":
-			setValueOnMap(volumeOptions().Labels, value)
-		case "volume-driver":
-			volumeOptions().DriverConfig.Name = value
-		case "volume-driver-opt":
-			if volumeOptions().DriverConfig.Options == nil {
-				volumeOptions().DriverConfig.Options = make(map[string]string)
-			}
-			setValueOnMap(volumeOptions().DriverConfig.Options, value)
-		default:
-			return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
-		}
-	}
-
-	if mount.Type == "" {
-		return fmt.Errorf("type is required")
-	}
-
-	if mount.Target == "" {
-		return fmt.Errorf("target is required")
-	}
-
-	if mount.VolumeOptions != nil && mount.Source == "" {
-		return fmt.Errorf("source is required when specifying volume-* options")
-	}
-
-	if mount.Type == swarm.MountType("BIND") && mount.VolumeOptions != nil {
-		return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", swarm.MountTypeBind)
-	}
swarm.MountType("VOLUME") && mount.BindOptions != nil { - return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", swarm.MountTypeVolume) - } - - m.values = append(m.values, mount) - return nil -} - -// Type returns the type of this option -func (m *MountOpt) Type() string { - return "mount" -} - -// String returns a string repr of this option -func (m *MountOpt) String() string { - mounts := []string{} - for _, mount := range m.values { - repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target) - mounts = append(mounts, repr) - } - return strings.Join(mounts, ", ") -} - -// Value returns the mounts -func (m *MountOpt) Value() []swarm.Mount { - return m.values -} - -type updateOptions struct { - parallelism uint64 - delay time.Duration -} - -type resourceOptions struct { - limitCPU nanoCPUs - limitMemBytes memBytes - resCPU nanoCPUs - resMemBytes memBytes -} - -func (r *resourceOptions) ToResourceRequirements() *swarm.ResourceRequirements { - return &swarm.ResourceRequirements{ - Limits: &swarm.Resources{ - NanoCPUs: r.limitCPU.Value(), - MemoryBytes: r.limitMemBytes.Value(), - }, - Reservations: &swarm.Resources{ - NanoCPUs: r.resCPU.Value(), - MemoryBytes: r.resMemBytes.Value(), - }, - } -} - -type restartPolicyOptions struct { - condition string - delay DurationOpt - maxAttempts Uint64Opt - window DurationOpt -} - -func (r *restartPolicyOptions) ToRestartPolicy() *swarm.RestartPolicy { - return &swarm.RestartPolicy{ - Condition: swarm.RestartPolicyCondition(r.condition), - Delay: r.delay.Value(), - MaxAttempts: r.maxAttempts.Value(), - Window: r.window.Value(), - } -} - -func convertNetworks(networks []string) []swarm.NetworkAttachmentConfig { - nets := []swarm.NetworkAttachmentConfig{} - for _, network := range networks { - nets = append(nets, swarm.NetworkAttachmentConfig{Target: network}) - } - return nets -} - -type endpointOptions struct { - mode string - ports opts.ListOpts -} - -func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec { - portConfigs := []swarm.PortConfig{} - // We can ignore errors because the format was already validated by ValidatePort - ports, portBindings, _ := nat.ParsePortSpecs(e.ports.GetAll()) - - for port := range ports { - portConfigs = append(portConfigs, convertPortToPortConfig(port, portBindings)...) - } - - return &swarm.EndpointSpec{ - Mode: swarm.ResolutionMode(strings.ToLower(e.mode)), - Ports: portConfigs, - } -} - -func convertPortToPortConfig( - port nat.Port, - portBindings map[nat.Port][]nat.PortBinding, -) []swarm.PortConfig { - ports := []swarm.PortConfig{} - - for _, binding := range portBindings[port] { - hostPort, _ := strconv.ParseUint(binding.HostPort, 10, 16) - ports = append(ports, swarm.PortConfig{ - //TODO Name: ? 
- Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), - TargetPort: uint32(port.Int()), - PublishedPort: uint32(hostPort), - }) - } - return ports -} - -// ValidatePort validates a string is in the expected format for a port definition -func ValidatePort(value string) (string, error) { - portMappings, err := nat.ParsePortSpec(value) - for _, portMapping := range portMappings { - if portMapping.Binding.HostIP != "" { - return "", fmt.Errorf("HostIP is not supported by a service.") - } - } - return value, err -} - -type serviceOptions struct { - name string - labels opts.ListOpts - image string - args []string - env opts.ListOpts - workdir string - user string - mounts MountOpt - - resources resourceOptions - stopGrace DurationOpt - - replicas Uint64Opt - mode string - - restartPolicy restartPolicyOptions - constraints []string - update updateOptions - networks []string - endpoint endpointOptions - - registryAuth bool -} - -func newServiceOptions() *serviceOptions { - return &serviceOptions{ - labels: opts.NewListOpts(runconfigopts.ValidateEnv), - env: opts.NewListOpts(runconfigopts.ValidateEnv), - endpoint: endpointOptions{ - ports: opts.NewListOpts(ValidatePort), - }, - } -} - -func (opts *serviceOptions) ToService() (swarm.ServiceSpec, error) { - var service swarm.ServiceSpec - - service = swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - Name: opts.name, - Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: opts.image, - Args: opts.args, - Env: opts.env.GetAll(), - Dir: opts.workdir, - User: opts.user, - Mounts: opts.mounts.Value(), - StopGracePeriod: opts.stopGrace.Value(), - }, - Resources: opts.resources.ToResourceRequirements(), - RestartPolicy: opts.restartPolicy.ToRestartPolicy(), - Placement: &swarm.Placement{ - Constraints: opts.constraints, - }, - }, - Mode: swarm.ServiceMode{}, - UpdateConfig: &swarm.UpdateConfig{ - Parallelism: opts.update.parallelism, - Delay: opts.update.delay, - }, - Networks: convertNetworks(opts.networks), - EndpointSpec: opts.endpoint.ToEndpointSpec(), - } - - switch opts.mode { - case "global": - if opts.replicas.Value() != nil { - return service, fmt.Errorf("replicas can only be used with replicated mode") - } - - service.Mode.Global = &swarm.GlobalService{} - case "replicated": - service.Mode.Replicated = &swarm.ReplicatedService{ - Replicas: opts.replicas.Value(), - } - default: - return service, fmt.Errorf("Unknown mode: %s", opts.mode) - } - return service, nil -} - -// addServiceFlags adds all flags that are common to both `create` and `update`. 
-// Any flags that are not common are added separately in the individual command -func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) { - flags := cmd.Flags() - flags.StringVar(&opts.name, flagName, "", "Service name") - - flags.StringVarP(&opts.workdir, "workdir", "w", "", "Working directory inside the container") - flags.StringVarP(&opts.user, flagUser, "u", "", "Username or UID") - - flags.Var(&opts.resources.limitCPU, flagLimitCPU, "Limit CPUs") - flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory") - flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs") - flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory") - flags.Var(&opts.stopGrace, flagStopGracePeriod, "Time to wait before force killing a container") - - flags.Var(&opts.replicas, flagReplicas, "Number of tasks") - - flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", "Restart when condition is met (none, on-failure, or any)") - flags.Var(&opts.restartPolicy.delay, flagRestartDelay, "Delay between restart attempts") - flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, "Maximum number of restarts before giving up") - flags.Var(&opts.restartPolicy.window, flagRestartWindow, "Window used to evaluate the restart policy") - - flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, 0, "Maximum number of tasks updated simultaneously") - flags.DurationVar(&opts.update.delay, flagUpdateDelay, time.Duration(0), "Delay between updates") - - flags.StringVar(&opts.endpoint.mode, flagEndpointMode, "", "Endpoint mode (vip or dnsrr)") - - flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to Swarm agents") -} - -const ( - flagConstraint = "constraint" - flagConstraintRemove = "constraint-rm" - flagConstraintAdd = "constraint-add" - flagEndpointMode = "endpoint-mode" - flagEnv = "env" - flagEnvRemove = "env-rm" - flagEnvAdd = "env-add" - flagLabel = "label" - flagLabelRemove = "label-rm" - flagLabelAdd = "label-add" - flagLimitCPU = "limit-cpu" - flagLimitMemory = "limit-memory" - flagMode = "mode" - flagMount = "mount" - flagMountRemove = "mount-rm" - flagMountAdd = "mount-add" - flagName = "name" - flagNetwork = "network" - flagNetworkRemove = "network-rm" - flagNetworkAdd = "network-add" - flagPublish = "publish" - flagPublishRemove = "publish-rm" - flagPublishAdd = "publish-add" - flagReplicas = "replicas" - flagReserveCPU = "reserve-cpu" - flagReserveMemory = "reserve-memory" - flagRestartCondition = "restart-condition" - flagRestartDelay = "restart-delay" - flagRestartMaxAttempts = "restart-max-attempts" - flagRestartWindow = "restart-window" - flagStopGracePeriod = "stop-grace-period" - flagUpdateDelay = "update-delay" - flagUpdateParallelism = "update-parallelism" - flagUser = "user" - flagRegistryAuth = "registry-auth" -) diff --git a/api/client/service/opts_test.go b/api/client/service/opts_test.go deleted file mode 100644 index c2a17115f..000000000 --- a/api/client/service/opts_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package service - -import ( - "testing" - "time" - - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/engine-api/types/swarm" -) - -func TestMemBytesString(t *testing.T) { - var mem memBytes = 1048576 - assert.Equal(t, mem.String(), "1 MiB") -} - -func TestMemBytesSetAndValue(t *testing.T) { - var mem memBytes - assert.NilError(t, mem.Set("5kb")) - assert.Equal(t, mem.Value(), int64(5120)) -} - -func TestNanoCPUsString(t *testing.T) { - var cpus 
nanoCPUs = 6100000000 - assert.Equal(t, cpus.String(), "6.100") -} - -func TestNanoCPUsSetAndValue(t *testing.T) { - var cpus nanoCPUs - assert.NilError(t, cpus.Set("0.35")) - assert.Equal(t, cpus.Value(), int64(350000000)) -} - -func TestDurationOptString(t *testing.T) { - dur := time.Duration(300 * 10e8) - duration := DurationOpt{value: &dur} - assert.Equal(t, duration.String(), "5m0s") -} - -func TestDurationOptSetAndValue(t *testing.T) { - var duration DurationOpt - assert.NilError(t, duration.Set("300s")) - assert.Equal(t, *duration.Value(), time.Duration(300*10e8)) -} - -func TestUint64OptString(t *testing.T) { - value := uint64(2345678) - opt := Uint64Opt{value: &value} - assert.Equal(t, opt.String(), "2345678") - - opt = Uint64Opt{} - assert.Equal(t, opt.String(), "none") -} - -func TestUint64OptSetAndValue(t *testing.T) { - var opt Uint64Opt - assert.NilError(t, opt.Set("14445")) - assert.Equal(t, *opt.Value(), uint64(14445)) -} - -func TestMountOptString(t *testing.T) { - mount := MountOpt{ - values: []swarm.Mount{ - { - Type: swarm.MountType("BIND"), - Source: "/home/path", - Target: "/target", - }, - { - Type: swarm.MountType("VOLUME"), - Source: "foo", - Target: "/target/foo", - }, - }, - } - expected := "BIND /home/path /target, VOLUME foo /target/foo" - assert.Equal(t, mount.String(), expected) -} - -func TestMountOptSetNoError(t *testing.T) { - var mount MountOpt - assert.NilError(t, mount.Set("type=bind,target=/target,source=/foo")) - - mounts := mount.Value() - assert.Equal(t, len(mounts), 1) - assert.Equal(t, mounts[0], swarm.Mount{ - Type: swarm.MountType("BIND"), - Source: "/foo", - Target: "/target", - }) -} - -func TestMountOptSetErrorNoType(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("target=/target,source=/foo"), "type is required") -} - -func TestMountOptSetErrorNoTarget(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=VOLUME,source=/foo"), "target is required") -} - -func TestMountOptSetErrorInvalidKey(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=VOLUME,bogus=foo"), "unexpected key 'bogus'") -} - -func TestMountOptSetErrorInvalidField(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=VOLUME,bogus"), "invalid field 'bogus'") -} - -func TestMountOptSetErrorInvalidWritable(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=VOLUME,readonly=no"), "invalid value for readonly: no") -} - -func TestMountOptDefaultEnableWritable(t *testing.T) { - var m MountOpt - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo")) - assert.Equal(t, m.values[0].ReadOnly, false) - - m = MountOpt{} - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly")) - assert.Equal(t, m.values[0].ReadOnly, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=1")) - assert.Equal(t, m.values[0].ReadOnly, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=0")) - assert.Equal(t, m.values[0].ReadOnly, false) -} - -func TestMountOptVolumeNoCopy(t *testing.T) { - var m MountOpt - assert.Error(t, m.Set("type=volume,target=/foo,volume-nocopy"), "source is required") - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo")) - assert.Equal(t, m.values[0].VolumeOptions == nil, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=true")) - assert.Equal(t, m.values[0].VolumeOptions != nil, true) - assert.Equal(t, 
m.values[0].VolumeOptions.NoCopy, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy")) - assert.Equal(t, m.values[0].VolumeOptions != nil, true) - assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=1")) - assert.Equal(t, m.values[0].VolumeOptions != nil, true) - assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) -} - -func TestMountOptTypeConflict(t *testing.T) { - var m MountOpt - assert.Error(t, m.Set("type=bind,target=/foo,source=/foo,volume-nocopy=true"), "cannot mix") - assert.Error(t, m.Set("type=volume,target=/foo,source=/foo,bind-propagation=rprivate"), "cannot mix") -} diff --git a/api/client/service/remove.go b/api/client/service/remove.go deleted file mode 100644 index acbdae0f2..000000000 --- a/api/client/service/remove.go +++ /dev/null @@ -1,47 +0,0 @@ -package service - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command { - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] SERVICE", - Aliases: []string{"remove"}, - Short: "Remove a service", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args) - }, - } - cmd.Flags() - - return cmd -} - -func runRemove(dockerCli *client.DockerCli, sids []string) error { - client := dockerCli.Client() - - ctx := context.Background() - - var errs []string - for _, sid := range sids { - err := client.ServiceRemove(ctx, sid) - if err != nil { - errs = append(errs, err.Error()) - continue - } - fmt.Fprintf(dockerCli.Out(), "%s\n", sid) - } - if len(errs) > 0 { - return fmt.Errorf(strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/service/scale.go b/api/client/service/scale.go deleted file mode 100644 index 1a3ea80ea..000000000 --- a/api/client/service/scale.go +++ /dev/null @@ -1,88 +0,0 @@ -package service - -import ( - "fmt" - "strconv" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -func newScaleCommand(dockerCli *client.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "scale SERVICE=REPLICAS [SERVICE=REPLICAS...]", - Short: "Scale one or multiple services", - Args: scaleArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runScale(dockerCli, args) - }, - } -} - -func scaleArgs(cmd *cobra.Command, args []string) error { - if err := cli.RequiresMinArgs(1)(cmd, args); err != nil { - return err - } - for _, arg := range args { - if parts := strings.SplitN(arg, "=", 2); len(parts) != 2 { - return fmt.Errorf( - "Invalid scale specifier '%s'.\nSee '%s --help'.\n\nUsage: %s\n\n%s", - arg, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } - } - return nil -} - -func runScale(dockerCli *client.DockerCli, args []string) error { - var errors []string - for _, arg := range args { - parts := strings.SplitN(arg, "=", 2) - serviceID, scale := parts[0], parts[1] - if err := runServiceScale(dockerCli, serviceID, scale); err != nil { - errors = append(errors, fmt.Sprintf("%s: %s", serviceID, err.Error())) - } - } - - if len(errors) == 0 { - return nil - } - return fmt.Errorf(strings.Join(errors, "\n")) -} - -func runServiceScale(dockerCli *client.DockerCli, serviceID 
string, scale string) error { - client := dockerCli.Client() - ctx := context.Background() - - service, _, err := client.ServiceInspectWithRaw(ctx, serviceID) - - if err != nil { - return err - } - - serviceMode := &service.Spec.Mode - if serviceMode.Replicated == nil { - return fmt.Errorf("scale can only be used with replicated mode") - } - uintScale, err := strconv.ParseUint(scale, 10, 64) - if err != nil { - return fmt.Errorf("invalid replicas value %s: %s", scale, err.Error()) - } - serviceMode.Replicated.Replicas = &uintScale - - err = client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{}) - if err != nil { - return err - } - - fmt.Fprintf(dockerCli.Out(), "%s scaled to %s\n", serviceID, scale) - return nil -} diff --git a/api/client/service/tasks.go b/api/client/service/tasks.go deleted file mode 100644 index 0dde54731..000000000 --- a/api/client/service/tasks.go +++ /dev/null @@ -1,78 +0,0 @@ -package service - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/idresolver" - "github.com/docker/docker/api/client/node" - "github.com/docker/docker/api/client/task" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" -) - -type tasksOptions struct { - serviceID string - all bool - noResolve bool - filter opts.FilterOpt -} - -func newTasksCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := tasksOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "tasks [OPTIONS] SERVICE", - Short: "List the tasks of a service", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.serviceID = args[0] - return runTasks(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVarP(&opts.all, "all", "a", false, "Display all tasks") - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - service, _, err := client.ServiceInspectWithRaw(ctx, opts.serviceID) - if err != nil { - return err - } - - filter := opts.filter.Value() - filter.Add("service", service.ID) - if !opts.all && !filter.Include("desired-state") { - filter.Add("desired-state", string(swarm.TaskStateRunning)) - filter.Add("desired-state", string(swarm.TaskStateAccepted)) - } - - if filter.Include("node") { - nodeFilters := filter.Get("node") - for _, nodeFilter := range nodeFilters { - nodeReference, err := node.Reference(client, ctx, nodeFilter) - if err != nil { - return err - } - filter.Del("node", nodeFilter) - filter.Add("node", nodeReference) - } - } - - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: filter}) - if err != nil { - return err - } - - return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve)) -} diff --git a/api/client/service/update.go b/api/client/service/update.go deleted file mode 100644 index 8ad3f58e9..000000000 --- a/api/client/service/update.go +++ /dev/null @@ -1,388 +0,0 @@ -package service - -import ( - "fmt" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - runconfigopts 
"github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/swarm" - "github.com/docker/go-connections/nat" - shlex "github.com/flynn-archive/go-shlex" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := newServiceOptions() - - cmd := &cobra.Command{ - Use: "update [OPTIONS] SERVICE", - Short: "Update a service", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runUpdate(dockerCli, cmd.Flags(), args[0]) - }, - } - - flags := cmd.Flags() - flags.String("image", "", "Service image tag") - flags.String("args", "", "Service command args") - addServiceFlags(cmd, opts) - - flags.Var(newListOptsVar(), flagEnvRemove, "Remove an environment variable") - flags.Var(newListOptsVar(), flagLabelRemove, "Remove a label by its key") - flags.Var(newListOptsVar(), flagMountRemove, "Remove a mount by its target path") - flags.Var(newListOptsVar(), flagPublishRemove, "Remove a published port by its target port") - flags.Var(newListOptsVar(), flagNetworkRemove, "Remove a network by name") - flags.Var(newListOptsVar(), flagConstraintRemove, "Remove a constraint") - flags.Var(&opts.labels, flagLabelAdd, "Add or update service labels") - flags.Var(&opts.env, flagEnvAdd, "Add or update environment variables") - flags.Var(&opts.mounts, flagMountAdd, "Add or update a mount on a service") - flags.StringSliceVar(&opts.constraints, flagConstraintAdd, []string{}, "Add or update placement constraints") - flags.StringSliceVar(&opts.networks, flagNetworkAdd, []string{}, "Add or update network attachments") - flags.Var(&opts.endpoint.ports, flagPublishAdd, "Add or update a published port") - return cmd -} - -func newListOptsVar() *opts.ListOpts { - return opts.NewListOptsRef(&[]string{}, nil) -} - -func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, serviceID string) error { - apiClient := dockerCli.Client() - ctx := context.Background() - updateOpts := types.ServiceUpdateOptions{} - - service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID) - if err != nil { - return err - } - - err = updateService(flags, &service.Spec) - if err != nil { - return err - } - - // only send auth if flag was set - sendAuth, err := flags.GetBool(flagRegistryAuth) - if err != nil { - return err - } - if sendAuth { - // Retrieve encoded auth token from the image reference - // This would be the old image if it didn't change in this update - image := service.Spec.TaskTemplate.ContainerSpec.Image - encodedAuth, err := dockerCli.RetrieveAuthTokenFromImage(ctx, image) - if err != nil { - return err - } - updateOpts.EncodedRegistryAuth = encodedAuth - } - - err = apiClient.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, updateOpts) - if err != nil { - return err - } - - fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID) - return nil -} - -func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { - - updateString := func(flag string, field *string) { - if flags.Changed(flag) { - *field, _ = flags.GetString(flag) - } - } - - updateInt64Value := func(flag string, field *int64) { - if flags.Changed(flag) { - *field = flags.Lookup(flag).Value.(int64Value).Value() - } - } - - updateDuration := func(flag string, field *time.Duration) { - if flags.Changed(flag) { - *field, _ = flags.GetDuration(flag) - } - } - - updateDurationOpt := func(flag string, field *time.Duration) { - if flags.Changed(flag) { - *field = 
*flags.Lookup(flag).Value.(*DurationOpt).Value() - } - } - - updateUint64 := func(flag string, field *uint64) { - if flags.Changed(flag) { - *field, _ = flags.GetUint64(flag) - } - } - - updateUint64Opt := func(flag string, field *uint64) { - if flags.Changed(flag) { - *field = *flags.Lookup(flag).Value.(*Uint64Opt).Value() - } - } - - cspec := &spec.TaskTemplate.ContainerSpec - task := &spec.TaskTemplate - - taskResources := func() *swarm.ResourceRequirements { - if task.Resources == nil { - task.Resources = &swarm.ResourceRequirements{} - } - return task.Resources - } - - updateString(flagName, &spec.Name) - updateLabels(flags, &spec.Labels) - updateString("image", &cspec.Image) - updateStringToSlice(flags, "args", &cspec.Args) - updateEnvironment(flags, &cspec.Env) - updateString("workdir", &cspec.Dir) - updateString(flagUser, &cspec.User) - updateMounts(flags, &cspec.Mounts) - - if flags.Changed(flagLimitCPU) || flags.Changed(flagLimitMemory) { - taskResources().Limits = &swarm.Resources{} - updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs) - updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes) - - } - if flags.Changed(flagReserveCPU) || flags.Changed(flagReserveMemory) { - taskResources().Reservations = &swarm.Resources{} - updateInt64Value(flagReserveCPU, &task.Resources.Reservations.NanoCPUs) - updateInt64Value(flagReserveMemory, &task.Resources.Reservations.MemoryBytes) - } - - updateDurationOpt(flagStopGracePeriod, cspec.StopGracePeriod) - - if anyChanged(flags, flagRestartCondition, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow) { - if task.RestartPolicy == nil { - task.RestartPolicy = &swarm.RestartPolicy{} - } - - if flags.Changed(flagRestartCondition) { - value, _ := flags.GetString(flagRestartCondition) - task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value) - } - updateDurationOpt(flagRestartDelay, task.RestartPolicy.Delay) - updateUint64Opt(flagRestartMaxAttempts, task.RestartPolicy.MaxAttempts) - updateDurationOpt((flagRestartWindow), task.RestartPolicy.Window) - } - - if anyChanged(flags, flagConstraintAdd, flagConstraintRemove) { - if task.Placement == nil { - task.Placement = &swarm.Placement{} - } - updatePlacement(flags, task.Placement) - } - - if err := updateReplicas(flags, &spec.Mode); err != nil { - return err - } - - if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay) { - if spec.UpdateConfig == nil { - spec.UpdateConfig = &swarm.UpdateConfig{} - } - updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism) - updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay) - } - - updateNetworks(flags, &spec.Networks) - if flags.Changed(flagEndpointMode) { - value, _ := flags.GetString(flagEndpointMode) - spec.EndpointSpec.Mode = swarm.ResolutionMode(value) - } - - if anyChanged(flags, flagPublishAdd, flagPublishRemove) { - if spec.EndpointSpec == nil { - spec.EndpointSpec = &swarm.EndpointSpec{} - } - updatePorts(flags, &spec.EndpointSpec.Ports) - } - return nil -} - -func updateStringToSlice(flags *pflag.FlagSet, flag string, field *[]string) error { - if !flags.Changed(flag) { - return nil - } - - value, _ := flags.GetString(flag) - valueSlice, err := shlex.Split(value) - *field = valueSlice - return err -} - -func anyChanged(flags *pflag.FlagSet, fields ...string) bool { - for _, flag := range fields { - if flags.Changed(flag) { - return true - } - } - return false -} - -func updatePlacement(flags *pflag.FlagSet, placement *swarm.Placement) { - field, _ := 
flags.GetStringSlice(flagConstraintAdd) - placement.Constraints = append(placement.Constraints, field...) - - toRemove := buildToRemoveSet(flags, flagConstraintRemove) - placement.Constraints = removeItems(placement.Constraints, toRemove, itemKey) -} - -func updateLabels(flags *pflag.FlagSet, field *map[string]string) { - if flags.Changed(flagLabelAdd) { - if field == nil { - *field = map[string]string{} - } - - values := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() - for key, value := range runconfigopts.ConvertKVStringsToMap(values) { - (*field)[key] = value - } - } - - if field != nil && flags.Changed(flagLabelRemove) { - toRemove := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() - for _, label := range toRemove { - delete(*field, label) - } - } -} - -func updateEnvironment(flags *pflag.FlagSet, field *[]string) { - if flags.Changed(flagEnvAdd) { - value := flags.Lookup(flagEnvAdd).Value.(*opts.ListOpts) - *field = append(*field, value.GetAll()...) - } - toRemove := buildToRemoveSet(flags, flagEnvRemove) - *field = removeItems(*field, toRemove, envKey) -} - -func envKey(value string) string { - kv := strings.SplitN(value, "=", 2) - return kv[0] -} - -func itemKey(value string) string { - return value -} - -func buildToRemoveSet(flags *pflag.FlagSet, flag string) map[string]struct{} { - var empty struct{} - toRemove := make(map[string]struct{}) - - if !flags.Changed(flag) { - return toRemove - } - - toRemoveSlice := flags.Lookup(flag).Value.(*opts.ListOpts).GetAll() - for _, key := range toRemoveSlice { - toRemove[key] = empty - } - return toRemove -} - -func removeItems( - seq []string, - toRemove map[string]struct{}, - keyFunc func(string) string, -) []string { - newSeq := []string{} - for _, item := range seq { - if _, exists := toRemove[keyFunc(item)]; !exists { - newSeq = append(newSeq, item) - } - } - return newSeq -} - -func updateMounts(flags *pflag.FlagSet, mounts *[]swarm.Mount) { - if flags.Changed(flagMountAdd) { - values := flags.Lookup(flagMountAdd).Value.(*MountOpt).Value() - *mounts = append(*mounts, values...) - } - toRemove := buildToRemoveSet(flags, flagMountRemove) - - newMounts := []swarm.Mount{} - for _, mount := range *mounts { - if _, exists := toRemove[mount.Target]; !exists { - newMounts = append(newMounts, mount) - } - } - *mounts = newMounts -} - -func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) { - if flags.Changed(flagPublishAdd) { - values := flags.Lookup(flagPublishAdd).Value.(*opts.ListOpts).GetAll() - ports, portBindings, _ := nat.ParsePortSpecs(values) - - for port := range ports { - *portConfig = append(*portConfig, convertPortToPortConfig(port, portBindings)...) 
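// convertPortToPortConfig can return more than one entry for a
// single port when the spec carries several host bindings, hence
// the variadic append.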
- } - } - - if !flags.Changed(flagPublishRemove) { - return - } - toRemove := flags.Lookup(flagPublishRemove).Value.(*opts.ListOpts).GetAll() - newPorts := []swarm.PortConfig{} -portLoop: - for _, port := range *portConfig { - for _, rawTargetPort := range toRemove { - targetPort := nat.Port(rawTargetPort) - if equalPort(targetPort, port) { - continue portLoop - } - } - newPorts = append(newPorts, port) - } - *portConfig = newPorts -} - -func equalPort(targetPort nat.Port, port swarm.PortConfig) bool { - return (string(port.Protocol) == targetPort.Proto() && - port.TargetPort == uint32(targetPort.Int())) -} - -func updateNetworks(flags *pflag.FlagSet, attachments *[]swarm.NetworkAttachmentConfig) { - if flags.Changed(flagNetworkAdd) { - networks, _ := flags.GetStringSlice(flagNetworkAdd) - for _, network := range networks { - *attachments = append(*attachments, swarm.NetworkAttachmentConfig{Target: network}) - } - } - toRemove := buildToRemoveSet(flags, flagNetworkRemove) - newNetworks := []swarm.NetworkAttachmentConfig{} - for _, network := range *attachments { - if _, exists := toRemove[network.Target]; !exists { - newNetworks = append(newNetworks, network) - } - } - *attachments = newNetworks -} - -func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error { - if !flags.Changed(flagReplicas) { - return nil - } - - if serviceMode.Replicated == nil { - return fmt.Errorf("replicas can only be used with replicated mode") - } - serviceMode.Replicated.Replicas = flags.Lookup(flagReplicas).Value.(*Uint64Opt).Value() - return nil -} diff --git a/api/client/service/update_test.go b/api/client/service/update_test.go deleted file mode 100644 index 104a9e53e..000000000 --- a/api/client/service/update_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package service - -import ( - "testing" - - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/engine-api/types/swarm" -) - -func TestUpdateServiceArgs(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("args", "the \"new args\"") - - spec := &swarm.ServiceSpec{} - cspec := &spec.TaskTemplate.ContainerSpec - cspec.Args = []string{"old", "args"} - - updateService(flags, spec) - assert.EqualStringSlice(t, cspec.Args, []string{"the", "new args"}) -} - -func TestUpdateLabels(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("label-add", "toadd=newlabel") - flags.Set("label-rm", "toremove") - - labels := map[string]string{ - "toremove": "thelabeltoremove", - "tokeep": "value", - } - - updateLabels(flags, &labels) - assert.Equal(t, len(labels), 2) - assert.Equal(t, labels["tokeep"], "value") - assert.Equal(t, labels["toadd"], "newlabel") -} - -func TestUpdateLabelsRemoveALabelThatDoesNotExist(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("label-rm", "dne") - - labels := map[string]string{"foo": "theoldlabel"} - updateLabels(flags, &labels) - assert.Equal(t, len(labels), 1) -} - -func TestUpdatePlacement(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("constraint-add", "node=toadd") - flags.Set("constraint-rm", "node!=toremove") - - placement := &swarm.Placement{ - Constraints: []string{"node!=toremove", "container=tokeep"}, - } - - updatePlacement(flags, placement) - assert.Equal(t, len(placement.Constraints), 2) - assert.Equal(t, placement.Constraints[0], "container=tokeep") - assert.Equal(t, placement.Constraints[1], "node=toadd") -} - -func TestUpdateEnvironment(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("env-add", 
"toadd=newenv") - flags.Set("env-rm", "toremove") - - envs := []string{"toremove=theenvtoremove", "tokeep=value"} - - updateEnvironment(flags, &envs) - assert.Equal(t, len(envs), 2) - assert.Equal(t, envs[0], "tokeep=value") - assert.Equal(t, envs[1], "toadd=newenv") -} - -func TestUpdateEnvironmentWithDuplicateValues(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("env-add", "foo=newenv") - flags.Set("env-add", "foo=dupe") - flags.Set("env-rm", "foo") - - envs := []string{"foo=value"} - - updateEnvironment(flags, &envs) - assert.Equal(t, len(envs), 0) -} - -func TestUpdateMounts(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("mount-add", "type=volume,target=/toadd") - flags.Set("mount-rm", "/toremove") - - mounts := []swarm.Mount{ - {Target: "/toremove", Type: swarm.MountType("BIND")}, - {Target: "/tokeep", Type: swarm.MountType("BIND")}, - } - - updateMounts(flags, &mounts) - assert.Equal(t, len(mounts), 2) - assert.Equal(t, mounts[0].Target, "/tokeep") - assert.Equal(t, mounts[1].Target, "/toadd") -} - -func TestUpdateNetworks(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("network-add", "toadd") - flags.Set("network-rm", "toremove") - - attachments := []swarm.NetworkAttachmentConfig{ - {Target: "toremove", Aliases: []string{"foo"}}, - {Target: "tokeep"}, - } - - updateNetworks(flags, &attachments) - assert.Equal(t, len(attachments), 2) - assert.Equal(t, attachments[0].Target, "tokeep") - assert.Equal(t, attachments[1].Target, "toadd") -} - -func TestUpdatePorts(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("publish-add", "1000:1000") - flags.Set("publish-rm", "333/udp") - - portConfigs := []swarm.PortConfig{ - {TargetPort: 333, Protocol: swarm.PortConfigProtocol("udp")}, - {TargetPort: 555}, - } - - updatePorts(flags, &portConfigs) - assert.Equal(t, len(portConfigs), 2) - assert.Equal(t, portConfigs[0].TargetPort, uint32(555)) - assert.Equal(t, portConfigs[1].TargetPort, uint32(1000)) -} diff --git a/api/client/stack/cmd.go b/api/client/stack/cmd.go deleted file mode 100644 index 82a7a1ffa..000000000 --- a/api/client/stack/cmd.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build experimental - -package stack - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -// NewStackCommand returns a cobra command for `stack` subcommands -func NewStackCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "stack", - Short: "Manage Docker stacks", - Args: cli.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) - }, - } - cmd.AddCommand( - newConfigCommand(dockerCli), - newDeployCommand(dockerCli), - newRemoveCommand(dockerCli), - newTasksCommand(dockerCli), - ) - return cmd -} - -// NewTopLevelDeployCommand returns a command for `docker deploy` -func NewTopLevelDeployCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := newDeployCommand(dockerCli) - // Remove the aliases at the top level - cmd.Aliases = []string{} - return cmd -} diff --git a/api/client/stack/cmd_stub.go b/api/client/stack/cmd_stub.go deleted file mode 100644 index 0efc56300..000000000 --- a/api/client/stack/cmd_stub.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !experimental - -package stack - -import ( - "github.com/docker/docker/api/client" - "github.com/spf13/cobra" -) - -// NewStackCommand returns no command -func NewStackCommand(dockerCli *client.DockerCli) *cobra.Command { - 
return &cobra.Command{} -} - -// NewTopLevelDeployCommand returns no command -func NewTopLevelDeployCommand(dockerCli *client.DockerCli) *cobra.Command { - return &cobra.Command{} -} diff --git a/api/client/stack/common.go b/api/client/stack/common.go deleted file mode 100644 index 46c995725..000000000 --- a/api/client/stack/common.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build experimental - -package stack - -import ( - "golang.org/x/net/context" - - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/swarm" -) - -const ( - labelNamespace = "com.docker.stack.namespace" -) - -func getStackLabels(namespace string, labels map[string]string) map[string]string { - if labels == nil { - labels = make(map[string]string) - } - labels[labelNamespace] = namespace - return labels -} - -func getStackFilter(namespace string) filters.Args { - filter := filters.NewArgs() - filter.Add("label", labelNamespace+"="+namespace) - return filter -} - -func getServices( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]swarm.Service, error) { - return apiclient.ServiceList( - ctx, - types.ServiceListOptions{Filter: getStackFilter(namespace)}) -} - -func getNetworks( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]types.NetworkResource, error) { - return apiclient.NetworkList( - ctx, - types.NetworkListOptions{Filters: getStackFilter(namespace)}) -} diff --git a/api/client/stack/config.go b/api/client/stack/config.go deleted file mode 100644 index 696c0c3fc..000000000 --- a/api/client/stack/config.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build experimental - -package stack - -import ( - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/bundlefile" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type configOptions struct { - bundlefile string - namespace string -} - -func newConfigCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts configOptions - - cmd := &cobra.Command{ - Use: "config [OPTIONS] STACK", - Short: "Print the stack configuration", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runConfig(dockerCli, opts) - }, - } - - flags := cmd.Flags() - addBundlefileFlag(&opts.bundlefile, flags) - return cmd -} - -func runConfig(dockerCli *client.DockerCli, opts configOptions) error { - bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile) - if err != nil { - return err - } - return bundlefile.Print(dockerCli.Out(), bundle) -} diff --git a/api/client/stack/deploy.go b/api/client/stack/deploy.go deleted file mode 100644 index 745b56842..000000000 --- a/api/client/stack/deploy.go +++ /dev/null @@ -1,208 +0,0 @@ -// +build experimental - -package stack - -import ( - "fmt" - - "github.com/spf13/cobra" - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/bundlefile" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/swarm" -) - -const ( - defaultNetworkDriver = "overlay" -) - -type deployOptions struct { - bundlefile string - namespace string -} - -func newDeployCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts deployOptions - - cmd := &cobra.Command{ - Use: "deploy [OPTIONS] STACK", - Aliases: []string{"up"}, - Short: "Create and 
update a stack from a Distributed Application Bundle (DAB)", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runDeploy(dockerCli, opts) - }, - } - - flags := cmd.Flags() - addBundlefileFlag(&opts.bundlefile, flags) - return cmd -} - -func runDeploy(dockerCli *client.DockerCli, opts deployOptions) error { - bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile) - if err != nil { - return err - } - - networks := getUniqueNetworkNames(bundle.Services) - ctx := context.Background() - - if err := updateNetworks(ctx, dockerCli, networks, opts.namespace); err != nil { - return err - } - return deployServices(ctx, dockerCli, bundle.Services, opts.namespace) -} - -func getUniqueNetworkNames(services map[string]bundlefile.Service) []string { - networkSet := make(map[string]bool) - for _, service := range services { - for _, network := range service.Networks { - networkSet[network] = true - } - } - - networks := []string{} - for network := range networkSet { - networks = append(networks, network) - } - return networks -} - -func updateNetworks( - ctx context.Context, - dockerCli *client.DockerCli, - networks []string, - namespace string, -) error { - client := dockerCli.Client() - - existingNetworks, err := getNetworks(ctx, client, namespace) - if err != nil { - return err - } - - existingNetworkMap := make(map[string]types.NetworkResource) - for _, network := range existingNetworks { - existingNetworkMap[network.Name] = network - } - - createOpts := types.NetworkCreate{ - Labels: getStackLabels(namespace, nil), - Driver: defaultNetworkDriver, - // TODO: remove when engine-api uses omitempty for IPAM - IPAM: network.IPAM{Driver: "default"}, - } - - for _, internalName := range networks { - name := fmt.Sprintf("%s_%s", namespace, internalName) - - if _, exists := existingNetworkMap[name]; exists { - continue - } - fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name) - if _, err := client.NetworkCreate(ctx, name, createOpts); err != nil { - return err - } - } - return nil -} - -func convertNetworks(networks []string, namespace string, name string) []swarm.NetworkAttachmentConfig { - nets := []swarm.NetworkAttachmentConfig{} - for _, network := range networks { - nets = append(nets, swarm.NetworkAttachmentConfig{ - Target: namespace + "_" + network, - Aliases: []string{name}, - }) - } - return nets -} - -func deployServices( - ctx context.Context, - dockerCli *client.DockerCli, - services map[string]bundlefile.Service, - namespace string, -) error { - apiClient := dockerCli.Client() - out := dockerCli.Out() - - existingServices, err := getServices(ctx, apiClient, namespace) - if err != nil { - return err - } - - existingServiceMap := make(map[string]swarm.Service) - for _, service := range existingServices { - existingServiceMap[service.Spec.Name] = service - } - - for internalName, service := range services { - name := fmt.Sprintf("%s_%s", namespace, internalName) - - var ports []swarm.PortConfig - for _, portSpec := range service.Ports { - ports = append(ports, swarm.PortConfig{ - Protocol: swarm.PortConfigProtocol(portSpec.Protocol), - TargetPort: portSpec.Port, - }) - } - - serviceSpec := swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - Name: name, - Labels: getStackLabels(namespace, service.Labels), - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: service.Image, - Command: service.Command, - Args: service.Args, - Env: service.Env, - }, - }, - EndpointSpec: 
&swarm.EndpointSpec{ - Ports: ports, - }, - Networks: convertNetworks(service.Networks, namespace, internalName), - } - - cspec := &serviceSpec.TaskTemplate.ContainerSpec - if service.WorkingDir != nil { - cspec.Dir = *service.WorkingDir - } - if service.User != nil { - cspec.User = *service.User - } - - if service, exists := existingServiceMap[name]; exists { - fmt.Fprintf(out, "Updating service %s (id: %s)\n", name, service.ID) - - // TODO(nishanttotla): Pass auth token - if err := apiClient.ServiceUpdate( - ctx, - service.ID, - service.Version, - serviceSpec, - types.ServiceUpdateOptions{}, - ); err != nil { - return err - } - } else { - fmt.Fprintf(out, "Creating service %s\n", name) - - // TODO(nishanttotla): Pass headers with X-Registry-Auth - if _, err := apiClient.ServiceCreate(ctx, serviceSpec, types.ServiceCreateOptions{}); err != nil { - return err - } - } - } - - return nil -} diff --git a/api/client/stack/opts.go b/api/client/stack/opts.go deleted file mode 100644 index 81af1dfe2..000000000 --- a/api/client/stack/opts.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build experimental - -package stack - -import ( - "fmt" - "io" - "os" - - "github.com/docker/docker/api/client/bundlefile" - "github.com/spf13/pflag" -) - -func addBundlefileFlag(opt *string, flags *pflag.FlagSet) { - flags.StringVarP( - opt, - "bundle", "f", "", - "Path to a Distributed Application Bundle file (Default: STACK.dab)") -} - -func loadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefile.Bundlefile, error) { - defaultPath := fmt.Sprintf("%s.dab", namespace) - - if path == "" { - path = defaultPath - } - if _, err := os.Stat(path); err != nil { - return nil, fmt.Errorf( - "Bundle %s not found. Specify the path with -f or --bundle", - path) - } - - fmt.Fprintf(stderr, "Loading bundle from %s\n", path) - reader, err := os.Open(path) - if err != nil { - return nil, err - } - defer reader.Close() - - bundle, err := bundlefile.LoadFile(reader) - if err != nil { - return nil, fmt.Errorf("Error reading %s: %v\n", path, err) - } - return bundle, err -} diff --git a/api/client/stack/remove.go b/api/client/stack/remove.go deleted file mode 100644 index 9ba91e5c2..000000000 --- a/api/client/stack/remove.go +++ /dev/null @@ -1,75 +0,0 @@ -// +build experimental - -package stack - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type removeOptions struct { - namespace string -} - -func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts removeOptions - - cmd := &cobra.Command{ - Use: "rm STACK", - Aliases: []string{"remove", "down"}, - Short: "Remove the stack", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runRemove(dockerCli, opts) - }, - } - return cmd -} - -func runRemove(dockerCli *client.DockerCli, opts removeOptions) error { - namespace := opts.namespace - client := dockerCli.Client() - stderr := dockerCli.Err() - ctx := context.Background() - hasError := false - - services, err := getServices(ctx, client, namespace) - if err != nil { - return err - } - for _, service := range services { - fmt.Fprintf(stderr, "Removing service %s\n", service.Spec.Name) - if err := client.ServiceRemove(ctx, service.ID); err != nil { - hasError = true - fmt.Fprintf(stderr, "Failed to remove service %s: %s", service.ID, err) - } - } - - networks, err := getNetworks(ctx, client, namespace) - if err != nil { - return err - 
} - for _, network := range networks { - fmt.Fprintf(stderr, "Removing network %s\n", network.Name) - if err := client.NetworkRemove(ctx, network.ID); err != nil { - hasError = true - fmt.Fprintf(stderr, "Failed to remove network %s: %s", network.ID, err) - } - } - - if len(services) == 0 && len(networks) == 0 { - fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) - return nil - } - - if hasError { - return fmt.Errorf("Failed to remove some resources") - } - return nil -} diff --git a/api/client/stack/tasks.go b/api/client/stack/tasks.go deleted file mode 100644 index e4d0d8bd4..000000000 --- a/api/client/stack/tasks.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build experimental - -package stack - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/idresolver" - "github.com/docker/docker/api/client/task" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" -) - -type tasksOptions struct { - all bool - filter opts.FilterOpt - namespace string - noResolve bool -} - -func newTasksCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := tasksOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "tasks [OPTIONS] STACK", - Short: "List the tasks in the stack", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runTasks(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVarP(&opts.all, "all", "a", false, "Display all tasks") - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error { - namespace := opts.namespace - client := dockerCli.Client() - ctx := context.Background() - - filter := opts.filter.Value() - filter.Add("label", labelNamespace+"="+opts.namespace) - if !opts.all && !filter.Include("desired-state") { - filter.Add("desired-state", string(swarm.TaskStateRunning)) - filter.Add("desired-state", string(swarm.TaskStateAccepted)) - } - - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: filter}) - if err != nil { - return err - } - - if len(tasks) == 0 { - fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) - return nil - } - - return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve)) -} diff --git a/api/client/swarm/cmd.go b/api/client/swarm/cmd.go deleted file mode 100644 index 03a4c0a1c..000000000 --- a/api/client/swarm/cmd.go +++ /dev/null @@ -1,30 +0,0 @@ -package swarm - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" -) - -// NewSwarmCommand returns a cobra command for `swarm` subcommands -func NewSwarmCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "swarm", - Short: "Manage Docker Swarm", - Args: cli.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) - }, - } - cmd.AddCommand( - newInitCommand(dockerCli), - newJoinCommand(dockerCli), - newUpdateCommand(dockerCli), - newLeaveCommand(dockerCli), - newInspectCommand(dockerCli), - ) - return cmd -} diff --git a/api/client/swarm/init.go b/api/client/swarm/init.go deleted file mode 100644 index 
bd0570005..000000000 --- a/api/client/swarm/init.go +++ /dev/null @@ -1,95 +0,0 @@ -package swarm - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -const ( - generatedSecretEntropyBytes = 16 - generatedSecretBase = 36 - // floor(log(2^128-1, 36)) + 1 - maxGeneratedSecretLength = 25 -) - -type initOptions struct { - swarmOptions - listenAddr NodeAddrOption - forceNewCluster bool -} - -func newInitCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := initOptions{ - listenAddr: NewListenAddrOption(), - swarmOptions: swarmOptions{ - autoAccept: NewAutoAcceptOption(), - }, - } - - cmd := &cobra.Command{ - Use: "init", - Short: "Initialize a Swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runInit(dockerCli, cmd.Flags(), opts) - }, - } - - flags := cmd.Flags() - flags.Var(&opts.listenAddr, "listen-addr", "Listen address") - flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state.") - addSwarmFlags(flags, &opts.swarmOptions) - return cmd -} - -func runInit(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts initOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - // If no secret was specified, we create a random one - if !flags.Changed("secret") { - opts.secret = generateRandomSecret() - fmt.Fprintf(dockerCli.Out(), "No --secret provided. Generated random secret:\n %s\n\n", opts.secret) - } - - req := swarm.InitRequest{ - ListenAddr: opts.listenAddr.String(), - ForceNewCluster: opts.forceNewCluster, - Spec: opts.swarmOptions.ToSpec(), - } - - nodeID, err := client.SwarmInit(ctx, req) - if err != nil { - return err - } - - fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID) - - // Fetch CAHash and Address from the API - info, err := client.Info(ctx) - if err != nil { - return err - } - - node, _, err := client.NodeInspectWithRaw(ctx, nodeID) - if err != nil { - return err - } - - if node.ManagerStatus != nil && info.Swarm.CACertHash != "" { - var secretArgs string - if opts.secret != "" { - secretArgs = "--secret " + opts.secret - } - fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n docker swarm join %s \\\n --ca-hash %s \\\n %s\n", secretArgs, info.Swarm.CACertHash, node.ManagerStatus.Addr) - } - - return nil -} diff --git a/api/client/swarm/inspect.go b/api/client/swarm/inspect.go deleted file mode 100644 index 6419ca74b..000000000 --- a/api/client/swarm/inspect.go +++ /dev/null @@ -1,47 +0,0 @@ -package swarm - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/inspect" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - format string -} - -func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS]", - Short: "Inspect the Swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template") - return cmd -} - -func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error { - client := 
dockerCli.Client() - ctx := context.Background() - - swarm, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - getRef := func(_ string) (interface{}, []byte, error) { - return swarm, nil, nil - } - - return inspect.Inspect(dockerCli.Out(), []string{""}, opts.format, getRef) -} diff --git a/api/client/swarm/join.go b/api/client/swarm/join.go deleted file mode 100644 index 0d3cb286c..000000000 --- a/api/client/swarm/join.go +++ /dev/null @@ -1,65 +0,0 @@ -package swarm - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type joinOptions struct { - remote string - listenAddr NodeAddrOption - manager bool - secret string - CACertHash string -} - -func newJoinCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := joinOptions{ - listenAddr: NewListenAddrOption(), - } - - cmd := &cobra.Command{ - Use: "join [OPTIONS] HOST:PORT", - Short: "Join a Swarm as a node and/or manager", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.remote = args[0] - return runJoin(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.Var(&opts.listenAddr, flagListenAddr, "Listen address") - flags.BoolVar(&opts.manager, "manager", false, "Try joining as a manager.") - flags.StringVar(&opts.secret, flagSecret, "", "Secret for node acceptance") - flags.StringVar(&opts.CACertHash, "ca-hash", "", "Hash of the Root Certificate Authority certificate used for trusted join") - return cmd -} - -func runJoin(dockerCli *client.DockerCli, opts joinOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - req := swarm.JoinRequest{ - Manager: opts.manager, - Secret: opts.secret, - ListenAddr: opts.listenAddr.String(), - RemoteAddrs: []string{opts.remote}, - CACertHash: opts.CACertHash, - } - err := client.SwarmJoin(ctx, req) - if err != nil { - return err - } - if opts.manager { - fmt.Fprintln(dockerCli.Out(), "This node joined a Swarm as a manager.") - } else { - fmt.Fprintln(dockerCli.Out(), "This node joined a Swarm as a worker.") - } - return nil -} diff --git a/api/client/swarm/leave.go b/api/client/swarm/leave.go deleted file mode 100644 index 9a45148c2..000000000 --- a/api/client/swarm/leave.go +++ /dev/null @@ -1,44 +0,0 @@ -package swarm - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type leaveOptions struct { - force bool -} - -func newLeaveCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := leaveOptions{} - - cmd := &cobra.Command{ - Use: "leave", - Short: "Leave a Swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runLeave(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&opts.force, "force", false, "Force leave ignoring warnings.") - return cmd -} - -func runLeave(dockerCli *client.DockerCli, opts leaveOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - if err := client.SwarmLeave(ctx, opts.force); err != nil { - return err - } - - fmt.Fprintln(dockerCli.Out(), "Node left the swarm.") - return nil -} diff --git a/api/client/swarm/opts.go b/api/client/swarm/opts.go deleted file mode 100644 index 92011fea2..000000000 --- a/api/client/swarm/opts.go +++ /dev/null @@ -1,262 +0,0 @@ -package swarm - -import ( - "encoding/csv" - "errors" - "fmt" - "strings" - "time" - - 
"github.com/docker/docker/opts" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/pflag" -) - -const ( - defaultListenAddr = "0.0.0.0:2377" - - worker = "WORKER" - manager = "MANAGER" - none = "NONE" - - flagAutoAccept = "auto-accept" - flagCertExpiry = "cert-expiry" - flagDispatcherHeartbeat = "dispatcher-heartbeat" - flagListenAddr = "listen-addr" - flagSecret = "secret" - flagTaskHistoryLimit = "task-history-limit" - flagExternalCA = "external-ca" -) - -var ( - defaultPolicies = []swarm.Policy{ - {Role: worker, Autoaccept: true}, - {Role: manager, Autoaccept: false}, - } -) - -type swarmOptions struct { - autoAccept AutoAcceptOption - secret string - taskHistoryLimit int64 - dispatcherHeartbeat time.Duration - nodeCertExpiry time.Duration - externalCA ExternalCAOption -} - -// NodeAddrOption is a pflag.Value for listen and remote addresses -type NodeAddrOption struct { - addr string -} - -// String prints the representation of this flag -func (a *NodeAddrOption) String() string { - return a.Value() -} - -// Set the value for this flag -func (a *NodeAddrOption) Set(value string) error { - addr, err := opts.ParseTCPAddr(value, a.addr) - if err != nil { - return err - } - a.addr = addr - return nil -} - -// Type returns the type of this flag -func (a *NodeAddrOption) Type() string { - return "node-addr" -} - -// Value returns the value of this option as addr:port -func (a *NodeAddrOption) Value() string { - return strings.TrimPrefix(a.addr, "tcp://") -} - -// NewNodeAddrOption returns a new node address option -func NewNodeAddrOption(addr string) NodeAddrOption { - return NodeAddrOption{addr} -} - -// NewListenAddrOption returns a NodeAddrOption with default values -func NewListenAddrOption() NodeAddrOption { - return NewNodeAddrOption(defaultListenAddr) -} - -// AutoAcceptOption is a value type for auto-accept policy -type AutoAcceptOption struct { - values map[string]struct{} -} - -// String prints a string representation of this option -func (o *AutoAcceptOption) String() string { - keys := []string{} - for key := range o.values { - keys = append(keys, fmt.Sprintf("%s=true", strings.ToLower(key))) - } - return strings.Join(keys, ", ") -} - -// Set sets a new value on this option -func (o *AutoAcceptOption) Set(acceptValues string) error { - for _, value := range strings.Split(acceptValues, ",") { - value = strings.ToUpper(value) - switch value { - case none, worker, manager: - o.values[value] = struct{}{} - default: - return fmt.Errorf("must be one / combination of %s, %s; or NONE", worker, manager) - } - } - // NONE must stand alone, so if any non-NONE setting exist with it, error with conflict - if o.isPresent(none) && len(o.values) > 1 { - return fmt.Errorf("value NONE cannot be specified alongside other node types") - } - return nil -} - -// Type returns the type of this option -func (o *AutoAcceptOption) Type() string { - return "auto-accept" -} - -// Policies returns a representation of this option for the api -func (o *AutoAcceptOption) Policies(secret *string) []swarm.Policy { - policies := []swarm.Policy{} - for _, p := range defaultPolicies { - if len(o.values) != 0 { - if _, ok := o.values[string(p.Role)]; ok { - p.Autoaccept = true - } else { - p.Autoaccept = false - } - } - p.Secret = secret - policies = append(policies, p) - } - return policies -} - -// isPresent returns whether the key exists in the set or not -func (o *AutoAcceptOption) isPresent(key string) bool { - _, c := o.values[key] - return c -} - -// NewAutoAcceptOption returns a new auto-accept 
option -func NewAutoAcceptOption() AutoAcceptOption { - return AutoAcceptOption{values: make(map[string]struct{})} -} - -// ExternalCAOption is a Value type for parsing external CA specifications. -type ExternalCAOption struct { - values []*swarm.ExternalCA -} - -// Set parses an external CA option. -func (m *ExternalCAOption) Set(value string) error { - parsed, err := parseExternalCA(value) - if err != nil { - return err - } - - m.values = append(m.values, parsed) - return nil -} - -// Type returns the type of this option. -func (m *ExternalCAOption) Type() string { - return "external-ca" -} - -// String returns a string repr of this option. -func (m *ExternalCAOption) String() string { - externalCAs := []string{} - for _, externalCA := range m.values { - repr := fmt.Sprintf("%s: %s", externalCA.Protocol, externalCA.URL) - externalCAs = append(externalCAs, repr) - } - return strings.Join(externalCAs, ", ") -} - -// Value returns the external CAs -func (m *ExternalCAOption) Value() []*swarm.ExternalCA { - return m.values -} - -// parseExternalCA parses an external CA specification from the command line, -// such as protocol=cfssl,url=https://example.com. -func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) { - csvReader := csv.NewReader(strings.NewReader(caSpec)) - fields, err := csvReader.Read() - if err != nil { - return nil, err - } - - externalCA := swarm.ExternalCA{ - Options: make(map[string]string), - } - - var ( - hasProtocol bool - hasURL bool - ) - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - - if len(parts) != 2 { - return nil, fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - key, value := parts[0], parts[1] - - switch strings.ToLower(key) { - case "protocol": - hasProtocol = true - if strings.ToLower(value) == string(swarm.ExternalCAProtocolCFSSL) { - externalCA.Protocol = swarm.ExternalCAProtocolCFSSL - } else { - return nil, fmt.Errorf("unrecognized external CA protocol %s", value) - } - case "url": - hasURL = true - externalCA.URL = value - default: - externalCA.Options[key] = value - } - } - - if !hasProtocol { - return nil, errors.New("the external-ca option needs a protocol= parameter") - } - if !hasURL { - return nil, errors.New("the external-ca option needs a url= parameter") - } - - return &externalCA, nil -} - -func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) { - flags.Var(&opts.autoAccept, flagAutoAccept, "Auto acceptance policy (worker, manager or none)") - flags.StringVar(&opts.secret, flagSecret, "", "Set secret value needed to join a cluster") - flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 10, "Task history retention limit") - flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period") - flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates") - flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints") -} - -func (opts *swarmOptions) ToSpec() swarm.Spec { - spec := swarm.Spec{} - if opts.secret != "" { - spec.AcceptancePolicy.Policies = opts.autoAccept.Policies(&opts.secret) - } else { - spec.AcceptancePolicy.Policies = opts.autoAccept.Policies(nil) - } - spec.Orchestration.TaskHistoryRetentionLimit = opts.taskHistoryLimit - spec.Dispatcher.HeartbeatPeriod = uint64(opts.dispatcherHeartbeat.Nanoseconds()) - spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry - 
spec.CAConfig.ExternalCAs = opts.externalCA.Value() - return spec -} diff --git a/api/client/swarm/opts_test.go b/api/client/swarm/opts_test.go deleted file mode 100644 index a6dcdb525..000000000 --- a/api/client/swarm/opts_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package swarm - -import ( - "testing" - - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/engine-api/types/swarm" -) - -func TestNodeAddrOptionSetHostAndPort(t *testing.T) { - opt := NewNodeAddrOption("old:123") - addr := "newhost:5555" - assert.NilError(t, opt.Set(addr)) - assert.Equal(t, opt.Value(), addr) -} - -func TestNodeAddrOptionSetHostOnly(t *testing.T) { - opt := NewListenAddrOption() - assert.NilError(t, opt.Set("newhost")) - assert.Equal(t, opt.Value(), "newhost:2377") -} - -func TestNodeAddrOptionSetHostOnlyIPv6(t *testing.T) { - opt := NewListenAddrOption() - assert.NilError(t, opt.Set("::1")) - assert.Equal(t, opt.Value(), "[::1]:2377") -} - -func TestNodeAddrOptionSetPortOnly(t *testing.T) { - opt := NewListenAddrOption() - assert.NilError(t, opt.Set(":4545")) - assert.Equal(t, opt.Value(), "0.0.0.0:4545") -} - -func TestNodeAddrOptionSetInvalidFormat(t *testing.T) { - opt := NewListenAddrOption() - assert.Error(t, opt.Set("http://localhost:4545"), "Invalid") -} - -func TestAutoAcceptOptionSetWorker(t *testing.T) { - opt := NewAutoAcceptOption() - assert.NilError(t, opt.Set("worker")) - assert.Equal(t, opt.isPresent(worker), true) -} - -func TestAutoAcceptOptionSetManager(t *testing.T) { - opt := NewAutoAcceptOption() - assert.NilError(t, opt.Set("manager")) - assert.Equal(t, opt.isPresent(manager), true) -} - -func TestAutoAcceptOptionSetInvalid(t *testing.T) { - opt := NewAutoAcceptOption() - assert.Error(t, opt.Set("bogus"), "must be one / combination") -} - -func TestAutoAcceptOptionSetEmpty(t *testing.T) { - opt := NewAutoAcceptOption() - assert.Error(t, opt.Set(""), "must be one / combination") -} - -func TestAutoAcceptOptionSetNone(t *testing.T) { - opt := NewAutoAcceptOption() - assert.NilError(t, opt.Set("none")) - assert.Equal(t, opt.isPresent(manager), false) - assert.Equal(t, opt.isPresent(worker), false) -} - -func TestAutoAcceptOptionSetTwo(t *testing.T) { - opt := NewAutoAcceptOption() - assert.NilError(t, opt.Set("worker,manager")) - assert.Equal(t, opt.isPresent(manager), true) - assert.Equal(t, opt.isPresent(worker), true) -} - -func TestAutoAcceptOptionSetConflict(t *testing.T) { - opt := NewAutoAcceptOption() - assert.Error(t, opt.Set("none,manager"), "value NONE cannot be specified alongside other node types") - - opt = NewAutoAcceptOption() - assert.Error(t, opt.Set("none,worker"), "value NONE cannot be specified alongside other node types") - - opt = NewAutoAcceptOption() - assert.Error(t, opt.Set("worker,none,manager"), "value NONE cannot be specified alongside other node types") - - opt = NewAutoAcceptOption() - assert.Error(t, opt.Set("worker,manager,none"), "value NONE cannot be specified alongside other node types") -} - -func TestAutoAcceptOptionPoliciesDefault(t *testing.T) { - opt := NewAutoAcceptOption() - secret := "thesecret" - - policies := opt.Policies(&secret) - assert.Equal(t, len(policies), 2) - assert.Equal(t, policies[0], swarm.Policy{ - Role: worker, - Autoaccept: true, - Secret: &secret, - }) - assert.Equal(t, policies[1], swarm.Policy{ - Role: manager, - Autoaccept: false, - Secret: &secret, - }) -} - -func TestAutoAcceptOptionPoliciesWithManager(t *testing.T) { - opt := NewAutoAcceptOption() - secret := "thesecret" - - assert.NilError(t, 
opt.Set("manager")) - - policies := opt.Policies(&secret) - assert.Equal(t, len(policies), 2) - assert.Equal(t, policies[0], swarm.Policy{ - Role: worker, - Autoaccept: false, - Secret: &secret, - }) - assert.Equal(t, policies[1], swarm.Policy{ - Role: manager, - Autoaccept: true, - Secret: &secret, - }) -} - -func TestAutoAcceptOptionString(t *testing.T) { - opt := NewAutoAcceptOption() - assert.NilError(t, opt.Set("manager")) - assert.NilError(t, opt.Set("worker")) - - repr := opt.String() - assert.Contains(t, repr, "worker=true") - assert.Contains(t, repr, "manager=true") -} diff --git a/api/client/swarm/secret.go b/api/client/swarm/secret.go deleted file mode 100644 index e98f1d662..000000000 --- a/api/client/swarm/secret.go +++ /dev/null @@ -1,19 +0,0 @@ -package swarm - -import ( - cryptorand "crypto/rand" - "fmt" - "math/big" -) - -func generateRandomSecret() string { - var secretBytes [generatedSecretEntropyBytes]byte - - if _, err := cryptorand.Read(secretBytes[:]); err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) - } - - var nn big.Int - nn.SetBytes(secretBytes[:]) - return fmt.Sprintf("%0[1]*s", maxGeneratedSecretLength, nn.Text(generatedSecretBase)) -} diff --git a/api/client/swarm/update.go b/api/client/swarm/update.go deleted file mode 100644 index 147dd0807..000000000 --- a/api/client/swarm/update.go +++ /dev/null @@ -1,95 +0,0 @@ -package swarm - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := swarmOptions{autoAccept: NewAutoAcceptOption()} - - cmd := &cobra.Command{ - Use: "update", - Short: "Update the Swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runUpdate(dockerCli, cmd.Flags(), opts) - }, - } - - addSwarmFlags(cmd.Flags(), &opts) - return cmd -} - -func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts swarmOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - swarm, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - err = mergeSwarm(&swarm, flags) - if err != nil { - return err - } - - err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec) - if err != nil { - return err - } - - fmt.Fprintln(dockerCli.Out(), "Swarm updated.") - - return nil -} - -func mergeSwarm(swarm *swarm.Swarm, flags *pflag.FlagSet) error { - spec := &swarm.Spec - - if flags.Changed(flagAutoAccept) { - value := flags.Lookup(flagAutoAccept).Value.(*AutoAcceptOption) - spec.AcceptancePolicy.Policies = value.Policies(nil) - } - - var psecret *string - if flags.Changed(flagSecret) { - secret, _ := flags.GetString(flagSecret) - psecret = &secret - } - - for i := range spec.AcceptancePolicy.Policies { - spec.AcceptancePolicy.Policies[i].Secret = psecret - } - - if flags.Changed(flagTaskHistoryLimit) { - spec.Orchestration.TaskHistoryRetentionLimit, _ = flags.GetInt64(flagTaskHistoryLimit) - } - - if flags.Changed(flagDispatcherHeartbeat) { - if v, err := flags.GetDuration(flagDispatcherHeartbeat); err == nil { - spec.Dispatcher.HeartbeatPeriod = uint64(v.Nanoseconds()) - } - } - - if flags.Changed(flagCertExpiry) { - if v, err := flags.GetDuration(flagCertExpiry); err == nil { - spec.CAConfig.NodeCertExpiry = v - } - } - - if flags.Changed(flagExternalCA) { - value := 
flags.Lookup(flagExternalCA).Value.(*ExternalCAOption) - spec.CAConfig.ExternalCAs = value.Value() - } - - return nil -} diff --git a/api/client/system/events.go b/api/client/system/events.go deleted file mode 100644 index 6faf11ee8..000000000 --- a/api/client/system/events.go +++ /dev/null @@ -1,115 +0,0 @@ -package system - -import ( - "fmt" - "io" - "sort" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/engine-api/types" - eventtypes "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - "github.com/spf13/cobra" -) - -type eventsOptions struct { - since string - until string - filter []string -} - -// NewEventsCommand creates a new cobra.Command for `docker events` -func NewEventsCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts eventsOptions - - cmd := &cobra.Command{ - Use: "events [OPTIONS]", - Short: "Get real time events from the server", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runEvents(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.StringVar(&opts.since, "since", "", "Show all events created since timestamp") - flags.StringVar(&opts.until, "until", "", "Stream events until this timestamp") - flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Filter output based on conditions provided") - - return cmd -} - -func runEvents(dockerCli *client.DockerCli, opts *eventsOptions) error { - eventFilterArgs := filters.NewArgs() - - // Consolidate all filter flags, and sanity check them early. - // They'll get processed in the daemon/server. - for _, f := range opts.filter { - var err error - eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) - if err != nil { - return err - } - } - - options := types.EventsOptions{ - Since: opts.since, - Until: opts.until, - Filters: eventFilterArgs, - } - - responseBody, err := dockerCli.Client().Events(context.Background(), options) - if err != nil { - return err - } - defer responseBody.Close() - - return streamEvents(responseBody, dockerCli.Out()) -} - -// streamEvents decodes and prints the incoming events in the provided output. -func streamEvents(input io.Reader, output io.Writer) error { - return DecodeEvents(input, func(event eventtypes.Message, err error) error { - if err != nil { - return err - } - printOutput(event, output) - return nil - }) -} - -type eventProcessor func(event eventtypes.Message, err error) error - -// printOutput prints all types of event information. -// Each output includes the event type, actor id, name and action. -// Actor attributes are printed at the end if the actor has any.
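// (Editorial sketch, not from the original file: with the format below, a
// container start event would print roughly as
//   2016-07-07T15:01:33.000000000-04:00 container start 4386fb97867d (image=ubuntu, name=web)
// i.e. timestamp, then type, action and actor ID, then the sorted
// key=value attributes; the ID and attribute values are hypothetical.)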
-func printOutput(event eventtypes.Message, output io.Writer) { - if event.TimeNano != 0 { - fmt.Fprintf(output, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed)) - } else if event.Time != 0 { - fmt.Fprintf(output, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed)) - } - - fmt.Fprintf(output, "%s %s %s", event.Type, event.Action, event.Actor.ID) - - if len(event.Actor.Attributes) > 0 { - var attrs []string - var keys []string - for k := range event.Actor.Attributes { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := event.Actor.Attributes[k] - attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) - } - fmt.Fprintf(output, " (%s)", strings.Join(attrs, ", ")) - } - fmt.Fprint(output, "\n") -} diff --git a/api/client/system/events_utils.go b/api/client/system/events_utils.go deleted file mode 100644 index 685225daa..000000000 --- a/api/client/system/events_utils.go +++ /dev/null @@ -1,66 +0,0 @@ -package system - -import ( - "encoding/json" - "io" - "sync" - - "github.com/Sirupsen/logrus" - eventtypes "github.com/docker/engine-api/types/events" -) - -// EventHandler is an abstract interface for users to customize -// their own handler functions for each type of event -type EventHandler interface { - Handle(action string, h func(eventtypes.Message)) - Watch(c <-chan eventtypes.Message) -} - -// InitEventHandler initializes and returns an EventHandler -func InitEventHandler() EventHandler { - return &eventHandler{handlers: make(map[string]func(eventtypes.Message))} -} - -type eventHandler struct { - handlers map[string]func(eventtypes.Message) - mu sync.Mutex -} - -func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { - w.mu.Lock() - w.handlers[action] = h - w.mu.Unlock() -} - -// Watch ranges over the passed-in event chan and processes the events based on the
// handlers created for a given action.
// To stop watching, close the event chan.
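// (Usage sketch, editor's illustration with hypothetical names:
//   h := InitEventHandler()
//   h.Handle("start", func(e eventtypes.Message) { logrus.Infof("started %s", e.Actor.ID) })
//   c := make(chan eventtypes.Message)
//   go h.Watch(c) // Watch returns once c is closed
// Decoded messages are fed into c; closing c stops the watcher.)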
-func (w *eventHandler) Watch(c <-chan eventtypes.Message) { - for e := range c { - w.mu.Lock() - h, exists := w.handlers[e.Action] - w.mu.Unlock() - if !exists { - continue - } - logrus.Debugf("event handler: received event: %v", e) - go h(e) - } -} - -// DecodeEvents decodes event from input stream -func DecodeEvents(input io.Reader, ep eventProcessor) error { - dec := json.NewDecoder(input) - for { - var event eventtypes.Message - err := dec.Decode(&event) - if err != nil && err == io.EOF { - break - } - - if procErr := ep(event, err); procErr != nil { - return procErr - } - } - return nil -} diff --git a/api/client/system/info.go b/api/client/system/info.go deleted file mode 100644 index 20a7ba906..000000000 --- a/api/client/system/info.go +++ /dev/null @@ -1,203 +0,0 @@ -package system - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/utils" - "github.com/docker/engine-api/types/swarm" - "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -// NewInfoCommand creates a new cobra.Command for `docker info` -func NewInfoCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "info", - Short: "Display system-wide information", - Args: cli.ExactArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - return runInfo(dockerCli) - }, - } - return cmd - -} - -func runInfo(dockerCli *client.DockerCli) error { - info, err := dockerCli.Client().Info(context.Background()) - if err != nil { - return err - } - - fmt.Fprintf(dockerCli.Out(), "Containers: %d\n", info.Containers) - fmt.Fprintf(dockerCli.Out(), " Running: %d\n", info.ContainersRunning) - fmt.Fprintf(dockerCli.Out(), " Paused: %d\n", info.ContainersPaused) - fmt.Fprintf(dockerCli.Out(), " Stopped: %d\n", info.ContainersStopped) - fmt.Fprintf(dockerCli.Out(), "Images: %d\n", info.Images) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Server Version: %s\n", info.ServerVersion) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Storage Driver: %s\n", info.Driver) - if info.DriverStatus != nil { - for _, pair := range info.DriverStatus { - fmt.Fprintf(dockerCli.Out(), " %s: %s\n", pair[0], pair[1]) - - // print a warning if devicemapper is using a loopback file - if pair[0] == "Data loop file" { - fmt.Fprintln(dockerCli.Err(), " WARNING: Usage of loopback devices is strongly discouraged for production use. 
Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.") - } - } - - } - if info.SystemStatus != nil { - for _, pair := range info.SystemStatus { - fmt.Fprintf(dockerCli.Out(), "%s: %s\n", pair[0], pair[1]) - } - } - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Logging Driver: %s\n", info.LoggingDriver) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Cgroup Driver: %s\n", info.CgroupDriver) - - fmt.Fprintf(dockerCli.Out(), "Plugins: \n") - fmt.Fprintf(dockerCli.Out(), " Volume:") - fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Volume, " ")) - fmt.Fprintf(dockerCli.Out(), "\n") - fmt.Fprintf(dockerCli.Out(), " Network:") - fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Network, " ")) - fmt.Fprintf(dockerCli.Out(), "\n") - - if len(info.Plugins.Authorization) != 0 { - fmt.Fprintf(dockerCli.Out(), " Authorization:") - fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Authorization, " ")) - fmt.Fprintf(dockerCli.Out(), "\n") - } - - fmt.Fprintf(dockerCli.Out(), "Swarm: %v\n", info.Swarm.LocalNodeState) - if info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive { - fmt.Fprintf(dockerCli.Out(), " NodeID: %s\n", info.Swarm.NodeID) - if info.Swarm.Error != "" { - fmt.Fprintf(dockerCli.Out(), " Error: %v\n", info.Swarm.Error) - } - fmt.Fprintf(dockerCli.Out(), " Is Manager: %v\n", info.Swarm.ControlAvailable) - if info.Swarm.ControlAvailable { - fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers) - fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), " CA Certificate Hash: %s\n", info.Swarm.CACertHash) - } - } - - if len(info.Runtimes) > 0 { - fmt.Fprintf(dockerCli.Out(), "Runtimes:") - for name := range info.Runtimes { - fmt.Fprintf(dockerCli.Out(), " %s", name) - } - fmt.Fprint(dockerCli.Out(), "\n") - fmt.Fprintf(dockerCli.Out(), "Default Runtime: %s\n", info.DefaultRuntime) - } - - fmt.Fprintf(dockerCli.Out(), "Security Options:") - ioutils.FprintfIfNotEmpty(dockerCli.Out(), " %s", strings.Join(info.SecurityOptions, " ")) - fmt.Fprintf(dockerCli.Out(), "\n") - - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Kernel Version: %s\n", info.KernelVersion) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Operating System: %s\n", info.OperatingSystem) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "OSType: %s\n", info.OSType) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Architecture: %s\n", info.Architecture) - fmt.Fprintf(dockerCli.Out(), "CPUs: %d\n", info.NCPU) - fmt.Fprintf(dockerCli.Out(), "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Name: %s\n", info.Name) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "ID: %s\n", info.ID) - fmt.Fprintf(dockerCli.Out(), "Docker Root Dir: %s\n", info.DockerRootDir) - fmt.Fprintf(dockerCli.Out(), "Debug Mode (client): %v\n", utils.IsDebugEnabled()) - fmt.Fprintf(dockerCli.Out(), "Debug Mode (server): %v\n", info.Debug) - - if info.Debug { - fmt.Fprintf(dockerCli.Out(), " File Descriptors: %d\n", info.NFd) - fmt.Fprintf(dockerCli.Out(), " Goroutines: %d\n", info.NGoroutines) - fmt.Fprintf(dockerCli.Out(), " System Time: %s\n", info.SystemTime) - fmt.Fprintf(dockerCli.Out(), " EventsListeners: %d\n", info.NEventsListener) - } - - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Http Proxy: %s\n", info.HTTPProxy) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Https Proxy: %s\n", info.HTTPSProxy) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "No Proxy: %s\n", info.NoProxy) - - if 
info.IndexServerAddress != "" { - u := dockerCli.ConfigFile().AuthConfigs[info.IndexServerAddress].Username - if len(u) > 0 { - fmt.Fprintf(dockerCli.Out(), "Username: %v\n", u) - } - fmt.Fprintf(dockerCli.Out(), "Registry: %v\n", info.IndexServerAddress) - } - - // Only output these warnings if the server does not support these features - if info.OSType != "windows" { - if !info.MemoryLimit { - fmt.Fprintln(dockerCli.Err(), "WARNING: No memory limit support") - } - if !info.SwapLimit { - fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support") - } - if !info.KernelMemory { - fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support") - } - if !info.OomKillDisable { - fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support") - } - if !info.CPUCfsQuota { - fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs quota support") - } - if !info.CPUCfsPeriod { - fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs period support") - } - if !info.CPUShares { - fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu shares support") - } - if !info.CPUSet { - fmt.Fprintln(dockerCli.Err(), "WARNING: No cpuset support") - } - if !info.IPv4Forwarding { - fmt.Fprintln(dockerCli.Err(), "WARNING: IPv4 forwarding is disabled") - } - if !info.BridgeNfIptables { - fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-iptables is disabled") - } - if !info.BridgeNfIP6tables { - fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-ip6tables is disabled") - } - } - - if info.Labels != nil { - fmt.Fprintln(dockerCli.Out(), "Labels:") - for _, attribute := range info.Labels { - fmt.Fprintf(dockerCli.Out(), " %s\n", attribute) - } - } - - ioutils.FprintfIfTrue(dockerCli.Out(), "Experimental: %v\n", info.ExperimentalBuild) - if info.ClusterStore != "" { - fmt.Fprintf(dockerCli.Out(), "Cluster Store: %s\n", info.ClusterStore) - } - - if info.ClusterAdvertise != "" { - fmt.Fprintf(dockerCli.Out(), "Cluster Advertise: %s\n", info.ClusterAdvertise) - } - - if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) { - fmt.Fprintln(dockerCli.Out(), "Insecure Registries:") - for _, registry := range info.RegistryConfig.IndexConfigs { - if registry.Secure == false { - fmt.Fprintf(dockerCli.Out(), " %s\n", registry.Name) - } - } - - for _, registry := range info.RegistryConfig.InsecureRegistryCIDRs { - mask, _ := registry.Mask.Size() - fmt.Fprintf(dockerCli.Out(), " %s/%d\n", registry.IP.String(), mask) - } - } - return nil -} diff --git a/api/client/system/version.go b/api/client/system/version.go deleted file mode 100644 index 2f3ae2d0b..000000000 --- a/api/client/system/version.go +++ /dev/null @@ -1,110 +0,0 @@ -package system - -import ( - "runtime" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/utils" - "github.com/docker/docker/utils/templates" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -var versionTemplate = `Client: - Version: {{.Client.Version}} - API version: {{.Client.APIVersion}} - Go version: {{.Client.GoVersion}} - Git commit: {{.Client.GitCommit}} - Built: {{.Client.BuildTime}} - OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .Client.Experimental}} - Experimental: {{.Client.Experimental}}{{end}}{{if .ServerOK}} - -Server: - Version: {{.Server.Version}} - API version: {{.Server.APIVersion}} - Go version: {{.Server.GoVersion}} - Git commit: 
{{.Server.GitCommit}} - Built: {{.Server.BuildTime}} - OS/Arch: {{.Server.Os}}/{{.Server.Arch}}{{if .Server.Experimental}} - Experimental: {{.Server.Experimental}}{{end}}{{end}}` - -type versionOptions struct { - format string -} - -// NewVersionCommand creates a new cobra.Command for `docker version` -func NewVersionCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts versionOptions - - cmd := &cobra.Command{ - Use: "version [OPTIONS]", - Short: "Show the Docker version information", - Args: cli.ExactArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - return runVersion(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template") - - return cmd -} - -func runVersion(dockerCli *client.DockerCli, opts *versionOptions) error { - ctx := context.Background() - - templateFormat := versionTemplate - if opts.format != "" { - templateFormat = opts.format - } - - tmpl, err := templates.Parse(templateFormat) - if err != nil { - return cli.StatusError{StatusCode: 64, - Status: "Template parsing error: " + err.Error()} - } - - vd := types.VersionResponse{ - Client: &types.Version{ - Version: dockerversion.Version, - APIVersion: dockerCli.Client().ClientVersion(), - GoVersion: runtime.Version(), - GitCommit: dockerversion.GitCommit, - BuildTime: dockerversion.BuildTime, - Os: runtime.GOOS, - Arch: runtime.GOARCH, - Experimental: utils.ExperimentalBuild(), - }, - } - - serverVersion, err := dockerCli.Client().ServerVersion(ctx) - if err == nil { - vd.Server = &serverVersion - } - - // first we need to make BuildTime more human-friendly - t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime) - if errTime == nil { - vd.Client.BuildTime = t.Format(time.ANSIC) - } - - if vd.ServerOK() { - t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) - if errTime == nil { - vd.Server.BuildTime = t.Format(time.ANSIC) - } - } - - if err2 := tmpl.Execute(dockerCli.Out(), vd); err2 != nil && err == nil { - err = err2 - } - dockerCli.Out().Write([]byte{'\n'}) - return err -} diff --git a/api/client/task/print.go b/api/client/task/print.go deleted file mode 100644 index bdb069ccf..000000000 --- a/api/client/task/print.go +++ /dev/null @@ -1,79 +0,0 @@ -package task - -import ( - "fmt" - "sort" - "strings" - "text/tabwriter" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/idresolver" - "github.com/docker/engine-api/types/swarm" - "github.com/docker/go-units" -) - -const ( - psTaskItemFmt = "%s\t%s\t%s\t%s\t%s %s ago\t%s\t%s\n" -) - -type tasksBySlot []swarm.Task - -func (t tasksBySlot) Len() int { - return len(t) -} - -func (t tasksBySlot) Swap(i, j int) { - t[i], t[j] = t[j], t[i] -} - -func (t tasksBySlot) Less(i, j int) bool { - // Sort by slot. - if t[i].Slot != t[j].Slot { - return t[i].Slot < t[j].Slot - } - - // If same slot, sort by most recent.
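// (Worked example, editor's illustration: with two tasks in the same slot,
// one created at 10:00 and one at 10:05, the comparison below makes
// Less(i, j) true exactly when t[i] is the 10:05 task, so the more
// recently created task sorts first within a slot.)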
- return t[j].Meta.CreatedAt.Before(t[i].CreatedAt) -} - -// Print task information in a table format -func Print(dockerCli *client.DockerCli, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver) error { - sort.Stable(tasksBySlot(tasks)) - - writer := tabwriter.NewWriter(dockerCli.Out(), 0, 4, 2, ' ', 0) - - // Ignore flushing errors - defer writer.Flush() - fmt.Fprintln(writer, strings.Join([]string{"ID", "NAME", "SERVICE", "IMAGE", "LAST STATE", "DESIRED STATE", "NODE"}, "\t")) - for _, task := range tasks { - serviceValue, err := resolver.Resolve(ctx, swarm.Service{}, task.ServiceID) - if err != nil { - return err - } - nodeValue, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID) - if err != nil { - return err - } - name := serviceValue - if task.Slot > 0 { - name = fmt.Sprintf("%s.%d", name, task.Slot) - } - fmt.Fprintf( - writer, - psTaskItemFmt, - task.ID, - name, - serviceValue, - task.Spec.ContainerSpec.Image, - client.PrettyPrint(task.Status.State), - strings.ToLower(units.HumanDuration(time.Since(task.Status.Timestamp))), - client.PrettyPrint(task.DesiredState), - nodeValue, - ) - } - - return nil -} diff --git a/api/client/trust.go b/api/client/trust.go deleted file mode 100644 index 3066c39b5..000000000 --- a/api/client/trust.go +++ /dev/null @@ -1,598 +0,0 @@ -package client - -import ( - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "sort" - "strconv" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/distribution" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" - "github.com/docker/go-connections/tlsconfig" - "github.com/docker/notary/client" - "github.com/docker/notary/passphrase" - "github.com/docker/notary/trustmanager" - "github.com/docker/notary/trustpinning" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/store" - "github.com/spf13/pflag" -) - -var ( - releasesRole = path.Join(data.CanonicalTargetsRole, "releases") - untrusted bool -) - -// AddTrustedFlags adds content trust flags to the current command flagset -func AddTrustedFlags(fs *pflag.FlagSet, verify bool) { - trusted, message := setupTrustedFlag(verify) - fs.BoolVar(&untrusted, "disable-content-trust", !trusted, message) -} - -func setupTrustedFlag(verify bool) (bool, string) { - var trusted bool - if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { - if t, err := strconv.ParseBool(e); t || err != nil { - // treat any other value as true - trusted = true - } - } - message := "Skip image signing" - if verify { - message = "Skip image verification" - } - return trusted, message -} - -// IsTrusted returns true if content trust is enabled -func IsTrusted() bool { - return !untrusted -} - -type target struct { - reference registry.Reference - digest digest.Digest - size int64 -} - -func (cli *DockerCli) trustDirectory() string { - return filepath.Join(cliconfig.ConfigDir(), "trust") -} - -// certificateDirectory returns the directory containing -// TLS certificates for the given server. 
An error is -// returned if there was an error parsing the server string. -func (cli *DockerCli) certificateDirectory(server string) (string, error) { - u, err := url.Parse(server) - if err != nil { - return "", err - } - - return filepath.Join(cliconfig.ConfigDir(), "tls", u.Host), nil -} - -func trustServer(index *registrytypes.IndexInfo) (string, error) { - if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { - urlObj, err := url.Parse(s) - if err != nil || urlObj.Scheme != "https" { - return "", fmt.Errorf("valid https URL required for trust server, got %s", s) - } - - return s, nil - } - if index.Official { - return registry.NotaryServer, nil - } - return "https://" + index.Name, nil -} - -type simpleCredentialStore struct { - auth types.AuthConfig -} - -func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { - return scs.auth.Username, scs.auth.Password -} - -func (scs simpleCredentialStore) RefreshToken(u *url.URL, service string) string { - return scs.auth.IdentityToken -} - -func (scs simpleCredentialStore) SetRefreshToken(*url.URL, string, string) { -} - -// getNotaryRepository returns a NotaryRepository which stores all the -// information needed to operate on a notary repository. -// It creates an HTTP transport providing authentication support. -func (cli *DockerCli) getNotaryRepository(repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig, actions ...string) (*client.NotaryRepository, error) { - server, err := trustServer(repoInfo.Index) - if err != nil { - return nil, err - } - - var cfg = tlsconfig.ClientDefault - cfg.InsecureSkipVerify = !repoInfo.Index.Secure - - // Get certificate base directory - certDir, err := cli.certificateDirectory(server) - if err != nil { - return nil, err - } - logrus.Debugf("reading certificate directory: %s", certDir) - - if err := registry.ReadCertsDirectory(&cfg, certDir); err != nil { - return nil, err - } - - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: &cfg, - DisableKeepAlives: true, - } - - // Skip configuration headers since request is not going to Docker daemon - modifiers := registry.DockerHeaders(clientUserAgent(), http.Header{}) - authTransport := transport.NewTransport(base, modifiers...) - pingClient := &http.Client{ - Transport: authTransport, - Timeout: 5 * time.Second, - } - endpointStr := server + "/v2/" - req, err := http.NewRequest("GET", endpointStr, nil) - if err != nil { - return nil, err - } - - challengeManager := auth.NewSimpleChallengeManager() - - resp, err := pingClient.Do(req) - if err != nil { - // Ignore error on ping to operate in offline mode - logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err) - } else { - defer resp.Body.Close() - - // Add response to the challenge manager to parse out - // authentication header and register authentication method - if err := challengeManager.AddResponse(resp); err != nil { - return nil, err - } - } - - creds := simpleCredentialStore{auth: authConfig} - tokenHandler := auth.NewTokenHandler(authTransport, creds, repoInfo.FullName(), actions...) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))) - tr := transport.NewTransport(base, modifiers...) 
- - return client.NewNotaryRepository( - cli.trustDirectory(), repoInfo.FullName(), server, tr, cli.getPassphraseRetriever(), - trustpinning.TrustPinConfig{}) -} - -func convertTarget(t client.Target) (target, error) { - h, ok := t.Hashes["sha256"] - if !ok { - return target{}, errors.New("no valid hash, expecting sha256") - } - return target{ - reference: registry.ParseReference(t.Name), - digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), - size: t.Length, - }, nil -} - -func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever { - aliasMap := map[string]string{ - "root": "root", - "snapshot": "repository", - "targets": "repository", - "default": "repository", - } - baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out, aliasMap) - env := map[string]string{ - "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"), - "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - "targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - "default": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - } - - return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { - if v := env[alias]; v != "" { - return v, numAttempts > 1, nil - } - // For non-root roles, we can also try the "default" alias if it is specified - if v := env["default"]; v != "" && alias != data.CanonicalRootRole { - return v, numAttempts > 1, nil - } - return baseRetriever(keyName, alias, createNew, numAttempts) - } -} - -// TrustedReference returns the canonical trusted reference for an image reference -func (cli *DockerCli) TrustedReference(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) { - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return nil, err - } - - // Resolve the Auth config relevant for this server - authConfig := cli.ResolveAuthConfig(ctx, repoInfo.Index) - - notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig, "pull") - if err != nil { - fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) - return nil, err - } - - t, err := notaryRepo.GetTargetByName(ref.Tag(), releasesRole, data.CanonicalTargetsRole) - if err != nil { - return nil, err - } - // Only list tags in the top level targets role or the releases delegation role - ignore - // all other delegation roles - if t.Role != releasesRole && t.Role != data.CanonicalTargetsRole { - return nil, notaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.Tag())) - } - r, err := convertTarget(t.Target) - if err != nil { - return nil, err - - } - - return reference.WithDigest(ref, r.digest) -} - -// TagTrusted tags a trusted ref -func (cli *DockerCli) TagTrusted(ctx context.Context, trustedRef reference.Canonical, ref reference.NamedTagged) error { - fmt.Fprintf(cli.out, "Tagging %s as %s\n", trustedRef.String(), ref.String()) - - return cli.client.ImageTag(ctx, trustedRef.String(), ref.String()) -} - -func notaryError(repoName string, err error) error { - switch err.(type) { - case *json.SyntaxError: - logrus.Debugf("Notary syntax error: %s", err) - return fmt.Errorf("Error: no trust data available for remote repository %s. 
Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName) - case signed.ErrExpired: - return fmt.Errorf("Error: remote repository %s out-of-date: %v", repoName, err) - case trustmanager.ErrKeyNotFound: - return fmt.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err) - case *net.OpError: - return fmt.Errorf("Error: error contacting notary server: %v", err) - case store.ErrMetaNotFound: - return fmt.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err) - case signed.ErrInvalidKeyType: - return fmt.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err) - case signed.ErrNoKeys: - return fmt.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err) - case signed.ErrLowVersion: - return fmt.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err) - case signed.ErrRoleThreshold: - return fmt.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err) - case client.ErrRepositoryNotExist: - return fmt.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err) - case signed.ErrInsufficientSignatures: - return fmt.Errorf("Error: could not produce valid signature for %s. If Yubikey was used, was touch input provided?: %v", repoName, err) - } - - return err -} - -// TrustedPull handles content trust pulling of an image -func (cli *DockerCli) TrustedPull(ctx context.Context, repoInfo *registry.RepositoryInfo, ref registry.Reference, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { - var refs []target - - notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig, "pull") - if err != nil { - fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) - return err - } - - if ref.String() == "" { - // List all targets - targets, err := notaryRepo.ListTargets(releasesRole, data.CanonicalTargetsRole) - if err != nil { - return notaryError(repoInfo.FullName(), err) - } - for _, tgt := range targets { - t, err := convertTarget(tgt.Target) - if err != nil { - fmt.Fprintf(cli.out, "Skipping target for %q\n", repoInfo.Name()) - continue - } - // Only list tags in the top level targets role or the releases delegation role - ignore - // all other delegation roles - if tgt.Role != releasesRole && tgt.Role != data.CanonicalTargetsRole { - continue - } - refs = append(refs, t) - } - if len(refs) == 0 { - return notaryError(repoInfo.FullName(), fmt.Errorf("No trusted tags for %s", repoInfo.FullName())) - } - } else { - t, err := notaryRepo.GetTargetByName(ref.String(), releasesRole, data.CanonicalTargetsRole) - if err != nil { - return notaryError(repoInfo.FullName(), err) - } - // Only get the tag if it's in the top level targets role or the releases delegation role - // ignore it if it's in any other delegation roles - if t.Role != releasesRole && t.Role != data.CanonicalTargetsRole { - return notaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.String())) - } - - logrus.Debugf("retrieving target for %s role\n", t.Role) - r, err := convertTarget(t.Target) - if err != nil { - return err - - } - refs = append(refs, r) - } - - for i, r := range refs { - displayTag := r.reference.String() - if displayTag != "" { - displayTag 
= ":" + displayTag - } - fmt.Fprintf(cli.out, "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), repoInfo.Name(), displayTag, r.digest) - - ref, err := reference.WithDigest(repoInfo, r.digest) - if err != nil { - return err - } - if err := cli.ImagePullPrivileged(ctx, authConfig, ref.String(), requestPrivilege, false); err != nil { - return err - } - - // If reference is not trusted, tag by trusted reference - if !r.reference.HasDigest() { - tagged, err := reference.WithTag(repoInfo, r.reference.String()) - if err != nil { - return err - } - trustedRef, err := reference.WithDigest(repoInfo, r.digest) - if err != nil { - return err - } - if err := cli.TagTrusted(ctx, trustedRef, tagged); err != nil { - return err - } - } - } - return nil -} - -// TrustedPush handles content trust pushing of an image -func (cli *DockerCli) TrustedPush(ctx context.Context, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { - responseBody, err := cli.ImagePushPrivileged(ctx, authConfig, ref.String(), requestPrivilege) - if err != nil { - return err - } - - defer responseBody.Close() - - // If it is a trusted push we would like to find the target entry which match the - // tag provided in the function and then do an AddTarget later. - target := &client.Target{} - // Count the times of calling for handleTarget, - // if it is called more that once, that should be considered an error in a trusted push. - cnt := 0 - handleTarget := func(aux *json.RawMessage) { - cnt++ - if cnt > 1 { - // handleTarget should only be called one. This will be treated as an error. - return - } - - var pushResult distribution.PushResult - err := json.Unmarshal(*aux, &pushResult) - if err == nil && pushResult.Tag != "" && pushResult.Digest.Validate() == nil { - h, err := hex.DecodeString(pushResult.Digest.Hex()) - if err != nil { - target = nil - return - } - target.Name = registry.ParseReference(pushResult.Tag).String() - target.Hashes = data.Hashes{string(pushResult.Digest.Algorithm()): h} - target.Length = int64(pushResult.Size) - } - } - - var tag string - switch x := ref.(type) { - case reference.Canonical: - return errors.New("cannot push a digest reference") - case reference.NamedTagged: - tag = x.Tag() - } - - // We want trust signatures to always take an explicit tag, - // otherwise it will act as an untrusted push. 
- if tag == "" { - if err = jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil); err != nil { - return err - } - fmt.Fprintln(cli.out, "No tag specified, skipping trust metadata push") - return nil - } - - if err = jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, handleTarget); err != nil { - return err - } - - if cnt > 1 { - return fmt.Errorf("internal error: only one call to handleTarget expected") - } - - if target == nil { - fmt.Fprintln(cli.out, "No targets found, please provide a specific tag in order to sign it") - return nil - } - - fmt.Fprintln(cli.out, "Signing and pushing trust metadata") - - repo, err := cli.getNotaryRepository(repoInfo, authConfig, "push", "pull") - if err != nil { - fmt.Fprintf(cli.out, "Error establishing connection to notary repository: %s\n", err) - return err - } - - // get the latest repository metadata so we can figure out which roles to sign - err = repo.Update(false) - - switch err.(type) { - case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: - keys := repo.CryptoService.ListKeys(data.CanonicalRootRole) - var rootKeyID string - // always select the first root key - if len(keys) > 0 { - sort.Strings(keys) - rootKeyID = keys[0] - } else { - rootPublicKey, err := repo.CryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey) - if err != nil { - return err - } - rootKeyID = rootPublicKey.ID() - } - - // Initialize the notary repository with a remotely managed snapshot key - if err := repo.Initialize(rootKeyID, data.CanonicalSnapshotRole); err != nil { - return notaryError(repoInfo.FullName(), err) - } - fmt.Fprintf(cli.out, "Finished initializing %q\n", repoInfo.FullName()) - err = repo.AddTarget(target, data.CanonicalTargetsRole) - case nil: - // already initialized and we have successfully downloaded the latest metadata - err = cli.addTargetToAllSignableRoles(repo, target) - default: - return notaryError(repoInfo.FullName(), err) - } - - if err == nil { - err = repo.Publish() - } - - if err != nil { - fmt.Fprintf(cli.out, "Failed to sign %q:%s - %s\n", repoInfo.FullName(), tag, err.Error()) - return notaryError(repoInfo.FullName(), err) - } - - fmt.Fprintf(cli.out, "Successfully signed %q:%s\n", repoInfo.FullName(), tag) - return nil -} - -// Attempt to add the image target to all the top level delegation roles we can -// (based on whether we have the signing key and whether the role's path allows -// us to). -// If there are no delegation roles, we add to the targets role. -func (cli *DockerCli) addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.Target) error { - var signableRoles []string - - // translate the full key names, which includes the GUN, into just the key IDs - allCanonicalKeyIDs := make(map[string]struct{}) - for fullKeyID := range repo.CryptoService.ListAllKeys() { - allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{} - } - - allDelegationRoles, err := repo.GetDelegationRoles() - if err != nil { - return err - } - - // if there are no delegation roles, then just try to sign it into the targets role - if len(allDelegationRoles) == 0 { - return repo.AddTarget(target, data.CanonicalTargetsRole) - } - - // there are delegation roles, find every delegation role we have a key for, and - // attempt to sign into into all those roles. - for _, delegationRole := range allDelegationRoles { - // We do not support signing any delegation role that isn't a direct child of the targets role. 
- // Also don't bother checking the keys if we can't add the target - // to this role due to path restrictions - if path.Dir(delegationRole.Name) != data.CanonicalTargetsRole || !delegationRole.CheckPaths(target.Name) { - continue - } - - for _, canonicalKeyID := range delegationRole.KeyIDs { - if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok { - signableRoles = append(signableRoles, delegationRole.Name) - break - } - } - } - - if len(signableRoles) == 0 { - return fmt.Errorf("no valid signing keys for delegation roles") - } - - return repo.AddTarget(target, signableRoles...) -} - -// ImagePullPrivileged pulls the image and displays it to the output -func (cli *DockerCli) ImagePullPrivileged(ctx context.Context, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error { - - encodedAuth, err := EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - options := types.ImagePullOptions{ - RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - All: all, - } - - responseBody, err := cli.client.ImagePull(ctx, ref, options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) -} - -// ImagePushPrivileged pushes the image -func (cli *DockerCli) ImagePushPrivileged(ctx context.Context, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { - encodedAuth, err := EncodeAuthToBase64(authConfig) - if err != nil { - return nil, err - } - options := types.ImagePushOptions{ - RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - } - - return cli.client.ImagePush(ctx, ref, options) -} diff --git a/api/client/trust_test.go b/api/client/trust_test.go deleted file mode 100644 index ec95bd9d3..000000000 --- a/api/client/trust_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package client - -import ( - "os" - "testing" - - "github.com/docker/docker/registry" - registrytypes "github.com/docker/engine-api/types/registry" -) - -func unsetENV() { - os.Unsetenv("DOCKER_CONTENT_TRUST") - os.Unsetenv("DOCKER_CONTENT_TRUST_SERVER") -} - -func TestENVTrustServer(t *testing.T) { - defer unsetENV() - indexInfo := &registrytypes.IndexInfo{Name: "testserver"} - if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "https://notary-test.com:5000"); err != nil { - t.Fatal("Failed to set ENV variable") - } - output, err := trustServer(indexInfo) - expectedStr := "https://notary-test.com:5000" - if err != nil || output != expectedStr { - t.Fatalf("Expected server to be %s, got %s", expectedStr, output) - } -} - -func TestHTTPENVTrustServer(t *testing.T) { - defer unsetENV() - indexInfo := &registrytypes.IndexInfo{Name: "testserver"} - if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "http://notary-test.com:5000"); err != nil { - t.Fatal("Failed to set ENV variable") - } - _, err := trustServer(indexInfo) - if err == nil { - t.Fatal("Expected error with invalid scheme") - } -} - -func TestOfficialTrustServer(t *testing.T) { - indexInfo := &registrytypes.IndexInfo{Name: "testserver", Official: true} - output, err := trustServer(indexInfo) - if err != nil || output != registry.NotaryServer { - t.Fatalf("Expected server to be %s, got %s", registry.NotaryServer, output) - } -} - -func TestNonOfficialTrustServer(t *testing.T) { - indexInfo := &registrytypes.IndexInfo{Name: "testserver", Official: false} - output, err := trustServer(indexInfo) - expectedStr := "https://" + indexInfo.Name - if err != nil ||
output != expectedStr { - t.Fatalf("Expected server to be %s, got %s", expectedStr, output) - } -} diff --git a/api/client/utils.go b/api/client/utils.go deleted file mode 100644 index 99b0f8618..000000000 --- a/api/client/utils.go +++ /dev/null @@ -1,190 +0,0 @@ -package client - -import ( - "fmt" - "io" - "io/ioutil" - "os" - gosignal "os/signal" - "path/filepath" - "runtime" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/term" - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" -) - -func (cli *DockerCli) resizeTty(ctx context.Context, id string, isExec bool) { - height, width := cli.GetTtySize() - cli.ResizeTtyTo(ctx, id, height, width, isExec) -} - -// ResizeTtyTo resizes tty to specific height and width -// TODO: this can be unexported again once all container related commands move to package container -func (cli *DockerCli) ResizeTtyTo(ctx context.Context, id string, height, width int, isExec bool) { - if height == 0 && width == 0 { - return - } - - options := types.ResizeOptions{ - Height: height, - Width: width, - } - - var err error - if isExec { - err = cli.client.ContainerExecResize(ctx, id, options) - } else { - err = cli.client.ContainerResize(ctx, id, options) - } - - if err != nil { - logrus.Debugf("Error resize: %s", err) - } -} - -// getExecExitCode performs an inspect on the exec command. It returns -// the running state and the exit code. -func (cli *DockerCli) getExecExitCode(ctx context.Context, execID string) (bool, int, error) { - resp, err := cli.client.ContainerExecInspect(ctx, execID) - if err != nil { - // If we can't connect, then the daemon probably died. - if err != client.ErrConnectionFailed { - return false, -1, err - } - return false, -1, nil - } - - return resp.Running, resp.ExitCode, nil -} - -// MonitorTtySize updates the container tty size when the terminal tty changes size -func (cli *DockerCli) MonitorTtySize(ctx context.Context, id string, isExec bool) error { - cli.resizeTty(ctx, id, isExec) - - if runtime.GOOS == "windows" { - go func() { - prevH, prevW := cli.GetTtySize() - for { - time.Sleep(time.Millisecond * 250) - h, w := cli.GetTtySize() - - if prevW != w || prevH != h { - cli.resizeTty(ctx, id, isExec) - } - prevH = h - prevW = w - } - }() - } else { - sigchan := make(chan os.Signal, 1) - gosignal.Notify(sigchan, signal.SIGWINCH) - go func() { - for range sigchan { - cli.resizeTty(ctx, id, isExec) - } - }() - } - return nil -} - -// GetTtySize returns the height and width in characters of the tty -func (cli *DockerCli) GetTtySize() (int, int) { - if !cli.isTerminalOut { - return 0, 0 - } - ws, err := term.GetWinsize(cli.outFd) - if err != nil { - logrus.Debugf("Error getting size: %s", err) - if ws == nil { - return 0, 0 - } - } - return int(ws.Height), int(ws.Width) -} - -// CopyToFile writes the content of the reader to the specified file -func CopyToFile(outfile string, r io.Reader) error { - tmpFile, err := ioutil.TempFile(filepath.Dir(outfile), ".docker_temp_") - if err != nil { - return err - } - - tmpPath := tmpFile.Name() - - _, err = io.Copy(tmpFile, r) - tmpFile.Close() - - if err != nil { - os.Remove(tmpPath) - return err - } - - if err = os.Rename(tmpPath, outfile); err != nil { - os.Remove(tmpPath) - return err - } - - return nil -} - -// ForwardAllSignals forwards signals to the container -// TODO: this can be unexported again once all container commands are under -// api/client/container
-func (cli *DockerCli) ForwardAllSignals(ctx context.Context, cid string) chan os.Signal { - sigc := make(chan os.Signal, 128) - signal.CatchAll(sigc) - go func() { - for s := range sigc { - if s == signal.SIGCHLD || s == signal.SIGPIPE { - continue - } - var sig string - for sigStr, sigN := range signal.SignalMap { - if sigN == s { - sig = sigStr - break - } - } - if sig == "" { - fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s) - continue - } - - if err := cli.client.ContainerKill(ctx, cid, sig); err != nil { - logrus.Debugf("Error sending signal: %s", err) - } - } - }() - return sigc -} - -// capitalizeFirst capitalizes the first character of string -func capitalizeFirst(s string) string { - switch l := len(s); l { - case 0: - return s - case 1: - return strings.ToLower(s) - default: - return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:]) - } -} - -// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter. -func PrettyPrint(i interface{}) string { - switch t := i.(type) { - case nil: - return "None" - case string: - return capitalizeFirst(t) - default: - return capitalizeFirst(fmt.Sprintf("%s", t)) - } -} diff --git a/api/client/volume/cmd.go b/api/client/volume/cmd.go deleted file mode 100644 index bc8f55ccc..000000000 --- a/api/client/volume/cmd.go +++ /dev/null @@ -1,29 +0,0 @@ -package volume - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" -) - -// NewVolumeCommand returns a cobra command for `volume` subcommands -func NewVolumeCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "volume", - Short: "Manage Docker volumes", - Args: cli.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) - }, - } - cmd.AddCommand( - newCreateCommand(dockerCli), - newInspectCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - ) - return cmd -} diff --git a/api/client/volume/create.go b/api/client/volume/create.go deleted file mode 100644 index a38ff556c..000000000 --- a/api/client/volume/create.go +++ /dev/null @@ -1,62 +0,0 @@ -package volume - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type createOptions struct { - name string - driver string - driverOpts opts.MapOpts - labels []string -} - -func newCreateCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := createOptions{ - driverOpts: *opts.NewMapOpts(nil, nil), - } - - cmd := &cobra.Command{ - Use: "create", - Short: "Create a volume", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runCreate(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.StringVarP(&opts.driver, "driver", "d", "local", "Specify volume driver name") - flags.StringVar(&opts.name, "name", "", "Specify volume name") - flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") - flags.StringSliceVar(&opts.labels, "label", []string{}, "Set metadata for a volume") - - return cmd -} - -func runCreate(dockerCli *client.DockerCli, opts createOptions) error { - client := dockerCli.Client() - - volReq := types.VolumeCreateRequest{ - Driver: opts.driver, - DriverOpts: opts.driverOpts.GetAll(), - Name: opts.name, - Labels: 
runconfigopts.ConvertKVStringsToMap(opts.labels), - } - - vol, err := client.VolumeCreate(context.Background(), volReq) - if err != nil { - return err - } - - fmt.Fprintf(dockerCli.Out(), "%s\n", vol.Name) - return nil -} diff --git a/api/client/volume/inspect.go b/api/client/volume/inspect.go deleted file mode 100644 index db36926f1..000000000 --- a/api/client/volume/inspect.go +++ /dev/null @@ -1,46 +0,0 @@ -package volume - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/inspect" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - format string - names []string -} - -func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] VOLUME [VOLUME...]", - Short: "Display detailed information on one or more volumes", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.names = args - return runInspect(dockerCli, opts) - }, - } - - cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template") - - return cmd -} - -func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - - ctx := context.Background() - - getVolFunc := func(name string) (interface{}, []byte, error) { - i, err := client.VolumeInspect(ctx, name) - return i, nil, err - } - - return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getVolFunc) -} diff --git a/api/client/volume/list.go b/api/client/volume/list.go deleted file mode 100644 index 01ea5a6ce..000000000 --- a/api/client/volume/list.go +++ /dev/null @@ -1,86 +0,0 @@ -package volume - -import ( - "fmt" - "sort" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/spf13/cobra" -) - -type byVolumeName []*types.Volume - -func (r byVolumeName) Len() int { return len(r) } -func (r byVolumeName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byVolumeName) Less(i, j int) bool { - return r[i].Name < r[j].Name -} - -type listOptions struct { - quiet bool - filter []string -} - -func newListCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts listOptions - - cmd := &cobra.Command{ - Use: "ls", - Aliases: []string{"list"}, - Short: "List volumes", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display volume names") - flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Provide filter values (i.e. 
'dangling=true')") - - return cmd -} - -func runList(dockerCli *client.DockerCli, opts listOptions) error { - client := dockerCli.Client() - - volFilterArgs := filters.NewArgs() - for _, f := range opts.filter { - var err error - volFilterArgs, err = filters.ParseFlag(f, volFilterArgs) - if err != nil { - return err - } - } - - volumes, err := client.VolumeList(context.Background(), volFilterArgs) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - if !opts.quiet { - for _, warn := range volumes.Warnings { - fmt.Fprintln(dockerCli.Err(), warn) - } - fmt.Fprintf(w, "DRIVER \tVOLUME NAME") - fmt.Fprintf(w, "\n") - } - - sort.Sort(byVolumeName(volumes.Volumes)) - for _, vol := range volumes.Volumes { - if opts.quiet { - fmt.Fprintln(w, vol.Name) - continue - } - fmt.Fprintf(w, "%s\t%s\n", vol.Driver, vol.Name) - } - w.Flush() - return nil -} diff --git a/api/client/volume/remove.go b/api/client/volume/remove.go deleted file mode 100644 index f47c93ba4..000000000 --- a/api/client/volume/remove.go +++ /dev/null @@ -1,43 +0,0 @@ -package volume - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "rm VOLUME [VOLUME]...", - Aliases: []string{"remove"}, - Short: "Remove a volume", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args) - }, - } -} - -func runRemove(dockerCli *client.DockerCli, volumes []string) error { - client := dockerCli.Client() - ctx := context.Background() - status := 0 - - for _, name := range volumes { - if err := client.VolumeRemove(ctx, name); err != nil { - fmt.Fprintf(dockerCli.Err(), "%s\n", err) - status = 1 - continue - } - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - - if status != 0 { - return cli.StatusError{StatusCode: status} - } - return nil -} diff --git a/api/common.go b/api/common.go deleted file mode 100644 index f0a7d4834..000000000 --- a/api/common.go +++ /dev/null @@ -1,169 +0,0 @@ -package api - -import ( - "encoding/json" - "encoding/pem" - "fmt" - "mime" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/system" - "github.com/docker/engine-api/types" - "github.com/docker/libtrust" -) - -// Common constants for daemon and client. -const ( - // Version of Current REST API - DefaultVersion string = "1.25" - - // MinVersion represents Minimum REST API version supported - MinVersion string = "1.12" - - // NoBaseImageSpecifier is the symbol used by the FROM - // command to specify that no base image is to be used. - NoBaseImageSpecifier string = "scratch" -) - -// byPortInfo is a temporary type used to sort types.Port by its fields -type byPortInfo []types.Port - -func (r byPortInfo) Len() int { return len(r) } -func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byPortInfo) Less(i, j int) bool { - if r[i].PrivatePort != r[j].PrivatePort { - return r[i].PrivatePort < r[j].PrivatePort - } - - if r[i].IP != r[j].IP { - return r[i].IP < r[j].IP - } - - if r[i].PublicPort != r[j].PublicPort { - return r[i].PublicPort < r[j].PublicPort - } - - return r[i].Type < r[j].Type -} - -// DisplayablePorts returns formatted string representing open ports of container -// e.g. 
"0.0.0.0:80->9090/tcp, 9988/tcp" -// it's used by command 'docker ps' -func DisplayablePorts(ports []types.Port) string { - type portGroup struct { - first int - last int - } - groupMap := make(map[string]*portGroup) - var result []string - var hostMappings []string - var groupMapKeys []string - sort.Sort(byPortInfo(ports)) - for _, port := range ports { - current := port.PrivatePort - portKey := port.Type - if port.IP != "" { - if port.PublicPort != current { - hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) - continue - } - portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) - } - group := groupMap[portKey] - - if group == nil { - groupMap[portKey] = &portGroup{first: current, last: current} - // record order that groupMap keys are created - groupMapKeys = append(groupMapKeys, portKey) - continue - } - if current == (group.last + 1) { - group.last = current - continue - } - - result = append(result, formGroup(portKey, group.first, group.last)) - groupMap[portKey] = &portGroup{first: current, last: current} - } - for _, portKey := range groupMapKeys { - g := groupMap[portKey] - result = append(result, formGroup(portKey, g.first, g.last)) - } - result = append(result, hostMappings...) - return strings.Join(result, ", ") -} - -func formGroup(key string, start, last int) string { - parts := strings.Split(key, "/") - groupType := parts[0] - var ip string - if len(parts) > 1 { - ip = parts[0] - groupType = parts[1] - } - group := strconv.Itoa(start) - if start != last { - group = fmt.Sprintf("%s-%d", group, last) - } - if ip != "" { - group = fmt.Sprintf("%s:%s->%s", ip, group, group) - } - return fmt.Sprintf("%s/%s", group, groupType) -} - -// MatchesContentType validates the content type against the expected one -func MatchesContentType(contentType, expectedType string) bool { - mimetype, _, err := mime.ParseMediaType(contentType) - if err != nil { - logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) - } - return err == nil && mimetype == expectedType -} - -// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, -// otherwise generates a new one -func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { - err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) - if err != nil { - return nil, err - } - trustKey, err := libtrust.LoadKeyFile(trustKeyPath) - if err == libtrust.ErrKeyFileDoesNotExist { - trustKey, err = libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("Error generating key: %s", err) - } - encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) - if err != nil { - return nil, fmt.Errorf("Error serializing key: %s", err) - } - if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { - return nil, fmt.Errorf("Error saving key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) - } - return trustKey, nil -} - -func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { - if ext == ".json" || ext == ".jwk" { - encoded, err = json.Marshal(key) - if err != nil { - return nil, fmt.Errorf("unable to encode private key JWK: %s", err) - } - } else { - pemBlock, err := key.PEMBlock() - if err != nil { - return nil, fmt.Errorf("unable to encode private key PEM: %s", err) - } - encoded = pem.EncodeToMemory(pemBlock) - } - return -} diff --git a/api/common_test.go 
b/api/common_test.go deleted file mode 100644 index c214660cc..000000000 --- a/api/common_test.go +++ /dev/null @@ -1,341 +0,0 @@ -package api - -import ( - "io/ioutil" - "path/filepath" - "testing" - - "os" - - "github.com/docker/engine-api/types" -) - -type ports struct { - ports []types.Port - expected string -} - -// DisplayablePorts -func TestDisplayablePorts(t *testing.T) { - cases := []ports{ - { - []types.Port{ - { - PrivatePort: 9988, - Type: "tcp", - }, - }, - "9988/tcp"}, - { - []types.Port{ - { - PrivatePort: 9988, - Type: "udp", - }, - }, - "9988/udp", - }, - { - []types.Port{ - { - IP: "0.0.0.0", - PrivatePort: 9988, - Type: "tcp", - }, - }, - "0.0.0.0:0->9988/tcp", - }, - { - []types.Port{ - { - PrivatePort: 9988, - PublicPort: 8899, - Type: "tcp", - }, - }, - "9988/tcp", - }, - { - []types.Port{ - { - IP: "4.3.2.1", - PrivatePort: 9988, - PublicPort: 8899, - Type: "tcp", - }, - }, - "4.3.2.1:8899->9988/tcp", - }, - { - []types.Port{ - { - IP: "4.3.2.1", - PrivatePort: 9988, - PublicPort: 9988, - Type: "tcp", - }, - }, - "4.3.2.1:9988->9988/tcp", - }, - { - []types.Port{ - { - PrivatePort: 9988, - Type: "udp", - }, { - PrivatePort: 9988, - Type: "udp", - }, - }, - "9988/udp, 9988/udp", - }, - { - []types.Port{ - { - IP: "1.2.3.4", - PublicPort: 9998, - PrivatePort: 9998, - Type: "udp", - }, { - IP: "1.2.3.4", - PublicPort: 9999, - PrivatePort: 9999, - Type: "udp", - }, - }, - "1.2.3.4:9998-9999->9998-9999/udp", - }, - { - []types.Port{ - { - IP: "1.2.3.4", - PublicPort: 8887, - PrivatePort: 9998, - Type: "udp", - }, { - IP: "1.2.3.4", - PublicPort: 8888, - PrivatePort: 9999, - Type: "udp", - }, - }, - "1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp", - }, - { - []types.Port{ - { - PrivatePort: 9998, - Type: "udp", - }, { - PrivatePort: 9999, - Type: "udp", - }, - }, - "9998-9999/udp", - }, - { - []types.Port{ - { - IP: "1.2.3.4", - PrivatePort: 6677, - PublicPort: 7766, - Type: "tcp", - }, { - PrivatePort: 9988, - PublicPort: 8899, - Type: "udp", - }, - }, - "9988/udp, 1.2.3.4:7766->6677/tcp", - }, - { - []types.Port{ - { - IP: "1.2.3.4", - PrivatePort: 9988, - PublicPort: 8899, - Type: "udp", - }, { - IP: "1.2.3.4", - PrivatePort: 9988, - PublicPort: 8899, - Type: "tcp", - }, { - IP: "4.3.2.1", - PrivatePort: 2233, - PublicPort: 3322, - Type: "tcp", - }, - }, - "4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp", - }, - { - []types.Port{ - { - PrivatePort: 9988, - PublicPort: 8899, - Type: "udp", - }, { - IP: "1.2.3.4", - PrivatePort: 6677, - PublicPort: 7766, - Type: "tcp", - }, { - IP: "4.3.2.1", - PrivatePort: 2233, - PublicPort: 3322, - Type: "tcp", - }, - }, - "9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp", - }, - { - []types.Port{ - { - PrivatePort: 80, - Type: "tcp", - }, { - PrivatePort: 1024, - Type: "tcp", - }, { - PrivatePort: 80, - Type: "udp", - }, { - PrivatePort: 1024, - Type: "udp", - }, { - IP: "1.1.1.1", - PublicPort: 80, - PrivatePort: 1024, - Type: "tcp", - }, { - IP: "1.1.1.1", - PublicPort: 80, - PrivatePort: 1024, - Type: "udp", - }, { - IP: "1.1.1.1", - PublicPort: 1024, - PrivatePort: 80, - Type: "tcp", - }, { - IP: "1.1.1.1", - PublicPort: 1024, - PrivatePort: 80, - Type: "udp", - }, { - IP: "2.1.1.1", - PublicPort: 80, - PrivatePort: 1024, - Type: "tcp", - }, { - IP: "2.1.1.1", - PublicPort: 80, - PrivatePort: 1024, - Type: "udp", - }, { - IP: "2.1.1.1", - PublicPort: 1024, - PrivatePort: 80, - Type: "tcp", - }, { - IP: "2.1.1.1", - PublicPort: 1024, - PrivatePort: 80, - Type: "udp", - }, - }, - "80/tcp, 80/udp, 
1024/tcp, 1024/udp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp", - }, - } - - for _, port := range cases { - actual := DisplayablePorts(port.ports) - if port.expected != actual { - t.Fatalf("Expected %s, got %s.", port.expected, actual) - } - } -} - -// MatchesContentType -func TestJsonContentType(t *testing.T) { - if !MatchesContentType("application/json", "application/json") { - t.Fail() - } - - if !MatchesContentType("application/json; charset=utf-8", "application/json") { - t.Fail() - } - - if MatchesContentType("dockerapplication/json", "application/json") { - t.Fail() - } -} - -// LoadOrCreateTrustKey -func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { - tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpKeyFolderPath) - - tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") - if err != nil { - t.Fatal(err) - } - - if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil { - t.Fatalf("expected an error, got nothing.") - } - -} - -func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) { - tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpKeyFolderPath) - - // Without the need to create the folder hierarchy - tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile") - - if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { - t.Fatalf("expected a new key file, got : %v and %v", err, key) - } - - if _, err := os.Stat(tmpKeyFile); err != nil { - t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) - } - - // With the need to create the folder hierarchy as tmpKeyFie is in a path - // where some folders do not exist. 
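// Editor's sketch: the core of DisplayablePorts above is grouping
// consecutive private ports of the same protocol into ranges, which is what
// the "9998-9999/udp" expectations in these tests exercise. A reduced
// version for unpublished ports of a single protocol; names are
// illustrative, not the deleted API.
package main

import (
	"fmt"
	"sort"
	"strconv"
)

func groupPorts(ports []int, proto string) string {
	sort.Ints(ports)
	var out string
	for i := 0; i < len(ports); {
		// extend j while the next port continues the run
		j := i
		for j+1 < len(ports) && ports[j+1] == ports[j]+1 {
			j++
		}
		s := strconv.Itoa(ports[i])
		if j > i {
			s += "-" + strconv.Itoa(ports[j])
		}
		if out != "" {
			out += ", "
		}
		out += s + "/" + proto
		i = j + 1
	}
	return out
}

func main() {
	fmt.Println(groupPorts([]int{9999, 9998, 80}, "udp")) // "80/udp, 9998-9999/udp"
}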
- tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile") - - if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { - t.Fatalf("expected a new key file, got : %v and %v", err, key) - } - - if _, err := os.Stat(tmpKeyFile); err != nil { - t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) - } - - // With no path at all - defer os.Remove("keyfile") - if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil { - t.Fatalf("expected a new key file, got : %v and %v", err, key) - } - - if _, err := os.Stat("keyfile"); err != nil { - t.Fatalf("Expected to find a file keyfile, got %v", err) - } -} - -func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { - tmpKeyFile := filepath.Join("fixtures", "keyfile") - - if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { - t.Fatalf("expected a key file, got : %v and %v", err, key) - } -} diff --git a/api/fixtures/keyfile b/api/fixtures/keyfile deleted file mode 100644 index 322f25440..000000000 --- a/api/fixtures/keyfile +++ /dev/null @@ -1,7 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY - -MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49 -AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky -NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA== ------END EC PRIVATE KEY----- diff --git a/api/server/httputils/decoder.go b/api/server/httputils/decoder.go deleted file mode 100644 index dbe469cca..000000000 --- a/api/server/httputils/decoder.go +++ /dev/null @@ -1,16 +0,0 @@ -package httputils - -import ( - "io" - - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" -) - -// ContainerDecoder specifies how -// to translate an io.Reader into -// container configuration. -type ContainerDecoder interface { - DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error) - DecodeHostConfig(src io.Reader) (*container.HostConfig, error) -} diff --git a/api/server/httputils/errors.go b/api/server/httputils/errors.go deleted file mode 100644 index da4db9791..000000000 --- a/api/server/httputils/errors.go +++ /dev/null @@ -1,93 +0,0 @@ -package httputils - -import ( - "net/http" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/versions" - "github.com/gorilla/mux" - "google.golang.org/grpc" -) - -// httpStatusError is an interface -// that errors with custom status codes -// implement to tell the api layer -// which response status to set. -type httpStatusError interface { - HTTPErrorStatusCode() int -} - -// inputValidationError is an interface -// that errors generated by invalid -// inputs can implement to tell the -// api layer to set a 400 status code -// in the response. 
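// Editor's sketch of the typed-error pattern that the httpStatusError
// interface above and GetHTTPErrorStatusCode below rely on: an error value
// advertises its HTTP status by implementing a small interface, and the API
// layer type-switches on it instead of string-matching. Names here are
// illustrative.
package main

import (
	"fmt"
	"net/http"
)

type httpStatusError interface {
	HTTPErrorStatusCode() int
}

type notFoundError struct{ what string }

func (e notFoundError) Error() string          { return e.what + ": not found" }
func (notFoundError) HTTPErrorStatusCode() int { return http.StatusNotFound }

func statusOf(err error) int {
	if se, ok := err.(httpStatusError); ok {
		return se.HTTPErrorStatusCode()
	}
	return http.StatusInternalServerError
}

func main() {
	fmt.Println(statusOf(notFoundError{"volume foo"})) // 404
	fmt.Println(statusOf(fmt.Errorf("boom")))          // 500
}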
-type inputValidationError interface { - IsValidationError() bool -} - -// GetHTTPErrorStatusCode retrieve status code from error message -func GetHTTPErrorStatusCode(err error) int { - if err == nil { - logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") - return http.StatusInternalServerError - } - - var statusCode int - errMsg := err.Error() - - switch e := err.(type) { - case httpStatusError: - statusCode = e.HTTPErrorStatusCode() - case inputValidationError: - statusCode = http.StatusBadRequest - default: - // FIXME: this is brittle and should not be necessary, but we still need to identify if - // there are errors falling back into this logic. - // If we need to differentiate between different possible error types, - // we should create appropriate error types that implement the httpStatusError interface. - errStr := strings.ToLower(errMsg) - for keyword, status := range map[string]int{ - "not found": http.StatusNotFound, - "no such": http.StatusNotFound, - "bad parameter": http.StatusBadRequest, - "no command": http.StatusBadRequest, - "conflict": http.StatusConflict, - "impossible": http.StatusNotAcceptable, - "wrong login/password": http.StatusUnauthorized, - "unauthorized": http.StatusUnauthorized, - "hasn't been activated": http.StatusForbidden, - "this node": http.StatusNotAcceptable, - } { - if strings.Contains(errStr, keyword) { - statusCode = status - break - } - } - } - - if statusCode == 0 { - statusCode = http.StatusInternalServerError - } - - return statusCode -} - -// MakeErrorHandler makes an HTTP handler that decodes a Docker error and -// returns it in the response. -func MakeErrorHandler(err error) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - statusCode := GetHTTPErrorStatusCode(err) - vars := mux.Vars(r) - if vars["version"] == "" || versions.GreaterThan(vars["version"], "1.23") { - response := &types.ErrorResponse{ - Message: err.Error(), - } - WriteJSON(w, statusCode, response) - } else { - http.Error(w, grpc.ErrorDesc(err), statusCode) - } - } -} diff --git a/api/server/httputils/form.go b/api/server/httputils/form.go deleted file mode 100644 index 20188c12d..000000000 --- a/api/server/httputils/form.go +++ /dev/null @@ -1,73 +0,0 @@ -package httputils - -import ( - "fmt" - "net/http" - "path/filepath" - "strconv" - "strings" -) - -// BoolValue transforms a form value in different formats into a boolean type. -func BoolValue(r *http.Request, k string) bool { - s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) - return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") -} - -// BoolValueOrDefault returns the default bool passed if the query param is -// missing, otherwise it's just a proxy to boolValue above -func BoolValueOrDefault(r *http.Request, k string, d bool) bool { - if _, ok := r.Form[k]; !ok { - return d - } - return BoolValue(r, k) -} - -// Int64ValueOrZero parses a form value into an int64 type. -// It returns 0 if the parsing fails. -func Int64ValueOrZero(r *http.Request, k string) int64 { - val, err := Int64ValueOrDefault(r, k, 0) - if err != nil { - return 0 - } - return val -} - -// Int64ValueOrDefault parses a form value into an int64 type. If there is an -// error, returns the error. If there is no value returns the default value. 
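// Editor's sketch exercising the permissive boolean parsing of BoolValue
// above: anything that is not one of "", "0", "no", "false", "none" counts
// as true, which is why the form_test cases treat "one" and "100" as true.
// The helper below mirrors that logic on a plain string for illustration.
package main

import (
	"fmt"
	"strings"
)

func boolValue(raw string) bool {
	s := strings.ToLower(strings.TrimSpace(raw))
	return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
}

func main() {
	for _, v := range []string{"", "0", "no", "1", "yes", "one", "100"} {
		fmt.Printf("%q -> %v\n", v, boolValue(v))
	}
}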
-func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { - if r.Form.Get(field) != "" { - value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) - if err != nil { - return value, err - } - return value, nil - } - return def, nil -} - -// ArchiveOptions stores archive information for different operations. -type ArchiveOptions struct { - Name string - Path string -} - -// ArchiveFormValues parses form values and turns them into ArchiveOptions. -// It fails if the archive name and path are not in the request. -func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { - if err := ParseForm(r); err != nil { - return ArchiveOptions{}, err - } - - name := vars["name"] - path := filepath.FromSlash(r.Form.Get("path")) - - switch { - case name == "": - return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") - case path == "": - return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") - } - - return ArchiveOptions{name, path}, nil -} diff --git a/api/server/httputils/form_test.go b/api/server/httputils/form_test.go deleted file mode 100644 index c56f7c15e..000000000 --- a/api/server/httputils/form_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package httputils - -import ( - "net/http" - "net/url" - "testing" -) - -func TestBoolValue(t *testing.T) { - cases := map[string]bool{ - "": false, - "0": false, - "no": false, - "false": false, - "none": false, - "1": true, - "yes": true, - "true": true, - "one": true, - "100": true, - } - - for c, e := range cases { - v := url.Values{} - v.Set("test", c) - r, _ := http.NewRequest("POST", "", nil) - r.Form = v - - a := BoolValue(r, "test") - if a != e { - t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) - } - } -} - -func TestBoolValueOrDefault(t *testing.T) { - r, _ := http.NewRequest("GET", "", nil) - if !BoolValueOrDefault(r, "queryparam", true) { - t.Fatal("Expected to get true default value, got false") - } - - v := url.Values{} - v.Set("param", "") - r, _ = http.NewRequest("GET", "", nil) - r.Form = v - if BoolValueOrDefault(r, "param", true) { - t.Fatal("Expected not to get true") - } -} - -func TestInt64ValueOrZero(t *testing.T) { - cases := map[string]int64{ - "": 0, - "asdf": 0, - "0": 0, - "1": 1, - } - - for c, e := range cases { - v := url.Values{} - v.Set("test", c) - r, _ := http.NewRequest("POST", "", nil) - r.Form = v - - a := Int64ValueOrZero(r, "test") - if a != e { - t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) - } - } -} - -func TestInt64ValueOrDefault(t *testing.T) { - cases := map[string]int64{ - "": -1, - "-1": -1, - "42": 42, - } - - for c, e := range cases { - v := url.Values{} - v.Set("test", c) - r, _ := http.NewRequest("POST", "", nil) - r.Form = v - - a, err := Int64ValueOrDefault(r, "test", -1) - if a != e { - t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) - } - if err != nil { - t.Fatalf("Error should be nil, but received: %s", err) - } - } -} - -func TestInt64ValueOrDefaultWithError(t *testing.T) { - v := url.Values{} - v.Set("test", "invalid") - r, _ := http.NewRequest("POST", "", nil) - r.Form = v - - _, err := Int64ValueOrDefault(r, "test", -1) - if err == nil { - t.Fatalf("Expected an error.") - } -} diff --git a/api/server/httputils/httputils.go b/api/server/httputils/httputils.go deleted file mode 100644 index 3b0f7a0dc..000000000 --- a/api/server/httputils/httputils.go +++ /dev/null @@ -1,106 +0,0 @@ -package httputils - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "strings" - - 
"golang.org/x/net/context" - - "github.com/docker/docker/api" -) - -// APIVersionKey is the client's requested API version. -const APIVersionKey = "api-version" - -// UAStringKey is used as key type for user-agent string in net/context struct -const UAStringKey = "upstream-user-agent" - -// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. -// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion). -type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error - -// HijackConnection interrupts the http response writer to get the -// underlying connection and operate with it. -func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { - conn, _, err := w.(http.Hijacker).Hijack() - if err != nil { - return nil, nil, err - } - // Flush the options to make sure the client sets the raw mode - conn.Write([]byte{}) - return conn, conn, nil -} - -// CloseStreams ensures that a list for http streams are properly closed. -func CloseStreams(streams ...interface{}) { - for _, stream := range streams { - if tcpc, ok := stream.(interface { - CloseWrite() error - }); ok { - tcpc.CloseWrite() - } else if closer, ok := stream.(io.Closer); ok { - closer.Close() - } - } -} - -// CheckForJSON makes sure that the request's Content-Type is application/json. -func CheckForJSON(r *http.Request) error { - ct := r.Header.Get("Content-Type") - - // No Content-Type header is ok as long as there's no Body - if ct == "" { - if r.Body == nil || r.ContentLength == 0 { - return nil - } - } - - // Otherwise it better be json - if api.MatchesContentType(ct, "application/json") { - return nil - } - return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) -} - -// ParseForm ensures the request form is parsed even with invalid content types. -// If we don't do this, POST method without Content-type (even with empty body) will fail. -func ParseForm(r *http.Request) error { - if r == nil { - return nil - } - if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -// ParseMultipartForm ensures the request form is parsed, even with invalid content types. -func ParseMultipartForm(r *http.Request) error { - if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -// WriteJSON writes the value v to the http response stream as json with standard json encoding. -func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - return json.NewEncoder(w).Encode(v) -} - -// VersionFromContext returns an API version from the context using APIVersionKey. -// It panics if the context value does not have version.Version type. -func VersionFromContext(ctx context.Context) (ver string) { - if ctx == nil { - return - } - val := ctx.Value(APIVersionKey) - if val == nil { - return - } - return val.(string) -} diff --git a/api/server/middleware.go b/api/server/middleware.go deleted file mode 100644 index 108e3c077..000000000 --- a/api/server/middleware.go +++ /dev/null @@ -1,24 +0,0 @@ -package server - -import ( - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/middleware" -) - -// handleWithGlobalMiddlwares wraps the handler function for a request with -// the server's global middlewares. 
The order of the middlewares is backwards, -// meaning that the first in the list will be evaluated last. -func (s *Server) handleWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc { - next := handler - - for _, m := range s.middlewares { - next = m.WrapHandler(next) - } - - if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel { - next = middleware.DebugRequestMiddleware(next) - } - - return next -} diff --git a/api/server/middleware/cors.go b/api/server/middleware/cors.go deleted file mode 100644 index ea725dbc7..000000000 --- a/api/server/middleware/cors.go +++ /dev/null @@ -1,37 +0,0 @@ -package middleware - -import ( - "net/http" - - "github.com/Sirupsen/logrus" - "golang.org/x/net/context" -) - -// CORSMiddleware injects CORS headers to each request -// when it's configured. -type CORSMiddleware struct { - defaultHeaders string -} - -// NewCORSMiddleware creates a new CORSMiddleware with default headers. -func NewCORSMiddleware(d string) CORSMiddleware { - return CORSMiddleware{defaultHeaders: d} -} - -// WrapHandler returns a new handler function wrapping the previous one in the request chain. -func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" - // otherwise, all head values will be passed to HTTP handler - corsHeaders := c.defaultHeaders - if corsHeaders == "" { - corsHeaders = "*" - } - - logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) - w.Header().Add("Access-Control-Allow-Origin", corsHeaders) - w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") - w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") - return handler(ctx, w, r, vars) - } -} diff --git a/api/server/middleware/debug.go b/api/server/middleware/debug.go deleted file mode 100644 index e0167f0a7..000000000 --- a/api/server/middleware/debug.go +++ /dev/null @@ -1,76 +0,0 @@ -package middleware - -import ( - "bufio" - "encoding/json" - "io" - "net/http" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/pkg/ioutils" - "golang.org/x/net/context" -) - -// DebugRequestMiddleware dumps the request to logger -func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - logrus.Debugf("Calling %s %s", r.Method, r.RequestURI) - - if r.Method != "POST" { - return handler(ctx, w, r, vars) - } - if err := httputils.CheckForJSON(r); err != nil { - return handler(ctx, w, r, vars) - } - maxBodySize := 4096 // 4KB - if r.ContentLength > int64(maxBodySize) { - return handler(ctx, w, r, vars) - } - - body := r.Body - bufReader := bufio.NewReaderSize(body, maxBodySize) - r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) - - b, err := bufReader.Peek(maxBodySize) - if err != io.EOF { - // either there was an error reading, or the buffer is full 
(in which case the request is too large) - return handler(ctx, w, r, vars) - } - - var postForm map[string]interface{} - if err := json.Unmarshal(b, &postForm); err == nil { - maskSecretKeys(postForm) - formStr, errMarshal := json.Marshal(postForm) - if errMarshal == nil { - logrus.Debugf("form data: %s", string(formStr)) - } else { - logrus.Debugf("form data: %q", postForm) - } - } - - return handler(ctx, w, r, vars) - } -} - -func maskSecretKeys(inp interface{}) { - if arr, ok := inp.([]interface{}); ok { - for _, f := range arr { - maskSecretKeys(f) - } - return - } - if form, ok := inp.(map[string]interface{}); ok { - loop0: - for k, v := range form { - for _, m := range []string{"password", "secret"} { - if strings.EqualFold(m, k) { - form[k] = "*****" - continue loop0 - } - } - maskSecretKeys(v) - } - } -} diff --git a/api/server/middleware/middleware.go b/api/server/middleware/middleware.go deleted file mode 100644 index dc1f5bfa0..000000000 --- a/api/server/middleware/middleware.go +++ /dev/null @@ -1,13 +0,0 @@ -package middleware - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// Middleware is an interface to allow the use of ordinary functions as Docker API filters. -// Any struct that has the appropriate signature can be registered as a middleware. -type Middleware interface { - WrapHandler(func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error -} diff --git a/api/server/middleware/user_agent.go b/api/server/middleware/user_agent.go deleted file mode 100644 index 87c8cb83e..000000000 --- a/api/server/middleware/user_agent.go +++ /dev/null @@ -1,47 +0,0 @@ -package middleware - -import ( - "net/http" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/engine-api/types/versions" - "golang.org/x/net/context" -) - -// UserAgentMiddleware is a middleware that -// validates the client user-agent. -type UserAgentMiddleware struct { - serverVersion string -} - -// NewUserAgentMiddleware creates a new UserAgentMiddleware -// with the server version. -func NewUserAgentMiddleware(s string) UserAgentMiddleware { - return UserAgentMiddleware{ - serverVersion: s, - } -} - -// WrapHandler returns a new handler function wrapping the previous one in the request chain. 
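// Editor's sketch of the wrap-in-a-loop composition used by
// handleWithGlobalMiddlewares above: middleware[0] wraps the handler first,
// so it ends up innermost and runs *last* — exactly the "backwards" order
// the comment there describes. Names are illustrative.
package main

import "fmt"

type handler func()

func wrap(name string, next handler) handler {
	return func() {
		fmt.Println("enter", name)
		next()
		fmt.Println("leave", name)
	}
}

func main() {
	h := handler(func() { fmt.Println("handler") })
	for _, name := range []string{"version", "cors", "debug"} {
		h = wrap(name, h) // first name wrapped first => innermost
	}
	h()
	// Output:
	// enter debug
	// enter cors
	// enter version
	// handler
	// leave version
	// leave cors
	// leave debug
}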
-func (u UserAgentMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - ctx = context.WithValue(ctx, httputils.UAStringKey, r.Header.Get("User-Agent")) - - if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { - userAgent := strings.Split(r.Header.Get("User-Agent"), "/") - - // v1.20 onwards includes the GOOS of the client after the version - // such as Docker/1.7.0 (linux) - if len(userAgent) == 2 && strings.Contains(userAgent[1], " ") { - userAgent[1] = strings.Split(userAgent[1], " ")[0] - } - - if len(userAgent) == 2 && !versions.Equal(u.serverVersion, userAgent[1]) { - logrus.Debugf("Client and server don't have the same version (client: %s, server: %s)", userAgent[1], u.serverVersion) - } - } - return handler(ctx, w, r, vars) - } -} diff --git a/api/server/middleware/version.go b/api/server/middleware/version.go deleted file mode 100644 index eb7bbf3a3..000000000 --- a/api/server/middleware/version.go +++ /dev/null @@ -1,59 +0,0 @@ -package middleware - -import ( - "fmt" - "net/http" - "runtime" - - "github.com/docker/engine-api/types/versions" - "golang.org/x/net/context" -) - -type badRequestError struct { - error -} - -func (badRequestError) HTTPErrorStatusCode() int { - return http.StatusBadRequest -} - -// VersionMiddleware is a middleware that -// validates the client and server versions. -type VersionMiddleware struct { - serverVersion string - defaultVersion string - minVersion string -} - -// NewVersionMiddleware creates a new VersionMiddleware -// with the default versions. -func NewVersionMiddleware(s, d, m string) VersionMiddleware { - return VersionMiddleware{ - serverVersion: s, - defaultVersion: d, - minVersion: m, - } -} - -// WrapHandler returns a new handler function wrapping the previous one in the request chain. -func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - apiVersion := vars["version"] - if apiVersion == "" { - apiVersion = v.defaultVersion - } - - if versions.GreaterThan(apiVersion, v.defaultVersion) { - return badRequestError{fmt.Errorf("client is newer than server (client API version: %s, server API version: %s)", apiVersion, v.defaultVersion)} - } - if versions.LessThan(apiVersion, v.minVersion) { - return badRequestError{fmt.Errorf("client version %s is too old. 
Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, v.minVersion)} - } - - header := fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS) - w.Header().Set("Server", header) - ctx = context.WithValue(ctx, "api-version", apiVersion) - return handler(ctx, w, r, vars) - } - -} diff --git a/api/server/middleware/version_test.go b/api/server/middleware/version_test.go deleted file mode 100644 index 90dee7138..000000000 --- a/api/server/middleware/version_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package middleware - -import ( - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/docker/docker/api/server/httputils" - "golang.org/x/net/context" -) - -func TestVersionMiddleware(t *testing.T) { - handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if httputils.VersionFromContext(ctx) == "" { - t.Fatalf("Expected version, got empty string") - } - return nil - } - - defaultVersion := "1.10.0" - minVersion := "1.2.0" - m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) - h := m.WrapHandler(handler) - - req, _ := http.NewRequest("GET", "/containers/json", nil) - resp := httptest.NewRecorder() - ctx := context.Background() - if err := h(ctx, resp, req, map[string]string{}); err != nil { - t.Fatal(err) - } -} - -func TestVersionMiddlewareWithErrors(t *testing.T) { - handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if httputils.VersionFromContext(ctx) == "" { - t.Fatalf("Expected version, got empty string") - } - return nil - } - - defaultVersion := "1.10.0" - minVersion := "1.2.0" - m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) - h := m.WrapHandler(handler) - - req, _ := http.NewRequest("GET", "/containers/json", nil) - resp := httptest.NewRecorder() - ctx := context.Background() - - vars := map[string]string{"version": "0.1"} - err := h(ctx, resp, req, vars) - - if !strings.Contains(err.Error(), "client version 0.1 is too old. Minimum supported API version is 1.2.0") { - t.Fatalf("Expected too old client error, got %v", err) - } - - vars["version"] = "100000" - err = h(ctx, resp, req, vars) - if !strings.Contains(err.Error(), "client is newer than server") { - t.Fatalf("Expected client newer than server error, got %v", err) - } -} diff --git a/api/server/profiler.go b/api/server/profiler.go deleted file mode 100644 index 8bf8384fd..000000000 --- a/api/server/profiler.go +++ /dev/null @@ -1,41 +0,0 @@ -package server - -import ( - "expvar" - "fmt" - "net/http" - "net/http/pprof" - - "github.com/gorilla/mux" -) - -const debugPathPrefix = "/debug/" - -func profilerSetup(mainRouter *mux.Router) { - var r = mainRouter.PathPrefix(debugPathPrefix).Subrouter() - r.HandleFunc("/vars", expVars) - r.HandleFunc("/pprof/", pprof.Index) - r.HandleFunc("/pprof/cmdline", pprof.Cmdline) - r.HandleFunc("/pprof/profile", pprof.Profile) - r.HandleFunc("/pprof/symbol", pprof.Symbol) - r.HandleFunc("/pprof/trace", pprof.Trace) - r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) - r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) - r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) - r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) -} - -// Replicated from expvar.go as not public. 
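// Editor's sketch of the API-version gate applied by VersionMiddleware
// above: a request version above the server's version or below the minimum
// is rejected before the handler runs. compareVersions is a simplified
// stand-in for the versions package used by the deleted code; all names are
// illustrative.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// compareVersions returns -1, 0, or 1 comparing dotted numeric versions.
func compareVersions(a, b string) int {
	as, bs := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(as) || i < len(bs); i++ {
		var ai, bi int
		if i < len(as) {
			ai, _ = strconv.Atoi(as[i])
		}
		if i < len(bs) {
			bi, _ = strconv.Atoi(bs[i])
		}
		if ai != bi {
			if ai < bi {
				return -1
			}
			return 1
		}
	}
	return 0
}

func checkVersion(api, def, min string) error {
	if api == "" {
		api = def // no version in the URL falls back to the default
	}
	if compareVersions(api, def) > 0 {
		return fmt.Errorf("client is newer than server (client API version: %s, server API version: %s)", api, def)
	}
	if compareVersions(api, min) < 0 {
		return fmt.Errorf("client version %s is too old. Minimum supported API version is %s", api, min)
	}
	return nil
}

func main() {
	fmt.Println(checkVersion("0.1", "1.10.0", "1.2.0"))    // too old
	fmt.Println(checkVersion("100000", "1.10.0", "1.2.0")) // newer than server
	fmt.Println(checkVersion("", "1.10.0", "1.2.0"))       // <nil>: defaults to 1.10.0
}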
-func expVars(w http.ResponseWriter, r *http.Request) { - first := true - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} diff --git a/api/server/router/build/backend.go b/api/server/router/build/backend.go deleted file mode 100644 index 18ba1b276..000000000 --- a/api/server/router/build/backend.go +++ /dev/null @@ -1,20 +0,0 @@ -package build - -import ( - "io" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. -type Backend interface { - // Build builds a Docker image referenced by an imageID string. - // - // Note: Tagging an image should not be done by a Builder, it should instead be done - // by the caller. - // - // TODO: make this return a reference instead of string - BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) -} diff --git a/api/server/router/build/build.go b/api/server/router/build/build.go deleted file mode 100644 index 959498e0f..000000000 --- a/api/server/router/build/build.go +++ /dev/null @@ -1,29 +0,0 @@ -package build - -import "github.com/docker/docker/api/server/router" - -// buildRouter is a router to talk with the build controller -type buildRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new build router -func NewRouter(b Backend) router.Router { - r := &buildRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routers to the build controller -func (r *buildRouter) Routes() []router.Route { - return r.routes -} - -func (r *buildRouter) initRoutes() { - r.routes = []router.Route{ - router.Cancellable(router.NewPostRoute("/build", r.postBuild)), - } -} diff --git a/api/server/router/build/build_routes.go b/api/server/router/build/build_routes.go deleted file mode 100644 index b5a7c28b6..000000000 --- a/api/server/router/build/build_routes.go +++ /dev/null @@ -1,194 +0,0 @@ -package build - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/versions" - "github.com/docker/go-units" - "golang.org/x/net/context" -) - -func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { - version := httputils.VersionFromContext(ctx) - options := &types.ImageBuildOptions{} - if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") { - options.Remove = true - } else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") { - options.Remove = true - } else { - options.Remove = httputils.BoolValue(r, "rm") - } - if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") { - options.PullParent = true - } - - options.Dockerfile = 
r.FormValue("dockerfile") - options.SuppressOutput = httputils.BoolValue(r, "q") - options.NoCache = httputils.BoolValue(r, "nocache") - options.ForceRemove = httputils.BoolValue(r, "forcerm") - options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") - options.Memory = httputils.Int64ValueOrZero(r, "memory") - options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") - options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") - options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") - options.CPUSetCPUs = r.FormValue("cpusetcpus") - options.CPUSetMems = r.FormValue("cpusetmems") - options.CgroupParent = r.FormValue("cgroupparent") - options.Tags = r.Form["t"] - - if r.Form.Get("shmsize") != "" { - shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) - if err != nil { - return nil, err - } - options.ShmSize = shmSize - } - - if i := container.Isolation(r.FormValue("isolation")); i != "" { - if !container.Isolation.IsValid(i) { - return nil, fmt.Errorf("Unsupported isolation: %q", i) - } - options.Isolation = i - } - - var buildUlimits = []*units.Ulimit{} - ulimitsJSON := r.FormValue("ulimits") - if ulimitsJSON != "" { - if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil { - return nil, err - } - options.Ulimits = buildUlimits - } - - var buildArgs = map[string]string{} - buildArgsJSON := r.FormValue("buildargs") - if buildArgsJSON != "" { - if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil { - return nil, err - } - options.BuildArgs = buildArgs - } - var labels = map[string]string{} - labelsJSON := r.FormValue("labels") - if labelsJSON != "" { - if err := json.NewDecoder(strings.NewReader(labelsJSON)).Decode(&labels); err != nil { - return nil, err - } - options.Labels = labels - } - - return options, nil -} - -type syncWriter struct { - w io.Writer - mu sync.Mutex -} - -func (s *syncWriter) Write(b []byte) (count int, err error) { - s.mu.Lock() - count, err = s.w.Write(b) - s.mu.Unlock() - return -} - -func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var ( - authConfigs = map[string]types.AuthConfig{} - authConfigsEncoded = r.Header.Get("X-Registry-Config") - notVerboseBuffer = bytes.NewBuffer(nil) - ) - - if authConfigsEncoded != "" { - authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) - if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting - // to be empty. - } - } - - w.Header().Set("Content-Type", "application/json") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - sf := streamformatter.NewJSONStreamFormatter() - errf := func(err error) error { - if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { - output.Write(notVerboseBuffer.Bytes()) - } - // Do not write the error in the http output if it's still empty. - // This prevents from writing a 200(OK) when there is an internal error. 
- if !output.Flushed() { - return err - } - _, err = w.Write(sf.FormatError(err)) - if err != nil { - logrus.Warnf("could not write error response: %v", err) - } - return nil - } - - buildOptions, err := newImageBuildOptions(ctx, r) - if err != nil { - return errf(err) - } - buildOptions.AuthConfigs = authConfigs - - remoteURL := r.FormValue("remote") - - // Currently, only used if context is from a remote url. - // Look at code in DetectContextFromRemoteURL for more information. - createProgressReader := func(in io.ReadCloser) io.ReadCloser { - progressOutput := sf.NewProgressOutput(output, true) - if buildOptions.SuppressOutput { - progressOutput = sf.NewProgressOutput(notVerboseBuffer, true) - } - return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL) - } - - var out io.Writer = output - if buildOptions.SuppressOutput { - out = notVerboseBuffer - } - out = &syncWriter{w: out} - stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf} - stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf} - - pg := backend.ProgressWriter{ - Output: out, - StdoutFormatter: stdout, - StderrFormatter: stderr, - ProgressReaderFunc: createProgressReader, - } - - imgID, err := br.backend.BuildFromContext(ctx, r.Body, remoteURL, buildOptions, pg) - if err != nil { - return errf(err) - } - - // Everything worked so if -q was provided the output from the daemon - // should be just the image ID and we'll print that to stdout. - if buildOptions.SuppressOutput { - stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} - fmt.Fprintf(stdout, "%s\n", string(imgID)) - } - - return nil -} diff --git a/api/server/router/container/backend.go b/api/server/router/container/backend.go deleted file mode 100644 index 444260af9..000000000 --- a/api/server/router/container/backend.go +++ /dev/null @@ -1,72 +0,0 @@ -package container - -import ( - "io" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/pkg/archive" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" -) - -// execBackend includes functions to implement to provide exec functionality. -type execBackend interface { - ContainerExecCreate(name string, config *types.ExecConfig) (string, error) - ContainerExecInspect(id string) (*backend.ExecInspect, error) - ContainerExecResize(name string, height, width int) error - ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error - ExecExists(name string) (bool, error) -} - -// copyBackend includes functions to implement to provide container copy functionality. -type copyBackend interface { - ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) - ContainerCopy(name string, res string) (io.ReadCloser, error) - ContainerExport(name string, out io.Writer) error - ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error - ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) -} - -// stateBackend includes functions to implement to provide container state lifecycle functionality. 
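// Editor's sketch of the interface-embedding pattern this backend.go uses:
// small capability interfaces (execBackend, copyBackend, stateBackend, ...)
// are composed into one Backend, so a daemon type satisfies Backend simply
// by implementing every method set. A reduced example with illustrative
// names and methods.
package main

import "fmt"

type stateBackend interface {
	Start(name string) error
	Stop(name string) error
}

type monitorBackend interface {
	Top(name string) (string, error)
}

// Backend is the composed surface a router would depend on.
type Backend interface {
	stateBackend
	monitorBackend
}

type daemon struct{}

func (daemon) Start(name string) error { fmt.Println("start", name); return nil }
func (daemon) Stop(name string) error  { fmt.Println("stop", name); return nil }
func (daemon) Top(name string) (string, error) { return "PID 1", nil }

func main() {
	var b Backend = daemon{} // compiles only if daemon covers both method sets
	b.Start("web")
	out, _ := b.Top("web")
	fmt.Println(out)
}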
-type stateBackend interface { - ContainerCreate(config types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error) - ContainerKill(name string, sig uint64) error - ContainerPause(name string) error - ContainerRename(oldName, newName string) error - ContainerResize(name string, height, width int) error - ContainerRestart(name string, seconds int) error - ContainerRm(name string, config *types.ContainerRmConfig) error - ContainerStart(name string, hostConfig *container.HostConfig, validateHostname bool) error - ContainerStop(name string, seconds int) error - ContainerUnpause(name string) error - ContainerUpdate(name string, hostConfig *container.HostConfig, validateHostname bool) ([]string, error) - ContainerWait(name string, timeout time.Duration) (int, error) -} - -// monitorBackend includes functions to implement to provide containers monitoring functionality. -type monitorBackend interface { - ContainerChanges(name string) ([]archive.Change, error) - ContainerInspect(name string, size bool, version string) (interface{}, error) - ContainerLogs(ctx context.Context, name string, config *backend.ContainerLogsConfig, started chan struct{}) error - ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error - ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) - - Containers(config *types.ContainerListOptions) ([]*types.Container, error) -} - -// attachBackend includes function to implement to provide container attaching functionality. -type attachBackend interface { - ContainerAttach(name string, c *backend.ContainerAttachConfig) error -} - -// Backend is all the methods that need to be implemented to provide container specific functionality. -type Backend interface { - execBackend - copyBackend - stateBackend - monitorBackend - attachBackend -} diff --git a/api/server/router/container/container.go b/api/server/router/container/container.go deleted file mode 100644 index d6fea4c35..000000000 --- a/api/server/router/container/container.go +++ /dev/null @@ -1,76 +0,0 @@ -package container - -import ( - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/router" -) - -type validationError struct { - error -} - -func (validationError) IsValidationError() bool { - return true -} - -// containerRouter is a router to talk with the container controller -type containerRouter struct { - backend Backend - decoder httputils.ContainerDecoder - routes []router.Route -} - -// NewRouter initializes a new container router -func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { - r := &containerRouter{ - backend: b, - decoder: decoder, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the container controller -func (r *containerRouter) Routes() []router.Route { - return r.routes -} - -// initRoutes initializes the routes in container router -func (r *containerRouter) initRoutes() { - r.routes = []router.Route{ - // HEAD - router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), - // GET - router.NewGetRoute("/containers/json", r.getContainersJSON), - router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), - router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), - router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), - router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), - router.Cancellable(router.NewGetRoute("/containers/{name:.*}/logs", 
r.getContainersLogs)), - router.Cancellable(router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats)), - router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), - router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), - router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive), - // POST - router.NewPostRoute("/containers/create", r.postContainersCreate), - router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), - router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), - router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), - router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), - router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), - router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), - router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait), - router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), - router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), - router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12 - router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), - router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), - router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), - router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), - router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), - // PUT - router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), - // DELETE - router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), - } -} diff --git a/api/server/router/container/container_routes.go b/api/server/router/container/container_routes.go deleted file mode 100644 index 977ce2522..000000000 --- a/api/server/router/container/container_routes.go +++ /dev/null @@ -1,527 +0,0 @@ -package container - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/signal" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/versions" - "golang.org/x/net/context" - "golang.org/x/net/websocket" -) - -func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filter, err := filters.FromParam(r.Form.Get("filters")) - if err != nil { - return err - } - - config := &types.ContainerListOptions{ - All: httputils.BoolValue(r, "all"), - Size: httputils.BoolValue(r, "size"), - Since: r.Form.Get("since"), - Before: r.Form.Get("before"), - Filter: filter, - } - - if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { - limit, err := strconv.Atoi(tmpLimit) - if err != nil { - return err - } - config.Limit = limit - } - - containers, err := s.backend.Containers(config) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, containers) -} - -func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars 
map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - stream := httputils.BoolValueOrDefault(r, "stream", true) - if !stream { - w.Header().Set("Content-Type", "application/json") - } - - config := &backend.ContainerStatsConfig{ - Stream: stream, - OutStream: w, - Version: string(httputils.VersionFromContext(ctx)), - } - - return s.backend.ContainerStats(ctx, vars["name"], config) -} - -func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - // Args are validated before the stream starts because when it starts we're - // sending HTTP 200 by writing an empty chunk of data to tell the client that - // daemon is going to stream. By sending this initial HTTP 200 we can't report - // any error after the stream starts (i.e. container not found, wrong parameters) - // with the appropriate status code. - stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") - if !(stdout || stderr) { - return fmt.Errorf("Bad parameters: you must choose at least one stream") - } - - containerName := vars["name"] - logsConfig := &backend.ContainerLogsConfig{ - ContainerLogsOptions: types.ContainerLogsOptions{ - Follow: httputils.BoolValue(r, "follow"), - Timestamps: httputils.BoolValue(r, "timestamps"), - Since: r.Form.Get("since"), - Tail: r.Form.Get("tail"), - ShowStdout: stdout, - ShowStderr: stderr, - Details: httputils.BoolValue(r, "details"), - }, - OutStream: w, - } - - chStarted := make(chan struct{}) - if err := s.backend.ContainerLogs(ctx, containerName, logsConfig, chStarted); err != nil { - select { - case <-chStarted: - // The client may be expecting all of the data we're sending to - // be multiplexed, so send it through OutStream, which will - // have been set up to handle that if needed. - fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %v\n", err) - default: - return err - } - } - - return nil -} - -func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return s.backend.ContainerExport(vars["name"], w) -} - -func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - // If contentLength is -1, we can assumed chunked encoding - // or more technically that the length is unknown - // https://golang.org/src/pkg/net/http/request.go#L139 - // net/http otherwise seems to swallow any headers related to chunked encoding - // including r.TransferEncoding - // allow a nil body for backwards compatibility - - version := httputils.VersionFromContext(ctx) - var hostConfig *container.HostConfig - // A non-nil json object is at least 7 characters. 
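// Aside: a self-contained sketch of the body-presence heuristic the next line
// applies (hasJSONBody is an illustrative name, not part of the removed code).
// The removed comment's arithmetic: the smallest JSON object that carries a
// field, e.g. {"a":1}, is 7 bytes, so a ContentLength of 7 or less cannot hold
// a HostConfig, while -1 means the length is unknown (chunked encoding, for
// example) and a body may still be present.
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// hasJSONBody reports whether the request plausibly carries a JSON object.
func hasJSONBody(r *http.Request) bool {
	return r.ContentLength > 7 || r.ContentLength == -1
}

func main() {
	empty, _ := http.NewRequest("POST", "/containers/x/start", nil)
	full, _ := http.NewRequest("POST", "/containers/x/start",
		strings.NewReader(`{"Privileged":true}`))
	fmt.Println(hasJSONBody(empty), hasJSONBody(full)) // false true
}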
- if r.ContentLength > 7 || r.ContentLength == -1 { - if versions.GreaterThanOrEqualTo(version, "1.24") { - return validationError{fmt.Errorf("starting container with HostConfig was deprecated since v1.10 and removed in v1.12")} - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - c, err := s.decoder.DecodeHostConfig(r.Body) - if err != nil { - return err - } - hostConfig = c - } - - validateHostname := versions.GreaterThanOrEqualTo(version, "1.24") - if err := s.backend.ContainerStart(vars["name"], hostConfig, validateHostname); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - seconds, _ := strconv.Atoi(r.Form.Get("t")) - - if err := s.backend.ContainerStop(vars["name"], seconds); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - - return nil -} - -type errContainerIsRunning interface { - ContainerIsRunning() bool -} - -func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - var sig syscall.Signal - name := vars["name"] - - // If we have a signal, look at it. Otherwise, do nothing - if sigStr := r.Form.Get("signal"); sigStr != "" { - var err error - if sig, err = signal.ParseSignal(sigStr); err != nil { - return err - } - } - - if err := s.backend.ContainerKill(name, uint64(sig)); err != nil { - var isStopped bool - if e, ok := err.(errContainerIsRunning); ok { - isStopped = !e.ContainerIsRunning() - } - - // Return error that's not caused because the container is stopped. - // Return error if the container is not running and the api is >= 1.20 - // to keep backwards compatibility. 
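// Aside: the errContainerIsRunning assertion above is the usual Go idiom of
// probing an error for an optional capability through a one-method interface
// instead of matching concrete error types. A minimal sketch (stoppedErr and
// IsStoppedContainer are illustrative names, not part of the removed code):
package main

import "fmt"

type errContainerIsRunning interface {
	ContainerIsRunning() bool
}

// stoppedErr is a toy error whose container is known to be stopped.
type stoppedErr struct{ name string }

func (e stoppedErr) Error() string            { return e.name + " is not running" }
func (e stoppedErr) ContainerIsRunning() bool { return false }

// IsStoppedContainer reports whether err carries the capability and says the
// container was already stopped.
func IsStoppedContainer(err error) bool {
	e, ok := err.(errContainerIsRunning)
	return ok && !e.ContainerIsRunning()
}

func main() {
	fmt.Println(IsStoppedContainer(stoppedErr{"web"}))  // true
	fmt.Println(IsStoppedContainer(fmt.Errorf("boom"))) // false
}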
- version := httputils.VersionFromContext(ctx) - if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped { - return fmt.Errorf("Cannot kill container %s: %v", name, err) - } - } - - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - timeout, _ := strconv.Atoi(r.Form.Get("t")) - - if err := s.backend.ContainerRestart(vars["name"], timeout); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := s.backend.ContainerPause(vars["name"]); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := s.backend.ContainerUnpause(vars["name"]); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - status, err := s.backend.ContainerWait(vars["name"], -1*time.Second) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, &types.ContainerWaitResponse{ - StatusCode: status, - }) -} - -func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - changes, err := s.backend.ContainerChanges(vars["name"]) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, changes) -} - -func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, procList) -} - -func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - newName := r.Form.Get("name") - if err := s.backend.ContainerRename(name, newName); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - version := httputils.VersionFromContext(ctx) - var updateConfig container.UpdateConfig - - decoder := json.NewDecoder(r.Body) - if err := decoder.Decode(&updateConfig); err != nil { - return err - } - - hostConfig := &container.HostConfig{ - Resources: updateConfig.Resources, - RestartPolicy: updateConfig.RestartPolicy, - } - - name := vars["name"] - validateHostname := versions.GreaterThanOrEqualTo(version, "1.24") - warnings, err := s.backend.ContainerUpdate(name, 
hostConfig, validateHostname) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, &types.ContainerUpdateResponse{ - Warnings: warnings, - }) -} - -func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - name := r.Form.Get("name") - - config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body) - if err != nil { - return err - } - version := httputils.VersionFromContext(ctx) - adjustCPUShares := versions.LessThan(version, "1.19") - - validateHostname := versions.GreaterThanOrEqualTo(version, "1.24") - ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ - Name: name, - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - AdjustCPUShares: adjustCPUShares, - }, validateHostname) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, ccr) -} - -func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - config := &types.ContainerRmConfig{ - ForceRemove: httputils.BoolValue(r, "force"), - RemoveVolume: httputils.BoolValue(r, "v"), - RemoveLink: httputils.BoolValue(r, "link"), - } - - if err := s.backend.ContainerRm(name, config); err != nil { - // Force a 404 for the empty string - if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") { - return fmt.Errorf("no such container: \"\"") - } - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - height, err := strconv.Atoi(r.Form.Get("h")) - if err != nil { - return err - } - width, err := strconv.Atoi(r.Form.Get("w")) - if err != nil { - return err - } - - return s.backend.ContainerResize(vars["name"], height, width) -} - -func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - err := httputils.ParseForm(r) - if err != nil { - return err - } - containerName := vars["name"] - - _, upgrade := r.Header["Upgrade"] - detachKeys := r.FormValue("detachKeys") - - hijacker, ok := w.(http.Hijacker) - if !ok { - return fmt.Errorf("error attaching to container %s, hijack connection missing", containerName) - } - - setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { - conn, _, err := hijacker.Hijack() - if err != nil { - return nil, nil, nil, err - } - - // set raw mode - conn.Write([]byte{}) - - if upgrade { - fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") - } else { - fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - } - - closer := func() error { - httputils.CloseStreams(conn) - return nil - } - return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil - } - - attachConfig := &backend.ContainerAttachConfig{ - GetStreams: setupStreams, - UseStdin: httputils.BoolValue(r, "stdin"), - UseStdout: httputils.BoolValue(r, "stdout"), - UseStderr: 
httputils.BoolValue(r, "stderr"), - Logs: httputils.BoolValue(r, "logs"), - Stream: httputils.BoolValue(r, "stream"), - DetachKeys: detachKeys, - MuxStreams: true, - } - - if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil { - logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) - // Remember to close stream if error happens - conn, _, errHijack := hijacker.Hijack() - if errHijack == nil { - statusCode := httputils.GetHTTPErrorStatusCode(err) - statusText := http.StatusText(statusCode) - fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error()) - httputils.CloseStreams(conn) - } else { - logrus.Errorf("Error Hijacking: %v", err) - } - } - return nil -} - -func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - containerName := vars["name"] - - var err error - detachKeys := r.FormValue("detachKeys") - - done := make(chan struct{}) - started := make(chan struct{}) - - setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { - wsChan := make(chan *websocket.Conn) - h := func(conn *websocket.Conn) { - wsChan <- conn - <-done - } - - srv := websocket.Server{Handler: h, Handshake: nil} - go func() { - close(started) - srv.ServeHTTP(w, r) - }() - - conn := <-wsChan - return conn, conn, conn, nil - } - - attachConfig := &backend.ContainerAttachConfig{ - GetStreams: setupStreams, - Logs: httputils.BoolValue(r, "logs"), - Stream: httputils.BoolValue(r, "stream"), - DetachKeys: detachKeys, - UseStdin: true, - UseStdout: true, - UseStderr: true, - MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr - } - - err = s.backend.ContainerAttach(containerName, attachConfig) - close(done) - select { - case <-started: - logrus.Errorf("Error attaching websocket: %s", err) - return nil - default: - } - return err -} diff --git a/api/server/router/container/copy.go b/api/server/router/container/copy.go deleted file mode 100644 index 554b40e8b..000000000 --- a/api/server/router/container/copy.go +++ /dev/null @@ -1,119 +0,0 @@ -package container - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "strings" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/versions" - "golang.org/x/net/context" -) - -// postContainersCopy is deprecated in favor of getContainersArchive. 
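// Aside: postContainersAttach above turns the HTTP request into a raw byte
// stream by seizing the TCP connection through http.Hijacker and writing the
// status line and headers by hand. A minimal standalone sketch of that hijack
// step (the /raw route and handler are illustrative, not the removed code):
package main

import (
	"fmt"
	"net/http"
)

func rawHandler(w http.ResponseWriter, r *http.Request) {
	hj, ok := w.(http.Hijacker)
	if !ok {
		http.Error(w, "hijacking unsupported", http.StatusInternalServerError)
		return
	}
	conn, buf, err := hj.Hijack()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer conn.Close()
	// From here on the handler owns the socket: emit the response prologue
	// manually, then stream arbitrary bytes outside HTTP's framing.
	fmt.Fprint(buf, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
	buf.WriteString("raw bytes follow\r\n")
	buf.Flush()
}

func main() {
	http.HandleFunc("/raw", rawHandler)
	http.ListenAndServe("127.0.0.1:8080", nil)
}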
-func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - // Deprecated since 1.8, Errors out since 1.12 - version := httputils.VersionFromContext(ctx) - if versions.GreaterThanOrEqualTo(version, "1.24") { - w.WriteHeader(http.StatusNotFound) - return nil - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - cfg := types.CopyConfig{} - if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { - return err - } - - if cfg.Resource == "" { - return fmt.Errorf("Path cannot be empty") - } - - data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) - if err != nil { - if strings.Contains(strings.ToLower(err.Error()), "no such container") { - w.WriteHeader(http.StatusNotFound) - return nil - } - if os.IsNotExist(err) { - return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) - } - return err - } - defer data.Close() - - w.Header().Set("Content-Type", "application/x-tar") - if _, err := io.Copy(w, data); err != nil { - return err - } - - return nil -} - -// // Encode the stat to JSON, base64 encode, and place in a header. -func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { - statJSON, err := json.Marshal(stat) - if err != nil { - return err - } - - header.Set( - "X-Docker-Container-Path-Stat", - base64.StdEncoding.EncodeToString(statJSON), - ) - - return nil -} - -func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := httputils.ArchiveFormValues(r, vars) - if err != nil { - return err - } - - stat, err := s.backend.ContainerStatPath(v.Name, v.Path) - if err != nil { - return err - } - - return setContainerPathStatHeader(stat, w.Header()) -} - -func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := httputils.ArchiveFormValues(r, vars) - if err != nil { - return err - } - - tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) - if err != nil { - return err - } - defer tarArchive.Close() - - if err := setContainerPathStatHeader(stat, w.Header()); err != nil { - return err - } - - w.Header().Set("Content-Type", "application/x-tar") - _, err = io.Copy(w, tarArchive) - - return err -} - -func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := httputils.ArchiveFormValues(r, vars) - if err != nil { - return err - } - - noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") - return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body) -} diff --git a/api/server/router/container/exec.go b/api/server/router/container/exec.go deleted file mode 100644 index 21f5dc830..000000000 --- a/api/server/router/container/exec.go +++ /dev/null @@ -1,134 +0,0 @@ -package container - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/versions" - "golang.org/x/net/context" -) - -func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - eConfig, err := s.backend.ContainerExecInspect(vars["id"]) - if 
err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, eConfig) -} - -func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - name := vars["name"] - - execConfig := &types.ExecConfig{} - if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { - return err - } - - if len(execConfig.Cmd) == 0 { - return fmt.Errorf("No exec command specified") - } - - // Register an instance of Exec in container. - id, err := s.backend.ContainerExecCreate(name, execConfig) - if err != nil { - logrus.Errorf("Error setting up exec command in container %s: %v", name, err) - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ - ID: id, - }) -} - -// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. -func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - version := httputils.VersionFromContext(ctx) - if versions.GreaterThan(version, "1.21") { - if err := httputils.CheckForJSON(r); err != nil { - return err - } - } - - var ( - execName = vars["name"] - stdin, inStream io.ReadCloser - stdout, stderr, outStream io.Writer - ) - - execStartCheck := &types.ExecStartCheck{} - if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { - return err - } - - if exists, err := s.backend.ExecExists(execName); !exists { - return err - } - - if !execStartCheck.Detach { - var err error - // Setting up the streaming http interface. - inStream, outStream, err = httputils.HijackConnection(w) - if err != nil { - return err - } - defer httputils.CloseStreams(inStream, outStream) - - if _, ok := r.Header["Upgrade"]; ok { - fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") - } else { - fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - } - - stdin = inStream - stdout = outStream - if !execStartCheck.Tty { - stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) - stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } - } - - // Now run the user process in container. - // Maybe we should we pass ctx here if we're not detaching? 
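// Aside: for non-TTY execs the handler above wraps the single hijacked stream
// with stdcopy, so stdout and stderr travel as tagged frames that the client
// can split apart again. A round trip through that package (assuming the same
// pkg/stdcopy import path this tree vendors):
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	var mux bytes.Buffer
	// Daemon side: two virtual streams multiplexed onto one writer.
	stdout := stdcopy.NewStdWriter(&mux, stdcopy.Stdout)
	stderr := stdcopy.NewStdWriter(&mux, stdcopy.Stderr)
	fmt.Fprintln(stdout, "to stdout")
	fmt.Fprintln(stderr, "to stderr")

	// Client side: demultiplex the frames back onto separate writers.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, &mux); err != nil {
		panic(err)
	}
}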
- if err := s.backend.ContainerExecStart(context.Background(), execName, stdin, stdout, stderr); err != nil { - if execStartCheck.Detach { - return err - } - stdout.Write([]byte(err.Error() + "\r\n")) - logrus.Errorf("Error running exec in container: %v", err) - } - return nil -} - -func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - height, err := strconv.Atoi(r.Form.Get("h")) - if err != nil { - return err - } - width, err := strconv.Atoi(r.Form.Get("w")) - if err != nil { - return err - } - - return s.backend.ContainerExecResize(vars["name"], height, width) -} diff --git a/api/server/router/container/inspect.go b/api/server/router/container/inspect.go deleted file mode 100644 index dbbced7ee..000000000 --- a/api/server/router/container/inspect.go +++ /dev/null @@ -1,21 +0,0 @@ -package container - -import ( - "net/http" - - "github.com/docker/docker/api/server/httputils" - "golang.org/x/net/context" -) - -// getContainersByName inspects container's configuration and serializes it as json. -func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - displaySize := httputils.BoolValue(r, "size") - - version := httputils.VersionFromContext(ctx) - json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, json) -} diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go deleted file mode 100644 index 08101736e..000000000 --- a/api/server/router/image/backend.go +++ /dev/null @@ -1,43 +0,0 @@ -package image - -import ( - "io" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/registry" - "golang.org/x/net/context" -) - -// Backend is all the methods that need to be implemented -// to provide image specific functionality. 
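// Aside: the registry-facing handlers below (pull, push, search) all read
// credentials the same way: an X-Registry-Auth header holding base64url-encoded
// JSON, with decode failures deliberately softened into an empty config for
// compatibility with older clients. A self-contained sketch (this AuthConfig
// is a trimmed stand-in for the engine-api type):
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

type AuthConfig struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

// decodeAuth mirrors the header handling in the routes below: empty or
// malformed input degrades to anonymous access rather than an error.
func decodeAuth(header string) *AuthConfig {
	cfg := &AuthConfig{}
	if header == "" {
		return cfg
	}
	dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(header)))
	if err := dec.Decode(cfg); err != nil {
		return &AuthConfig{}
	}
	return cfg
}

func main() {
	raw := base64.URLEncoding.EncodeToString([]byte(`{"username":"u","password":"p"}`))
	fmt.Printf("%+v\n", decodeAuth(raw)) // &{Username:u Password:p}
}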
-type Backend interface { - containerBackend - imageBackend - importExportBackend - registryBackend -} - -type containerBackend interface { - Commit(name string, config *backend.ContainerCommitConfig) (imageID string, err error) -} - -type imageBackend interface { - ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) - ImageHistory(imageName string) ([]*types.ImageHistory, error) - Images(filterArgs string, filter string, all bool) ([]*types.Image, error) - LookupImage(name string) (*types.ImageInspect, error) - TagImage(imageName, repository, tag string) error -} - -type importExportBackend interface { - LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error - ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error - ExportImage(names []string, outStream io.Writer) error -} - -type registryBackend interface { - PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error - PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error - SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) -} diff --git a/api/server/router/image/image.go b/api/server/router/image/image.go deleted file mode 100644 index 71c95e1e3..000000000 --- a/api/server/router/image/image.go +++ /dev/null @@ -1,49 +0,0 @@ -package image - -import ( - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/router" -) - -// imageRouter is a router to talk with the image controller -type imageRouter struct { - backend Backend - decoder httputils.ContainerDecoder - routes []router.Route -} - -// NewRouter initializes a new image router -func NewRouter(backend Backend, decoder httputils.ContainerDecoder) router.Router { - r := &imageRouter{ - backend: backend, - decoder: decoder, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the image controller -func (r *imageRouter) Routes() []router.Route { - return r.routes -} - -// initRoutes initializes the routes in the image router -func (r *imageRouter) initRoutes() { - r.routes = []router.Route{ - // GET - router.NewGetRoute("/images/json", r.getImagesJSON), - router.NewGetRoute("/images/search", r.getImagesSearch), - router.NewGetRoute("/images/get", r.getImagesGet), - router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet), - router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), - router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName), - // POST - router.NewPostRoute("/commit", r.postCommit), - router.NewPostRoute("/images/load", r.postImagesLoad), - router.Cancellable(router.NewPostRoute("/images/create", r.postImagesCreate)), - router.Cancellable(router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush)), - router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), - // DELETE - router.NewDeleteRoute("/images/{name:.*}", r.deleteImages), - } -} diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go deleted file mode 100644 index 18a36fda6..000000000 --- a/api/server/router/image/image_routes.go +++ /dev/null @@ -1,319 +0,0 @@ -package image - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - "strings" - - 
"github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/versions" - "golang.org/x/net/context" -) - -func (s *imageRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - cname := r.Form.Get("container") - - pause := httputils.BoolValue(r, "pause") - version := httputils.VersionFromContext(ctx) - if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") { - pause = true - } - - c, _, _, err := s.decoder.DecodeConfig(r.Body) - if err != nil && err != io.EOF { //Do not fail if body is empty. - return err - } - if c == nil { - c = &container.Config{} - } - - commitCfg := &backend.ContainerCommitConfig{ - ContainerCommitConfig: types.ContainerCommitConfig{ - Pause: pause, - Repo: r.Form.Get("repo"), - Tag: r.Form.Get("tag"), - Author: r.Form.Get("author"), - Comment: r.Form.Get("comment"), - Config: c, - MergeConfigs: true, - }, - Changes: r.Form["changes"], - } - - imgID, err := s.backend.Commit(cname, commitCfg) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerCommitResponse{ - ID: string(imgID), - }) -} - -// Creates an image from Pull or from Import -func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - var ( - image = r.Form.Get("fromImage") - repo = r.Form.Get("repo") - tag = r.Form.Get("tag") - message = r.Form.Get("message") - err error - output = ioutils.NewWriteFlusher(w) - ) - defer output.Close() - - w.Header().Set("Content-Type", "application/json") - - if image != "" { //pull - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - - authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := &types.AuthConfig{} - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = &types.AuthConfig{} - } - } - - err = s.backend.PullImage(ctx, image, tag, metaHeaders, authConfig, output) - } else { //import - src := r.Form.Get("fromSrc") - // 'err' MUST NOT be defined within this block, we need any error - // generated from the download to be available to the output - // stream processing below - err = s.backend.ImportImage(src, repo, tag, message, r.Body, output, r.Form["changes"]) - } - if err != nil { - if !output.Flushed() { - return err - } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) - } - - return nil -} - -func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - if err := 
httputils.ParseForm(r); err != nil { - return err - } - authConfig := &types.AuthConfig{} - - authEncoded := r.Header.Get("X-Registry-Auth") - if authEncoded != "" { - // the new format is to handle the authConfig as a header - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - // to increase compatibility to existing api it is defaulting to be empty - authConfig = &types.AuthConfig{} - } - } else { - // the old format is supported for compatibility if there was no authConfig header - if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { - return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) - } - } - - image := vars["name"] - tag := r.Form.Get("tag") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - - w.Header().Set("Content-Type", "application/json") - - if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil { - if !output.Flushed() { - return err - } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) - } - return nil -} - -func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - w.Header().Set("Content-Type", "application/x-tar") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - var names []string - if name, ok := vars["name"]; ok { - names = []string{name} - } else { - names = r.Form["names"] - } - - if err := s.backend.ExportImage(names, output); err != nil { - if !output.Flushed() { - return err - } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) - } - return nil -} - -func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - quiet := httputils.BoolValueOrDefault(r, "quiet", true) - - if !quiet { - w.Header().Set("Content-Type", "application/json") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { - output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) - } - return nil - } - return s.backend.LoadImage(r.Body, w, quiet) -} - -func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - - if strings.TrimSpace(name) == "" { - return fmt.Errorf("image name cannot be blank") - } - - force := httputils.BoolValue(r, "force") - prune := !httputils.BoolValue(r, "noprune") - - list, err := s.backend.ImageDelete(name, force, prune) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, list) -} - -func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - imageInspect, err := s.backend.LookupImage(vars["name"]) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, imageInspect) -} - -func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - // FIXME: The filter parameter could just be a match filter - images, err := 
s.backend.Images(r.Form.Get("filters"), r.Form.Get("filter"), httputils.BoolValue(r, "all")) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, images) -} - -func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - name := vars["name"] - history, err := s.backend.ImageHistory(name) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, history) -} - -func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil { - return err - } - w.WriteHeader(http.StatusCreated) - return nil -} - -func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - var ( - config *types.AuthConfig - authEncoded = r.Header.Get("X-Registry-Auth") - headers = map[string][]string{} - ) - - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(&config); err != nil { - // for a search it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - config = &types.AuthConfig{} - } - } - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - headers[k] = v - } - } - limit := registry.DefaultSearchLimit - if r.Form.Get("limit") != "" { - limitValue, err := strconv.Atoi(r.Form.Get("limit")) - if err != nil { - return err - } - limit = limitValue - } - query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), limit, config, headers) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, query.Results) -} diff --git a/api/server/router/local.go b/api/server/router/local.go deleted file mode 100644 index 7cb2a5a2f..000000000 --- a/api/server/router/local.go +++ /dev/null @@ -1,96 +0,0 @@ -package router - -import ( - "net/http" - - "github.com/docker/docker/api/server/httputils" - "golang.org/x/net/context" -) - -// localRoute defines an individual API route to connect -// with the docker daemon. It implements Route. -type localRoute struct { - method string - path string - handler httputils.APIFunc -} - -// Handler returns the APIFunc to let the server wrap it in middlewares. -func (l localRoute) Handler() httputils.APIFunc { - return l.handler -} - -// Method returns the http method that the route responds to. -func (l localRoute) Method() string { - return l.method -} - -// Path returns the subpath where the route responds to. -func (l localRoute) Path() string { - return l.path -} - -// NewRoute initializes a new local route for the router. -func NewRoute(method, path string, handler httputils.APIFunc) Route { - return localRoute{method, path, handler} -} - -// NewGetRoute initializes a new route with the http method GET. -func NewGetRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("GET", path, handler) -} - -// NewPostRoute initializes a new route with the http method POST. -func NewPostRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("POST", path, handler) -} - -// NewPutRoute initializes a new route with the http method PUT. 
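// Aside: cancellableHandler below predates request-scoped contexts in
// net/http: it watches http.CloseNotifier and cancels a derived context when
// the client goes away. Since Go 1.8 the server cancels r.Context() on client
// disconnect by itself, so a modern equivalent is roughly this (illustrative
// handler, not part of the removed code):
package main

import (
	"fmt"
	"net/http"
	"time"
)

func slow(w http.ResponseWriter, r *http.Request) {
	select {
	case <-time.After(5 * time.Second):
		fmt.Fprintln(w, "done")
	case <-r.Context().Done():
		// The client hung up; abandon the work instead of finishing it.
		fmt.Println("request cancelled:", r.Context().Err())
	}
}

func main() {
	http.HandleFunc("/slow", slow)
	http.ListenAndServe("127.0.0.1:8080", nil)
}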
-func NewPutRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("PUT", path, handler) -} - -// NewDeleteRoute initializes a new route with the http method DELETE. -func NewDeleteRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("DELETE", path, handler) -} - -// NewOptionsRoute initializes a new route with the http method OPTIONS. -func NewOptionsRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("OPTIONS", path, handler) -} - -// NewHeadRoute initializes a new route with the http method HEAD. -func NewHeadRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("HEAD", path, handler) -} - -func cancellableHandler(h httputils.APIFunc) httputils.APIFunc { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if notifier, ok := w.(http.CloseNotifier); ok { - notify := notifier.CloseNotify() - notifyCtx, cancel := context.WithCancel(ctx) - finished := make(chan struct{}) - defer close(finished) - ctx = notifyCtx - go func() { - select { - case <-notify: - cancel() - case <-finished: - } - }() - } - return h(ctx, w, r, vars) - } -} - -// Cancellable makes new route which embeds http.CloseNotifier feature to -// context.Context of handler. -func Cancellable(r Route) Route { - return localRoute{ - method: r.Method(), - path: r.Path(), - handler: cancellableHandler(r.Handler()), - } -} diff --git a/api/server/router/network/backend.go b/api/server/router/network/backend.go deleted file mode 100644 index 6e322fa37..000000000 --- a/api/server/router/network/backend.go +++ /dev/null @@ -1,20 +0,0 @@ -package network - -import ( - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" -) - -// Backend is all the methods that need to be implemented -// to provide network specific functionality. 
-type Backend interface { - FindNetwork(idName string) (libnetwork.Network, error) - GetNetworkByName(idName string) (libnetwork.Network, error) - GetNetworksByID(partialID string) []libnetwork.Network - GetNetworks() []libnetwork.Network - CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) - ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error - DisconnectContainerFromNetwork(containerName string, network libnetwork.Network, force bool) error - DeleteNetwork(name string) error -} diff --git a/api/server/router/network/filter.go b/api/server/router/network/filter.go deleted file mode 100644 index 8168413d1..000000000 --- a/api/server/router/network/filter.go +++ /dev/null @@ -1,96 +0,0 @@ -package network - -import ( - "fmt" - - "github.com/docker/docker/runconfig" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" -) - -var ( - // AcceptedFilters is an acceptable filters for validation - AcceptedFilters = map[string]bool{ - "driver": true, - "type": true, - "name": true, - "id": true, - "label": true, - } -) - -func filterNetworkByType(nws []types.NetworkResource, netType string) (retNws []types.NetworkResource, err error) { - switch netType { - case "builtin": - for _, nw := range nws { - if runconfig.IsPreDefinedNetwork(nw.Name) { - retNws = append(retNws, nw) - } - } - case "custom": - for _, nw := range nws { - if !runconfig.IsPreDefinedNetwork(nw.Name) { - retNws = append(retNws, nw) - } - } - default: - return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType) - } - return retNws, nil -} - -// filterNetworks filters network list according to user specified filter -// and returns user chosen networks -func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) { - // if filter is empty, return original network list - if filter.Len() == 0 { - return nws, nil - } - - if err := filter.Validate(AcceptedFilters); err != nil { - return nil, err - } - - displayNet := []types.NetworkResource{} - for _, nw := range nws { - if filter.Include("driver") { - if !filter.ExactMatch("driver", nw.Driver) { - continue - } - } - if filter.Include("name") { - if !filter.Match("name", nw.Name) { - continue - } - } - if filter.Include("id") { - if !filter.Match("id", nw.ID) { - continue - } - } - if filter.Include("label") { - if !filter.MatchKVList("label", nw.Labels) { - continue - } - } - displayNet = append(displayNet, nw) - } - - if filter.Include("type") { - var typeNet []types.NetworkResource - errFilter := filter.WalkValues("type", func(fval string) error { - passList, err := filterNetworkByType(displayNet, fval) - if err != nil { - return err - } - typeNet = append(typeNet, passList...) 
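// Aside: filterNetworks above shows the two-step contract of the vendored
// filters package: Validate the parsed args against a whitelist of accepted
// keys, then test each candidate with Include/ExactMatch. A small sketch
// against the same package (the "color" key and values are invented):
package main

import (
	"fmt"

	"github.com/docker/engine-api/types/filters"
)

var accepted = map[string]bool{"color": true}

func main() {
	// The wire format matches what the daemon receives in r.Form.Get("filters").
	args, err := filters.FromParam(`{"color":{"red":true}}`)
	if err != nil {
		panic(err)
	}
	if err := args.Validate(accepted); err != nil {
		panic(err) // an unknown key such as "size" would be rejected here
	}
	for _, candidate := range []string{"red", "blue"} {
		if args.Include("color") && !args.ExactMatch("color", candidate) {
			continue // filtered out, like the driver/name/id checks above
		}
		fmt.Println("kept:", candidate)
	}
}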
- return nil - }) - if errFilter != nil { - return nil, errFilter - } - displayNet = typeNet - } - - return displayNet, nil -} diff --git a/api/server/router/network/network.go b/api/server/router/network/network.go deleted file mode 100644 index 8688c3ed1..000000000 --- a/api/server/router/network/network.go +++ /dev/null @@ -1,42 +0,0 @@ -package network - -import ( - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/daemon/cluster" -) - -// networkRouter is a router to talk with the network controller -type networkRouter struct { - backend Backend - clusterProvider *cluster.Cluster - routes []router.Route -} - -// NewRouter initializes a new network router -func NewRouter(b Backend, c *cluster.Cluster) router.Router { - r := &networkRouter{ - backend: b, - clusterProvider: c, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the network controller -func (r *networkRouter) Routes() []router.Route { - return r.routes -} - -func (r *networkRouter) initRoutes() { - r.routes = []router.Route{ - // GET - router.NewGetRoute("/networks", r.getNetworksList), - router.NewGetRoute("/networks/{id:.*}", r.getNetwork), - // POST - router.NewPostRoute("/networks/create", r.postNetworkCreate), - router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect), - router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect), - // DELETE - router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork), - } -} diff --git a/api/server/router/network/network_routes.go b/api/server/router/network/network_routes.go deleted file mode 100644 index ebf2ce287..000000000 --- a/api/server/router/network/network_routes.go +++ /dev/null @@ -1,285 +0,0 @@ -package network - -import ( - "encoding/json" - "net/http" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" -) - -func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - filter := r.Form.Get("filters") - netFilters, err := filters.FromParam(filter) - if err != nil { - return err - } - - list := []types.NetworkResource{} - - if nr, err := n.clusterProvider.GetNetworks(); err == nil { - for _, nw := range nr { - list = append(list, nw) - } - } - - // Combine the network list returned by Docker daemon if it is not already - // returned by the cluster manager -SKIP: - for _, nw := range n.backend.GetNetworks() { - for _, nl := range list { - if nl.ID == nw.ID() { - continue SKIP - } - } - list = append(list, *n.buildNetworkResource(nw)) - } - - list, err = filterNetworks(list, netFilters) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, list) -} - -func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - nw, err := n.backend.FindNetwork(vars["id"]) - if err != nil { - if nr, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil { - return httputils.WriteJSON(w, http.StatusOK, nr) - } - return err - } - return httputils.WriteJSON(w, http.StatusOK, n.buildNetworkResource(nw)) -} - -func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, 
vars map[string]string) error { - var create types.NetworkCreateRequest - - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - if err := json.NewDecoder(r.Body).Decode(&create); err != nil { - return err - } - - if _, err := n.clusterProvider.GetNetwork(create.Name); err == nil { - return libnetwork.NetworkNameError(create.Name) - } - - nw, err := n.backend.CreateNetwork(create) - if err != nil { - if _, ok := err.(libnetwork.ManagerRedirectError); !ok { - return err - } - id, err := n.clusterProvider.CreateNetwork(create) - if err != nil { - return err - } - nw = &types.NetworkCreateResponse{ID: id} - } - - return httputils.WriteJSON(w, http.StatusCreated, nw) -} - -func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var connect types.NetworkConnect - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - if err := json.NewDecoder(r.Body).Decode(&connect); err != nil { - return err - } - - nw, err := n.backend.FindNetwork(vars["id"]) - if err != nil { - return err - } - - return n.backend.ConnectContainerToNetwork(connect.Container, nw.Name(), connect.EndpointConfig) -} - -func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var disconnect types.NetworkDisconnect - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil { - return err - } - - nw, err := n.backend.FindNetwork(vars["id"]) - if err != nil { - return err - } - - return n.backend.DisconnectContainerFromNetwork(disconnect.Container, nw, disconnect.Force) -} - -func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if _, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil { - return n.clusterProvider.RemoveNetwork(vars["id"]) - } - if err := n.backend.DeleteNetwork(vars["id"]); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource { - r := &types.NetworkResource{} - if nw == nil { - return r - } - - info := nw.Info() - r.Name = nw.Name() - r.ID = nw.ID() - r.Scope = info.Scope() - if n.clusterProvider.IsManager() { - if _, err := n.clusterProvider.GetNetwork(nw.Name()); err == nil { - r.Scope = "swarm" - } - } else if info.Dynamic() { - r.Scope = "swarm" - } - r.Driver = nw.Type() - r.EnableIPv6 = info.IPv6Enabled() - r.Internal = info.Internal() - r.Options = info.DriverOptions() - r.Containers = make(map[string]types.EndpointResource) - buildIpamResources(r, info) - r.Internal = info.Internal() - r.Labels = info.Labels() - - epl := nw.Endpoints() - for _, e := range epl { - ei := e.Info() - if ei == nil { - continue - } - sb := ei.Sandbox() - key := "ep-" + e.ID() - if sb != nil { - key = sb.ContainerID() - } - - r.Containers[key] = buildEndpointResource(e) - } - return r -} - -func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) { - id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig() - - ipv4Info, ipv6Info := nwInfo.IpamInfo() - - 
r.IPAM.Driver = id - - r.IPAM.Options = opts - - r.IPAM.Config = []network.IPAMConfig{} - for _, ip4 := range ipv4conf { - if ip4.PreferredPool == "" { - continue - } - iData := network.IPAMConfig{} - iData.Subnet = ip4.PreferredPool - iData.IPRange = ip4.SubPool - iData.Gateway = ip4.Gateway - iData.AuxAddress = ip4.AuxAddresses - r.IPAM.Config = append(r.IPAM.Config, iData) - } - - if len(r.IPAM.Config) == 0 { - for _, ip4Info := range ipv4Info { - iData := network.IPAMConfig{} - iData.Subnet = ip4Info.IPAMData.Pool.String() - iData.Gateway = ip4Info.IPAMData.Gateway.String() - r.IPAM.Config = append(r.IPAM.Config, iData) - } - } - - hasIpv6Conf := false - for _, ip6 := range ipv6conf { - if ip6.PreferredPool == "" { - continue - } - hasIpv6Conf = true - iData := network.IPAMConfig{} - iData.Subnet = ip6.PreferredPool - iData.IPRange = ip6.SubPool - iData.Gateway = ip6.Gateway - iData.AuxAddress = ip6.AuxAddresses - r.IPAM.Config = append(r.IPAM.Config, iData) - } - - if !hasIpv6Conf { - for _, ip6Info := range ipv6Info { - iData := network.IPAMConfig{} - iData.Subnet = ip6Info.IPAMData.Pool.String() - iData.Gateway = ip6Info.IPAMData.Gateway.String() - r.IPAM.Config = append(r.IPAM.Config, iData) - } - } -} - -func buildEndpointResource(e libnetwork.Endpoint) types.EndpointResource { - er := types.EndpointResource{} - if e == nil { - return er - } - - er.EndpointID = e.ID() - er.Name = e.Name() - ei := e.Info() - if ei == nil { - return er - } - - if iface := ei.Iface(); iface != nil { - if mac := iface.MacAddress(); mac != nil { - er.MacAddress = mac.String() - } - if ip := iface.Address(); ip != nil && len(ip.IP) > 0 { - er.IPv4Address = ip.String() - } - - if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 { - er.IPv6Address = ipv6.String() - } - } - return er -} diff --git a/api/server/router/plugin/backend.go b/api/server/router/plugin/backend.go deleted file mode 100644 index 0eb4f5b8f..000000000 --- a/api/server/router/plugin/backend.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build experimental - -package plugin - -import ( - "net/http" - - enginetypes "github.com/docker/engine-api/types" -) - -// Backend for Plugin -type Backend interface { - Disable(name string) error - Enable(name string) error - List() ([]enginetypes.Plugin, error) - Inspect(name string) (enginetypes.Plugin, error) - Remove(name string) error - Set(name string, args []string) error - Pull(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) - Push(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) error -} diff --git a/api/server/router/plugin/plugin.go b/api/server/router/plugin/plugin.go deleted file mode 100644 index 999ba6b74..000000000 --- a/api/server/router/plugin/plugin.go +++ /dev/null @@ -1,23 +0,0 @@ -package plugin - -import "github.com/docker/docker/api/server/router" - -// pluginRouter is a router to talk with the plugin controller -type pluginRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new plugin router -func NewRouter(b Backend) router.Router { - r := &pluginRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routers to the plugin controller -func (r *pluginRouter) Routes() []router.Route { - return r.routes -} diff --git a/api/server/router/plugin/plugin_experimental.go b/api/server/router/plugin/plugin_experimental.go deleted file mode 100644 index 4e437ee65..000000000 --- 
a/api/server/router/plugin/plugin_experimental.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build experimental - -package plugin - -import ( - "github.com/docker/docker/api/server/router" -) - -func (r *pluginRouter) initRoutes() { - r.routes = []router.Route{ - router.NewGetRoute("/plugins", r.listPlugins), - router.NewGetRoute("/plugins/{name:.*}", r.inspectPlugin), - router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin), - router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH? - router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin), - router.NewPostRoute("/plugins/pull", r.pullPlugin), - router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin), - router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin), - } -} diff --git a/api/server/router/plugin/plugin_regular.go b/api/server/router/plugin/plugin_regular.go deleted file mode 100644 index f987faecd..000000000 --- a/api/server/router/plugin/plugin_regular.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !experimental - -package plugin - -func (r *pluginRouter) initRoutes() {} - -// Backend is empty so that the package can compile in non-experimental -// (Needed by volume driver) -type Backend interface{} diff --git a/api/server/router/plugin/plugin_routes.go b/api/server/router/plugin/plugin_routes.go deleted file mode 100644 index dfdde7248..000000000 --- a/api/server/router/plugin/plugin_routes.go +++ /dev/null @@ -1,103 +0,0 @@ -// +build experimental - -package plugin - -import ( - "encoding/base64" - "encoding/json" - "net/http" - "strings" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - - // Get X-Registry-Auth - authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := &types.AuthConfig{} - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - authConfig = &types.AuthConfig{} - } - } - - privileges, err := pr.backend.Pull(r.FormValue("name"), metaHeaders, authConfig) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, privileges) -} - -func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return pr.backend.Enable(vars["name"]) -} - -func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return pr.backend.Disable(vars["name"]) -} - -func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return pr.backend.Remove(vars["name"]) -} - -func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - - // Get X-Registry-Auth - authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := &types.AuthConfig{} - if authEncoded != "" { - authJSON 
:= base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - authConfig = &types.AuthConfig{} - } - } - return pr.backend.Push(vars["name"], metaHeaders, authConfig) -} - -func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var args []string - if err := json.NewDecoder(r.Body).Decode(&args); err != nil { - return err - } - return pr.backend.Set(vars["name"], args) -} - -func (pr *pluginRouter) listPlugins(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - l, err := pr.backend.List() - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, l) -} - -func (pr *pluginRouter) inspectPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - result, err := pr.backend.Inspect(vars["name"]) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, result) -} diff --git a/api/server/router/router.go b/api/server/router/router.go deleted file mode 100644 index 2de25c27f..000000000 --- a/api/server/router/router.go +++ /dev/null @@ -1,19 +0,0 @@ -package router - -import "github.com/docker/docker/api/server/httputils" - -// Router defines an interface to specify a group of routes to add to the docker server. -type Router interface { - // Routes returns the list of routes to add to the docker server. - Routes() []Route -} - -// Route defines an individual API route in the docker server. -type Route interface { - // Handler returns the raw function to create the http handler. - Handler() httputils.APIFunc - // Method returns the http method that the route responds to. - Method() string - // Path returns the subpath the route responds to. - Path() string -} diff --git a/api/server/router/swarm/backend.go b/api/server/router/swarm/backend.go deleted file mode 100644 index f9ee0a1bb..000000000 --- a/api/server/router/swarm/backend.go +++ /dev/null @@ -1,26 +0,0 @@ -package swarm - -import ( - basictypes "github.com/docker/engine-api/types" - types "github.com/docker/engine-api/types/swarm" -) - -// Backend abstracts a swarm manager.
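// For illustration only (an assumed usage, mirroring the updateCluster
// handler later in this patch): the uint64 passed to Update is the swarm
// object's current version, parsed from the request's "version" query
// parameter:
//
//	version, err := strconv.ParseUint(r.URL.Query().Get("version"), 10, 64)
//	if err == nil {
//		err = backend.Update(version, spec)
//	}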
-type Backend interface { - Init(req types.InitRequest) (string, error) - Join(req types.JoinRequest) error - Leave(force bool) error - Inspect() (types.Swarm, error) - Update(uint64, types.Spec) error - GetServices(basictypes.ServiceListOptions) ([]types.Service, error) - GetService(string) (types.Service, error) - CreateService(types.ServiceSpec, string) (string, error) - UpdateService(string, uint64, types.ServiceSpec, string) error - RemoveService(string) error - GetNodes(basictypes.NodeListOptions) ([]types.Node, error) - GetNode(string) (types.Node, error) - UpdateNode(string, uint64, types.NodeSpec) error - RemoveNode(string) error - GetTasks(basictypes.TaskListOptions) ([]types.Task, error) - GetTask(string) (types.Task, error) -} diff --git a/api/server/router/swarm/cluster.go b/api/server/router/swarm/cluster.go deleted file mode 100644 index a67ffa963..000000000 --- a/api/server/router/swarm/cluster.go +++ /dev/null @@ -1,44 +0,0 @@ -package swarm - -import "github.com/docker/docker/api/server/router" - -// swarmRouter is a router to talk with the swarm controller -type swarmRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new swarm router -func NewRouter(b Backend) router.Router { - r := &swarmRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the swarm controller -func (sr *swarmRouter) Routes() []router.Route { - return sr.routes -} - -func (sr *swarmRouter) initRoutes() { - sr.routes = []router.Route{ - router.NewPostRoute("/swarm/init", sr.initCluster), - router.NewPostRoute("/swarm/join", sr.joinCluster), - router.NewPostRoute("/swarm/leave", sr.leaveCluster), - router.NewGetRoute("/swarm", sr.inspectCluster), - router.NewPostRoute("/swarm/update", sr.updateCluster), - router.NewGetRoute("/services", sr.getServices), - router.NewGetRoute("/services/{id:.*}", sr.getService), - router.NewPostRoute("/services/create", sr.createService), - router.NewPostRoute("/services/{id:.*}/update", sr.updateService), - router.NewDeleteRoute("/services/{id:.*}", sr.removeService), - router.NewGetRoute("/nodes", sr.getNodes), - router.NewGetRoute("/nodes/{id:.*}", sr.getNode), - router.NewDeleteRoute("/nodes/{id:.*}", sr.removeNode), - router.NewPostRoute("/nodes/{id:.*}/update", sr.updateNode), - router.NewGetRoute("/tasks", sr.getTasks), - router.NewGetRoute("/tasks/{id:.*}", sr.getTask), - } -} diff --git a/api/server/router/swarm/cluster_routes.go b/api/server/router/swarm/cluster_routes.go deleted file mode 100644 index d91b4a972..000000000 --- a/api/server/router/swarm/cluster_routes.go +++ /dev/null @@ -1,235 +0,0 @@ -package swarm - -import ( - "encoding/json" - "fmt" - "net/http" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - basictypes "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - types "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var req types.InitRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return err - } - nodeID, err := sr.backend.Init(req) - if err != nil { - logrus.Errorf("Error initializing swarm: %v", err) - return err - } - return httputils.WriteJSON(w, http.StatusOK, nodeID) -} - -func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string)
error { - var req types.JoinRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return err - } - return sr.backend.Join(req) -} - -func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - force := httputils.BoolValue(r, "force") - return sr.backend.Leave(force) -} - -func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - swarm, err := sr.backend.Inspect() - if err != nil { - logrus.Errorf("Error getting swarm: %v", err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, swarm) -} - -func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var swarm types.Spec - if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil { - return err - } - - rawVersion := r.URL.Query().Get("version") - version, err := strconv.ParseUint(rawVersion, 10, 64) - if err != nil { - return fmt.Errorf("Invalid swarm version '%s': %s", rawVersion, err.Error()) - } - - if err := sr.backend.Update(version, swarm); err != nil { - logrus.Errorf("Error configuring swarm: %v", err) - return err - } - return nil -} - -func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filter, err := filters.FromParam(r.Form.Get("filters")) - if err != nil { - return err - } - - services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filter: filter}) - if err != nil { - logrus.Errorf("Error getting services: %v", err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, services) -} - -func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - service, err := sr.backend.GetService(vars["id"]) - if err != nil { - logrus.Errorf("Error getting service %s: %v", vars["id"], err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, service) -} - -func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var service types.ServiceSpec - if err := json.NewDecoder(r.Body).Decode(&service); err != nil { - return err - } - - // Get returns "" if the header does not exist - encodedAuth := r.Header.Get("X-Registry-Auth") - - id, err := sr.backend.CreateService(service, encodedAuth) - if err != nil { - logrus.Errorf("Error creating service %s: %v", id, err) - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, &basictypes.ServiceCreateResponse{ - ID: id, - }) -} - -func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var service types.ServiceSpec - if err := json.NewDecoder(r.Body).Decode(&service); err != nil { - return err - } - - rawVersion := r.URL.Query().Get("version") - version, err := strconv.ParseUint(rawVersion, 10, 64) - if err != nil { - return fmt.Errorf("Invalid service version '%s': %s", rawVersion, err.Error()) - } - - // Get returns "" if the header does not exist - encodedAuth := r.Header.Get("X-Registry-Auth") - - if err := sr.backend.UpdateService(vars["id"], version, service, encodedAuth); err != nil { - logrus.Errorf("Error updating service %s: %v", vars["id"], err) - return err - } - 
return nil -} - -func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := sr.backend.RemoveService(vars["id"]); err != nil { - logrus.Errorf("Error removing service %s: %v", vars["id"], err) - return err - } - return nil -} - -func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filter, err := filters.FromParam(r.Form.Get("filters")) - if err != nil { - return err - } - - nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filter: filter}) - if err != nil { - logrus.Errorf("Error getting nodes: %v", err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, nodes) -} - -func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - node, err := sr.backend.GetNode(vars["id"]) - if err != nil { - logrus.Errorf("Error getting node %s: %v", vars["id"], err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, node) -} - -func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var node types.NodeSpec - if err := json.NewDecoder(r.Body).Decode(&node); err != nil { - return err - } - - rawVersion := r.URL.Query().Get("version") - version, err := strconv.ParseUint(rawVersion, 10, 64) - if err != nil { - return fmt.Errorf("Invalid node version '%s': %s", rawVersion, err.Error()) - } - - if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil { - logrus.Errorf("Error updating node %s: %v", vars["id"], err) - return err - } - return nil -} - -func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := sr.backend.RemoveNode(vars["id"]); err != nil { - logrus.Errorf("Error removing node %s: %v", vars["id"], err) - return err - } - return nil -} - -func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filter, err := filters.FromParam(r.Form.Get("filters")) - if err != nil { - return err - } - - tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filter: filter}) - if err != nil { - logrus.Errorf("Error getting tasks: %v", err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, tasks) -} - -func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - task, err := sr.backend.GetTask(vars["id"]) - if err != nil { - logrus.Errorf("Error getting task %s: %v", vars["id"], err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, task) -} diff --git a/api/server/router/system/backend.go b/api/server/router/system/backend.go deleted file mode 100644 index 83609ab55..000000000 --- a/api/server/router/system/backend.go +++ /dev/null @@ -1,20 +0,0 @@ -package system - -import ( - "time" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// Backend is the methods that need to be implemented to provide -// system specific functionality. 
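// For illustration only (an assumed usage, mirroring the getEvents handler
// later in this file): SubscribeToEvents returns already-buffered past
// events plus a channel of live ones, and the subscriber is responsible for
// unsubscribing:
//
//	buffered, l := backend.SubscribeToEvents(since, until, ef)
//	defer backend.UnsubscribeFromEvents(l)
//	for _, ev := range buffered {
//		// replay history before streaming from l
//	}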
-type Backend interface { - SystemInfo() (*types.Info, error) - SystemVersion() types.Version - SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{}) - UnsubscribeFromEvents(chan interface{}) - AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) -} diff --git a/api/server/router/system/system.go b/api/server/router/system/system.go deleted file mode 100644 index e5742c9fe..000000000 --- a/api/server/router/system/system.go +++ /dev/null @@ -1,38 +0,0 @@ -package system - -import ( - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/daemon/cluster" -) - -// systemRouter provides information about the Docker system overall. -// It gathers information about host, daemon and container events. -type systemRouter struct { - backend Backend - clusterProvider *cluster.Cluster - routes []router.Route -} - -// NewRouter initializes a new system router -func NewRouter(b Backend, c *cluster.Cluster) router.Router { - r := &systemRouter{ - backend: b, - clusterProvider: c, - } - - r.routes = []router.Route{ - router.NewOptionsRoute("/{anyroute:.*}", optionsHandler), - router.NewGetRoute("/_ping", pingHandler), - router.Cancellable(router.NewGetRoute("/events", r.getEvents)), - router.NewGetRoute("/info", r.getInfo), - router.NewGetRoute("/version", r.getVersion), - router.NewPostRoute("/auth", r.postAuth), - } - - return r -} - -// Routes returns all the API routes dedicated to the docker system -func (s *systemRouter) Routes() []router.Route { - return s.routes -} diff --git a/api/server/router/system/system_routes.go b/api/server/router/system/system_routes.go deleted file mode 100644 index 8050301c9..000000000 --- a/api/server/router/system/system_routes.go +++ /dev/null @@ -1,154 +0,0 @@ -package system - -import ( - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/errors" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - timetypes "github.com/docker/engine-api/types/time" - "golang.org/x/net/context" -) - -func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.WriteHeader(http.StatusOK) - return nil -} - -func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - _, err := w.Write([]byte{'O', 'K'}) - return err -} - -func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - info, err := s.backend.SystemInfo() - if err != nil { - return err - } - if s.clusterProvider != nil { - info.Swarm = s.clusterProvider.Info() - } - - return httputils.WriteJSON(w, http.StatusOK, info) -} - -func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - info := s.backend.SystemVersion() - info.APIVersion = api.DefaultVersion - - return httputils.WriteJSON(w, http.StatusOK, info) -} - -func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - since, err := eventTime(r.Form.Get("since")) - if err != nil { - return err - } - until, err := 
eventTime(r.Form.Get("until")) - if err != nil { - return err - } - - var ( - timeout <-chan time.Time - onlyPastEvents bool - ) - if !until.IsZero() { - if until.Before(since) { - return errors.NewBadRequestError(fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))) - } - - now := time.Now() - - onlyPastEvents = until.Before(now) - - if !onlyPastEvents { - dur := until.Sub(now) - timeout = time.NewTimer(dur).C - } - } - - ef, err := filters.FromParam(r.Form.Get("filters")) - if err != nil { - return err - } - - w.Header().Set("Content-Type", "application/json") - output := ioutils.NewWriteFlusher(w) - defer output.Close() - output.Flush() - - enc := json.NewEncoder(output) - - buffered, l := s.backend.SubscribeToEvents(since, until, ef) - defer s.backend.UnsubscribeFromEvents(l) - - for _, ev := range buffered { - if err := enc.Encode(ev); err != nil { - return err - } - } - - if onlyPastEvents { - return nil - } - - for { - select { - case ev := <-l: - jev, ok := ev.(events.Message) - if !ok { - logrus.Warnf("unexpected event message: %q", ev) - continue - } - if err := enc.Encode(jev); err != nil { - return err - } - case <-timeout: - return nil - case <-ctx.Done(): - logrus.Debug("Client context cancelled, stop sending events") - return nil - } - } -} - -func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var config *types.AuthConfig - err := json.NewDecoder(r.Body).Decode(&config) - r.Body.Close() - if err != nil { - return err - } - status, token, err := s.backend.AuthenticateToRegistry(ctx, config) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, &types.AuthResponse{ - Status: status, - IdentityToken: token, - }) -} - -func eventTime(formTime string) (time.Time, error) { - t, tNano, err := timetypes.ParseTimestamps(formTime, -1) - if err != nil { - return time.Time{}, err - } - if t == -1 { - return time.Time{}, nil - } - return time.Unix(t, tNano), nil -} diff --git a/api/server/router/volume/backend.go b/api/server/router/volume/backend.go deleted file mode 100644 index fbf5ed27f..000000000 --- a/api/server/router/volume/backend.go +++ /dev/null @@ -1,15 +0,0 @@ -package volume - -import ( - // TODO return types need to be refactored into pkg - "github.com/docker/engine-api/types" -) - -// Backend is the methods that need to be implemented to provide -// volume specific functionality -type Backend interface { - Volumes(filter string) ([]*types.Volume, []string, error) - VolumeInspect(name string) (*types.Volume, error) - VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) - VolumeRm(name string) error -} diff --git a/api/server/router/volume/volume.go b/api/server/router/volume/volume.go deleted file mode 100644 index 2683dcec5..000000000 --- a/api/server/router/volume/volume.go +++ /dev/null @@ -1,35 +0,0 @@ -package volume - -import "github.com/docker/docker/api/server/router" - -// volumeRouter is a router to talk with the volumes controller -type volumeRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new volume router -func NewRouter(b Backend) router.Router { - r := &volumeRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the volumes controller -func (r *volumeRouter) Routes() []router.Route { - return r.routes -} - -func (r *volumeRouter) initRoutes() { - r.routes = []router.Route{ 
- // GET - router.NewGetRoute("/volumes", r.getVolumesList), - router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), - // POST - router.NewPostRoute("/volumes/create", r.postVolumesCreate), - // DELETE - router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), - } -} diff --git a/api/server/router/volume/volume_routes.go b/api/server/router/volume/volume_routes.go deleted file mode 100644 index 5aa0d4a7a..000000000 --- a/api/server/router/volume/volume_routes.go +++ /dev/null @@ -1,66 +0,0 @@ -package volume - -import ( - "encoding/json" - "net/http" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, &types.VolumesListResponse{Volumes: volumes, Warnings: warnings}) -} - -func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - volume, err := v.backend.VolumeInspect(vars["name"]) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, volume) -} - -func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - var req types.VolumeCreateRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return err - } - - volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusCreated, volume) -} - -func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := v.backend.VolumeRm(vars["name"]); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} diff --git a/api/server/router_swapper.go b/api/server/router_swapper.go deleted file mode 100644 index 1ecc7a7f3..000000000 --- a/api/server/router_swapper.go +++ /dev/null @@ -1,30 +0,0 @@ -package server - -import ( - "net/http" - "sync" - - "github.com/gorilla/mux" -) - -// routerSwapper is an http.Handler that allows you to swap -// mux routers. -type routerSwapper struct { - mu sync.Mutex - router *mux.Router -} - -// Swap replaces the old router with the new one. -func (rs *routerSwapper) Swap(newRouter *mux.Router) { - rs.mu.Lock() - rs.router = newRouter - rs.mu.Unlock() -} - -// ServeHTTP makes the routerSwapper implement the http.Handler interface.
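// A minimal usage sketch (assumed, for illustration): the swapper is set once
// as the http.Server handler, and later muxes are swapped in safely with
// respect to concurrent requests:
//
//	rs := &routerSwapper{router: mux.NewRouter()}
//	srv := &http.Server{Handler: rs}
//	// ... later, e.g. when toggling the profiler:
//	rs.Swap(newMux)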
-func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { - rs.mu.Lock() - router := rs.router - rs.mu.Unlock() - router.ServeHTTP(w, r) -} diff --git a/api/server/server.go b/api/server/server.go deleted file mode 100644 index b67fe5284..000000000 --- a/api/server/server.go +++ /dev/null @@ -1,207 +0,0 @@ -package server - -import ( - "crypto/tls" - "fmt" - "net" - "net/http" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/middleware" - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/errors" - "github.com/gorilla/mux" - "golang.org/x/net/context" -) - -// versionMatcher defines a variable matcher to be parsed by the router -// when a request is about to be served. -const versionMatcher = "/v{version:[0-9.]+}" - -// Config provides the configuration for the API server -type Config struct { - Logging bool - EnableCors bool - CorsHeaders string - Version string - SocketGroup string - TLSConfig *tls.Config -} - -// Server contains instance details for the server -type Server struct { - cfg *Config - servers []*HTTPServer - routers []router.Router - routerSwapper *routerSwapper - middlewares []middleware.Middleware -} - -// New returns a new instance of the server based on the specified configuration. -// It allocates resources which will be needed for ServeAPI(ports, unix-sockets). -func New(cfg *Config) *Server { - return &Server{ - cfg: cfg, - } -} - -// UseMiddleware appends a new middleware to the request chain. -// This needs to be called before the API routes are configured. -func (s *Server) UseMiddleware(m middleware.Middleware) { - s.middlewares = append(s.middlewares, m) -} - -// Accept sets a listener that the server accepts connections on. -func (s *Server) Accept(addr string, listeners ...net.Listener) { - for _, listener := range listeners { - httpServer := &HTTPServer{ - srv: &http.Server{ - Addr: addr, - }, - l: listener, - } - s.servers = append(s.servers, httpServer) - } -} - -// Close closes servers and thus stops receiving requests -func (s *Server) Close() { - for _, srv := range s.servers { - if err := srv.Close(); err != nil { - logrus.Error(err) - } - } -} - -// serveAPI loops through all initialized servers and spawns a goroutine -// running Serve for each. It also sets the routerSwapper as each server's Handler. -func (s *Server) serveAPI() error { - var chErrors = make(chan error, len(s.servers)) - for _, srv := range s.servers { - srv.srv.Handler = s.routerSwapper - go func(srv *HTTPServer) { - var err error - logrus.Infof("API listen on %s", srv.l.Addr()) - if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { - err = nil - } - chErrors <- err - }(srv) - } - - for i := 0; i < len(s.servers); i++ { - err := <-chErrors - if err != nil { - return err - } - } - - return nil -} - -// HTTPServer contains an instance of http server and the listener. -// srv *http.Server, contains configuration to create a http server and a mux router with all api end points. -// l net.Listener, is a TCP or Socket listener that dispatches incoming requests to the router. -type HTTPServer struct { - srv *http.Server - l net.Listener -} - -// Serve starts listening for inbound requests. -func (s *HTTPServer) Serve() error { - return s.srv.Serve(s.l) -} - -// Close stops the HTTPServer from listening for inbound requests.
-func (s *HTTPServer) Close() error { - return s.l.Close() -} - -func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - // Define the context that we'll pass around to share info - // like the docker-request-id. - // - // The 'context' will be used for global data that should - // apply to all requests. Data that is specific to the - // immediate function being called should still be passed - // as 'args' on the function call. - ctx := context.Background() - handlerFunc := s.handleWithGlobalMiddlewares(handler) - - vars := mux.Vars(r) - if vars == nil { - vars = make(map[string]string) - } - - if err := handlerFunc(ctx, w, r, vars); err != nil { - logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) - httputils.MakeErrorHandler(err)(w, r) - } - } -} - -// InitRouter initializes the list of routers for the server. -// This method also enables the Go profiler if enableProfiler is true. -func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) { - for _, r := range routers { - s.routers = append(s.routers, r) - } - - m := s.createMux() - if enableProfiler { - profilerSetup(m) - } - s.routerSwapper = &routerSwapper{ - router: m, - } -} - -// createMux initializes the main router the server uses. -func (s *Server) createMux() *mux.Router { - m := mux.NewRouter() - - logrus.Debug("Registering routers") - for _, apiRouter := range s.routers { - for _, r := range apiRouter.Routes() { - f := s.makeHTTPHandler(r.Handler()) - - logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) - m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) - m.Path(r.Path()).Methods(r.Method()).Handler(f) - } - } - - err := errors.NewRequestNotFoundError(fmt.Errorf("page not found")) - notFoundHandler := httputils.MakeErrorHandler(err) - m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler) - m.NotFoundHandler = notFoundHandler - - return m -} - -// Wait blocks the server goroutine until it exits. -// It sends an error message if there is any error during -// the API execution. -func (s *Server) Wait(waitChan chan error) { - if err := s.serveAPI(); err != nil { - logrus.Errorf("ServeAPI error: %v", err) - waitChan <- err - return - } - waitChan <- nil -} - -// DisableProfiler reloads the server mux without adding the profiler routes. -func (s *Server) DisableProfiler() { - s.routerSwapper.Swap(s.createMux()) -} - -// EnableProfiler reloads the server mux adding the profiler routes. 
-func (s *Server) EnableProfiler() { - m := s.createMux() - profilerSetup(m) - s.routerSwapper.Swap(m) -} diff --git a/api/server/server_test.go b/api/server/server_test.go deleted file mode 100644 index 13c28eab7..000000000 --- a/api/server/server_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package server - -import ( - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/middleware" - - "golang.org/x/net/context" -) - -func TestMiddlewares(t *testing.T) { - cfg := &Config{ - Version: "0.1omega2", - } - srv := &Server{ - cfg: cfg, - } - - srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinVersion)) - - req, _ := http.NewRequest("GET", "/containers/json", nil) - resp := httptest.NewRecorder() - ctx := context.Background() - - localHandler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if httputils.VersionFromContext(ctx) == "" { - t.Fatalf("Expected version, got empty string") - } - - if sv := w.Header().Get("Server"); !strings.Contains(sv, "Docker/0.1omega2") { - t.Fatalf("Expected server version in the header `Docker/0.1omega2`, got %s", sv) - } - - return nil - } - - handlerFunc := srv.handleWithGlobalMiddlewares(localHandler) - if err := handlerFunc(ctx, resp, req, map[string]string{}); err != nil { - t.Fatal(err) - } -} diff --git a/api/types/backend/backend.go b/api/types/backend/backend.go deleted file mode 100644 index c7b4f0175..000000000 --- a/api/types/backend/backend.go +++ /dev/null @@ -1,85 +0,0 @@ -// Package backend includes types to send information to server backends. -// TODO(calavera): This package is pending extraction to engine-api -// when the server package is clean of daemon dependencies. -package backend - -import ( - "io" - - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/engine-api/types" -) - -// ContainerAttachConfig holds the streams to use when connecting to a container to view logs. -type ContainerAttachConfig struct { - GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error) - UseStdin bool - UseStdout bool - UseStderr bool - Logs bool - Stream bool - DetachKeys string - - // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/stderr messages accordingly. - // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change... - // HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream. - // Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately. - MuxStreams bool -} - -// ContainerLogsConfig holds configs for logging operations. Exists -// for users of the backend to pass it a logging configuration. -type ContainerLogsConfig struct { - types.ContainerLogsOptions - OutStream io.Writer -} - -// ContainerStatsConfig holds information for configuring the runtime -// behavior of a backend.ContainerStats() call. -type ContainerStatsConfig struct { - Stream bool - OutStream io.Writer - Version string -} - -// ExecInspect holds information about a running process started -// with docker exec.
-type ExecInspect struct { - ID string - Running bool - ExitCode *int - ProcessConfig *ExecProcessConfig - OpenStdin bool - OpenStderr bool - OpenStdout bool - CanRemove bool - ContainerID string - DetachKeys []byte -} - -// ExecProcessConfig holds information about the exec process -// running on the host. -type ExecProcessConfig struct { - Tty bool `json:"tty"` - Entrypoint string `json:"entrypoint"` - Arguments []string `json:"arguments"` - Privileged *bool `json:"privileged,omitempty"` - User string `json:"user,omitempty"` -} - -// ContainerCommitConfig is a wrapper around -// types.ContainerCommitConfig that also -// transports configuration changes for a container. -type ContainerCommitConfig struct { - types.ContainerCommitConfig - Changes []string -} - -// ProgressWriter is a helper struct used -// to transport progress streams. -type ProgressWriter struct { - Output io.Writer - StdoutFormatter *streamformatter.StdoutFormatter - StderrFormatter *streamformatter.StderrFormatter - ProgressReaderFunc func(io.ReadCloser) io.ReadCloser -} diff --git a/builder/builder.go b/builder/builder.go deleted file mode 100644 index 125e56ab2..000000000 --- a/builder/builder.go +++ /dev/null @@ -1,155 +0,0 @@ -// Package builder defines interfaces for any Docker builder to implement. -// -// Historically, only server-side Dockerfile interpreters existed. -// This package allows for other implementations of Docker builders. -package builder - -import ( - "io" - "os" - "time" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/image" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "golang.org/x/net/context" -) - -const ( - // DefaultDockerfileName is the Default filename with Docker commands, read by docker build - DefaultDockerfileName string = "Dockerfile" -) - -// Context represents a file system tree. -type Context interface { - // Close allows the caller to signal that the filesystem tree won't be used anymore. - // For Context implementations using a temporary directory, it is recommended to - // delete the temporary directory in Close(). - Close() error - // Stat returns an entry corresponding to path if any. - // It is recommended to return an error if path was not found. - // If path is a symlink it also returns the path to the target file. - Stat(path string) (string, FileInfo, error) - // Open opens path from the context and returns a readable stream of it. - Open(path string) (io.ReadCloser, error) - // Walk walks the tree of the context with the function passed to it. - Walk(root string, walkFn WalkFunc) error -} - -// WalkFunc is the type of the function called for each file or directory visited by Context.Walk(). -type WalkFunc func(path string, fi FileInfo, err error) error - -// ModifiableContext represents a modifiable Context. -// TODO: remove this interface once we can get rid of Remove() -type ModifiableContext interface { - Context - // Remove deletes the entry specified by `path`. - // It is usual for directory entries to delete all their subentries. - Remove(path string) error -} - -// FileInfo extends os.FileInfo to allow retrieving an absolute path to the file. -// TODO: remove this interface once pkg/archive exposes a walk function that Context can use. -type FileInfo interface { - os.FileInfo - Path() string -} - -// PathFileInfo is a convenience struct that implements the FileInfo interface. -type PathFileInfo struct { - os.FileInfo - // FilePath holds the absolute path to the file.
- FilePath string - // Name holds the basename for the file. - FileName string -} - -// Path returns the absolute path to the file. -func (fi PathFileInfo) Path() string { - return fi.FilePath -} - -// Name returns the basename of the file. -func (fi PathFileInfo) Name() string { - if fi.FileName != "" { - return fi.FileName - } - return fi.FileInfo.Name() -} - -// Hashed defines extra methods intended for implementations of os.FileInfo. -type Hashed interface { - // Hash returns the hash of a file. - Hash() string - SetHash(string) -} - -// HashedFileInfo is a convenient struct that augments FileInfo with a hash field. -type HashedFileInfo struct { - FileInfo - // FileHash represents the hash of a file. - FileHash string -} - -// Hash returns the hash of a file. -func (fi HashedFileInfo) Hash() string { - return fi.FileHash -} - -// SetHash sets the hash of a file. -func (fi *HashedFileInfo) SetHash(h string) { - fi.FileHash = h -} - -// Backend abstracts calls to a Docker Daemon. -type Backend interface { - // TODO: use digest reference instead of name - - // GetImageOnBuild looks up a Docker image referenced by `name`. - GetImageOnBuild(name string) (Image, error) - // TagImageWithReference tags an image with newTag - TagImageWithReference(image.ID, reference.Named) error - // PullOnBuild tells Docker to pull image referenced by `name`. - PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error) - // ContainerAttachRaw attaches to container. - ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error - // ContainerCreate creates a new Docker container and returns potential warnings - ContainerCreate(config types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error) - // ContainerRm removes a container specified by `id`. - ContainerRm(name string, config *types.ContainerRmConfig) error - // Commit creates a new Docker image from an existing Docker container. - Commit(string, *backend.ContainerCommitConfig) (string, error) - // ContainerKill stops the container execution abruptly. - ContainerKill(containerID string, sig uint64) error - // ContainerStart starts a new container - ContainerStart(containerID string, hostConfig *container.HostConfig, validateHostname bool) error - // ContainerWait stops processing until the given container is stopped. - ContainerWait(containerID string, timeout time.Duration) (int, error) - // ContainerUpdateCmdOnBuild updates container.Path and container.Args - ContainerUpdateCmdOnBuild(containerID string, cmd []string) error - - // ContainerCopy copies/extracts a source FileInfo to a destination path inside a container - // specified by a container object. - // TODO: make an Extract method instead of passing `decompress` - // TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used - // with Context.Walk - //ContainerCopy(name string, res string) (io.ReadCloser, error) - // TODO: use copyBackend api - CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error -} - -// Image represents a Docker image used by the builder. -type Image interface { - ImageID() string - RunConfig() *container.Config -} - -// ImageCache abstracts an image cache store. -// (parent image, child runconfig) -> child image -type ImageCache interface { - // GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` -// and runconfig equals `cfg`.
A cache miss is expected to return an empty ID and a nil error. - GetCachedImageOnBuild(parentID string, cfg *container.Config) (imageID string, err error) -} diff --git a/builder/context.go b/builder/context.go deleted file mode 100644 index 600f42319..000000000 --- a/builder/context.go +++ /dev/null @@ -1,260 +0,0 @@ -package builder - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/gitutils" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" -) - -// ValidateContextDirectory checks if all the contents of the directory -// can be read and returns an error if some files can't be read -// symlinks which point to non-existing files don't trigger an error -func ValidateContextDirectory(srcPath string, excludes []string) error { - contextRoot, err := getContextRoot(srcPath) - if err != nil { - return err - } - return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - if os.IsPermission(err) { - return fmt.Errorf("can't stat '%s'", filePath) - } - if os.IsNotExist(err) { - return nil - } - return err - } - - // skip this directory/file if it's not in the path; it won't get added to the context - if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { - return err - } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { - return err - } else if skip { - if f.IsDir() { - return filepath.SkipDir - } - return nil - } - - // skip checking if symlinks point to non-existing files, such symlinks can be useful - // also skip named pipes, because they hang on open - if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { - return nil - } - - if !f.IsDir() { - currentFile, err := os.Open(filePath) - if err != nil && os.IsPermission(err) { - return fmt.Errorf("no permission to read from '%s'", filePath) - } - currentFile.Close() - } - return nil - }) -} - -// GetContextFromReader will read the contents of the given reader as either a -// Dockerfile or tar archive. Returns a tar archive used as a context and a -// path to the Dockerfile inside the tar. -func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) { - buf := bufio.NewReader(r) - - magic, err := buf.Peek(archive.HeaderSize) - if err != nil && err != io.EOF { - return nil, "", fmt.Errorf("failed to peek context header from STDIN: %v", err) - } - - if archive.IsArchive(magic) { - return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil - } - - // Input should be read as a Dockerfile.
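// For illustration only (an assumed invocation): piping a Dockerfile on
// stdin, as in "docker build - < Dockerfile", reaches this branch; the bytes
// are written into a fresh temporary directory as DefaultDockerfileName and
// that directory is then tarred up to serve as the context:
//
//	ctx, rel, err := GetContextFromReader(os.Stdin, "")
//	// rel == DefaultDockerfileName; ctx streams a tar of the temp dir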
- tmpDir, err := ioutil.TempDir("", "docker-build-context-") - if err != nil { - return nil, "", fmt.Errorf("unable to create temporary context directory: %v", err) - } - - f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName)) - if err != nil { - return nil, "", err - } - _, err = io.Copy(f, buf) - if err != nil { - f.Close() - return nil, "", err - } - - if err := f.Close(); err != nil { - return nil, "", err - } - if err := r.Close(); err != nil { - return nil, "", err - } - - tar, err := archive.Tar(tmpDir, archive.Uncompressed) - if err != nil { - return nil, "", err - } - - return ioutils.NewReadCloserWrapper(tar, func() error { - err := tar.Close() - os.RemoveAll(tmpDir) - return err - }), DefaultDockerfileName, nil - -} - -// GetContextFromGitURL uses a Git URL as context for a `docker build`. The -// git repo is cloned into a temporary directory used as the context directory. -// Returns the absolute path to the temporary context directory, the relative -// path of the dockerfile in that context directory, and a non-nil error on -// failure. -func GetContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { - if _, err := exec.LookPath("git"); err != nil { - return "", "", fmt.Errorf("unable to find 'git': %v", err) - } - if absContextDir, err = gitutils.Clone(gitURL); err != nil { - return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err) - } - - return getDockerfileRelPath(absContextDir, dockerfileName) -} - -// GetContextFromURL uses a remote URL as context for a `docker build`. The -// remote resource is downloaded as either a Dockerfile or a tar archive. -// Returns the tar archive used for the context and a path of the -// dockerfile inside the tar. -func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) { - response, err := httputils.Download(remoteURL) - if err != nil { - return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err) - } - progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true) - - // Pass the response body through a progress reader. - progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL)) - - return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName) -} - -// GetContextFromLocalDir uses the given local directory as context for a -// `docker build`. Returns the absolute path to the local context directory, -// the relative path of the dockerfile in that context directory, and a non-nil -// error on failure. -func GetContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) { - // When using a local context directory, if the Dockerfile is specified - // with the `-f/--file` option then it is considered relative to the - // current directory and not the context directory.
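// For example (illustrative): with "docker build -f build/Dockerfile ." the
// path build/Dockerfile is resolved against the caller's working directory,
// not against the context directory ".".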
- if dockerfileName != "" { - if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { - return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err) - } - } - - return getDockerfileRelPath(localDir, dockerfileName) -} - -// getDockerfileRelPath uses the given context directory for a `docker build` -// and returns the absolute path to the context directory, the relative path of -// the dockerfile in that context directory, and a non-nil error on success. -func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) { - if absContextDir, err = filepath.Abs(givenContextDir); err != nil { - return "", "", fmt.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err) - } - - // The context dir might be a symbolic link, so follow it to the actual - // target directory. - // - // FIXME. We use isUNC (always false on non-Windows platforms) to workaround - // an issue in golang. On Windows, EvalSymLinks does not work on UNC file - // paths (those starting with \\). This hack means that when using links - // on UNC paths, they will not be followed. - if !isUNC(absContextDir) { - absContextDir, err = filepath.EvalSymlinks(absContextDir) - if err != nil { - return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err) - } - } - - stat, err := os.Lstat(absContextDir) - if err != nil { - return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err) - } - - if !stat.IsDir() { - return "", "", fmt.Errorf("context must be a directory: %s", absContextDir) - } - - absDockerfile := givenDockerfile - if absDockerfile == "" { - // No -f/--file was specified so use the default relative to the - // context directory. - absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName) - - // Just to be nice ;-) look for 'dockerfile' too but only - // use it if we found it, otherwise ignore this check - if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) { - altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName)) - if _, err = os.Lstat(altPath); err == nil { - absDockerfile = altPath - } - } - } - - // If not already an absolute path, the Dockerfile path should be joined to - // the base directory. - if !filepath.IsAbs(absDockerfile) { - absDockerfile = filepath.Join(absContextDir, absDockerfile) - } - - // Evaluate symlinks in the path to the Dockerfile too. - // - // FIXME. We use isUNC (always false on non-Windows platforms) to workaround - // an issue in golang. On Windows, EvalSymLinks does not work on UNC file - // paths (those starting with \\). This hack means that when using links - // on UNC paths, they will not be followed. 
- if !isUNC(absDockerfile) { - absDockerfile, err = filepath.EvalSymlinks(absDockerfile) - if err != nil { - return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) - } - } - - if _, err := os.Lstat(absDockerfile); err != nil { - if os.IsNotExist(err) { - return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile) - } - return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err) - } - - if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil { - return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err) - } - - if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { - return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir) - } - - return absContextDir, relDockerfile, nil -} - -// isUNC returns true if the path is UNC (one starting \\). It always returns -// false on non-Windows platforms. -func isUNC(path string) bool { - return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) -} diff --git a/builder/context_test.go b/builder/context_test.go deleted file mode 100644 index 27d29d79f..000000000 --- a/builder/context_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package builder - -import ( - "archive/tar" - "bytes" - "io" - "io/ioutil" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/docker/docker/pkg/archive" -) - -var prepareEmpty = func(t *testing.T) (string, func()) { - return "", func() {} -} - -var prepareNoFiles = func(t *testing.T) (string, func()) { - return createTestTempDir(t, "", "builder-context-test") -} - -var prepareOneFile = func(t *testing.T) (string, func()) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - return contextDir, cleanup -} - -func testValidateContextDirectory(t *testing.T, prepare func(t *testing.T) (string, func()), excludes []string) { - contextDir, cleanup := prepare(t) - defer cleanup() - - err := ValidateContextDirectory(contextDir, excludes) - - if err != nil { - t.Fatalf("Error should be nil, got: %s", err) - } -} - -func TestGetContextFromLocalDirNoDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if absContextDir != "" { - t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) - } - - if relDockerfile != "" { - t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) - } -} - -func TestGetContextFromLocalDirNotExistingDir(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - fakePath := filepath.Join(contextDir, "fake") - - absContextDir, relDockerfile, err := GetContextFromLocalDir(fakePath, "") - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if absContextDir != "" { - t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) - } - - if relDockerfile != "" { - t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) - } -} - -func TestGetContextFromLocalDirNotExistingDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - fakePath := filepath.Join(contextDir, "fake") - - absContextDir, relDockerfile, err :=
GetContextFromLocalDir(contextDir, fakePath) - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if absContextDir != "" { - t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) - } - - if relDockerfile != "" { - t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) - } -} - -func TestGetContextFromLocalDirWithNoDirectory(t *testing.T) { - contextDir, dirCleanup := createTestTempDir(t, "", "builder-context-test") - defer dirCleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - chdirCleanup := chdir(t, contextDir) - defer chdirCleanup() - - absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") - - if err != nil { - t.Fatalf("Error when getting context from local dir: %s", err) - } - - if absContextDir != contextDir { - t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) - } -} - -func TestGetContextFromLocalDirWithDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") - - if err != nil { - t.Fatalf("Error when getting context from local dir: %s", err) - } - - if absContextDir != contextDir { - t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) - } -} - -func TestGetContextFromLocalDirLocalFile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - testFilename := createTestTempFile(t, contextDir, "tmpTest", "test", 0777) - - absContextDir, relDockerfile, err := GetContextFromLocalDir(testFilename, "") - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if absContextDir != "" { - t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) - } - - if relDockerfile != "" { - t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) - } -} - -func TestGetContextFromLocalDirWithCustomDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - chdirCleanup := chdir(t, contextDir) - defer chdirCleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, DefaultDockerfileName) - - if err != nil { - t.Fatalf("Error when getting context from local dir: %s", err) - } - - if absContextDir != contextDir { - t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) - } - -} - -func TestGetContextFromReaderString(t *testing.T) { - tarArchive, relDockerfile, err := GetContextFromReader(ioutil.NopCloser(strings.NewReader(dockerfileContents)), "") - - if err != nil { - t.Fatalf("Error when 
executing GetContextFromReader: %s", err) - } - - tarReader := tar.NewReader(tarArchive) - - _, err = tarReader.Next() - - if err != nil { - t.Fatalf("Error when reading tar archive: %s", err) - } - - buff := new(bytes.Buffer) - buff.ReadFrom(tarReader) - contents := buff.String() - - _, err = tarReader.Next() - - if err != io.EOF { - t.Fatalf("Tar stream too long: %s", err) - } - - if err = tarArchive.Close(); err != nil { - t.Fatalf("Error when closing tar stream: %s", err) - } - - if dockerfileContents != contents { - t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) - } -} - -func TestGetContextFromReaderTar(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("Error when creating tar: %s", err) - } - - tarArchive, relDockerfile, err := GetContextFromReader(tarStream, DefaultDockerfileName) - - if err != nil { - t.Fatalf("Error when executing GetContextFromReader: %s", err) - } - - tarReader := tar.NewReader(tarArchive) - - header, err := tarReader.Next() - - if err != nil { - t.Fatalf("Error when reading tar archive: %s", err) - } - - if header.Name != DefaultDockerfileName { - t.Fatalf("Dockerfile name should be: %s, got: %s", DefaultDockerfileName, header.Name) - } - - buff := new(bytes.Buffer) - buff.ReadFrom(tarReader) - contents := buff.String() - - _, err = tarReader.Next() - - if err != io.EOF { - t.Fatalf("Tar stream too long: %s", err) - } - - if err = tarArchive.Close(); err != nil { - t.Fatalf("Error when closing tar stream: %s", err) - } - - if dockerfileContents != contents { - t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) - } -} - -func TestValidateContextDirectoryEmptyContext(t *testing.T) { - // This isn't a valid test on Windows. See https://play.golang.org/p/RR6z6jxR81. - // The test will ultimately end up calling filepath.Abs(""). On Windows, - // golang will error. On Linux, golang will return /. Due to there being - // drive letters on Windows, this is probably the correct behaviour for - // Windows. 
- if runtime.GOOS == "windows" { - t.Skip("Invalid test on Windows") - } - testValidateContextDirectory(t, prepareEmpty, []string{}) -} - -func TestValidateContextDirectoryContextWithNoFiles(t *testing.T) { - testValidateContextDirectory(t, prepareNoFiles, []string{}) -} - -func TestValidateContextDirectoryWithOneFile(t *testing.T) { - testValidateContextDirectory(t, prepareOneFile, []string{}) -} - -func TestValidateContextDirectoryWithOneFileExcludes(t *testing.T) { - testValidateContextDirectory(t, prepareOneFile, []string{DefaultDockerfileName}) -} diff --git a/builder/context_unix.go b/builder/context_unix.go deleted file mode 100644 index d1f72e057..000000000 --- a/builder/context_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package builder - -import ( - "path/filepath" -) - -func getContextRoot(srcPath string) (string, error) { - return filepath.Join(srcPath, "."), nil -} diff --git a/builder/context_windows.go b/builder/context_windows.go deleted file mode 100644 index b8ba2ba23..000000000 --- a/builder/context_windows.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build windows - -package builder - -import ( - "path/filepath" - - "github.com/docker/docker/pkg/longpath" -) - -func getContextRoot(srcPath string) (string, error) { - cr, err := filepath.Abs(srcPath) - if err != nil { - return "", err - } - return longpath.AddPrefix(cr), nil -} diff --git a/builder/dockerfile/bflag.go b/builder/dockerfile/bflag.go deleted file mode 100644 index c2e6c7dae..000000000 --- a/builder/dockerfile/bflag.go +++ /dev/null @@ -1,176 +0,0 @@ -package dockerfile - -import ( - "fmt" - "strings" -) - -// FlagType is the type of the build flag -type FlagType int - -const ( - boolType FlagType = iota - stringType -) - -// BFlags contains all flags information for the builder -type BFlags struct { - Args []string // actual flags/args from cmd line - flags map[string]*Flag - used map[string]*Flag - Err error -} - -// Flag contains all information for a flag -type Flag struct { - bf *BFlags - name string - flagType FlagType - Value string -} - -// NewBFlags return the new BFlags struct -func NewBFlags() *BFlags { - return &BFlags{ - flags: make(map[string]*Flag), - used: make(map[string]*Flag), - } -} - -// AddBool adds a bool flag to BFlags -// Note, any error will be generated when Parse() is called (see Parse). -func (bf *BFlags) AddBool(name string, def bool) *Flag { - flag := bf.addFlag(name, boolType) - if flag == nil { - return nil - } - if def { - flag.Value = "true" - } else { - flag.Value = "false" - } - return flag -} - -// AddString adds a string flag to BFlags -// Note, any error will be generated when Parse() is called (see Parse). -func (bf *BFlags) AddString(name string, def string) *Flag { - flag := bf.addFlag(name, stringType) - if flag == nil { - return nil - } - flag.Value = def - return flag -} - -// addFlag is a generic func used by the other AddXXX() func -// to add a new flag to the BFlags struct. -// Note, any error will be generated when Parse() is called (see Parse). 
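As a usage sketch of the API above (the flag names "verbose" and "mode" are invented; only NewBFlags, AddBool, AddString, Parse, IsUsed, IsTrue, and Value come from this hunk):

    package dockerfile

    import "fmt"

    // exampleBFlags shows the intended calling sequence: declare the flags,
    // hand the raw "--" arguments to Parse, then inspect the results.
    func exampleBFlags() error {
        flags := NewBFlags()
        flVerbose := flags.AddBool("verbose", false) // hypothetical flag
        flMode := flags.AddString("mode", "default") // hypothetical flag

        flags.Args = []string{"--verbose", "--mode=fast"}
        if err := flags.Parse(); err != nil {
            // Definition-time errors (e.g. duplicate names) surface here too.
            return err
        }
        if flVerbose.IsTrue() && flMode.IsUsed() {
            fmt.Println("mode =", flMode.Value)
        }
        return nil
    }
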
-func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { - if _, ok := bf.flags[name]; ok { - bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) - return nil - } - - newFlag := &Flag{ - bf: bf, - name: name, - flagType: flagType, - } - bf.flags[name] = newFlag - - return newFlag -} - -// IsUsed checks if the flag is used -func (fl *Flag) IsUsed() bool { - if _, ok := fl.bf.used[fl.name]; ok { - return true - } - return false -} - -// IsTrue checks if a bool flag is true -func (fl *Flag) IsTrue() bool { - if fl.flagType != boolType { - // Should never get here - panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) - } - return fl.Value == "true" -} - -// Parse parses and checks if the BFlags is valid. -// Any error noticed during the AddXXX() funcs will be generated/returned -// here. We do this because an error during AddXXX() is more like a -// compile time error so it doesn't matter too much when we stop our -// processing as long as we do stop it, so this allows the code -// around AddXXX() to be just: -// defFlag := AddString("description", "") -// w/o needing to add an if-statement around each one. -func (bf *BFlags) Parse() error { - // If there was an error while defining the possible flags - // go ahead and bubble it back up here since we didn't do it - // earlier in the processing - if bf.Err != nil { - return fmt.Errorf("Error setting up flags: %s", bf.Err) - } - - for _, arg := range bf.Args { - if !strings.HasPrefix(arg, "--") { - return fmt.Errorf("Arg should start with -- : %s", arg) - } - - if arg == "--" { - return nil - } - - arg = arg[2:] - value := "" - - index := strings.Index(arg, "=") - if index >= 0 { - value = arg[index+1:] - arg = arg[:index] - } - - flag, ok := bf.flags[arg] - if !ok { - return fmt.Errorf("Unknown flag: %s", arg) - } - - if _, ok = bf.used[arg]; ok { - return fmt.Errorf("Duplicate flag specified: %s", arg) - } - - bf.used[arg] = flag - - switch flag.flagType { - case boolType: - // value == "" is only ok if no "=" was specified - if index >= 0 && value == "" { - return fmt.Errorf("Missing a value on flag: %s", arg) - } - - lower := strings.ToLower(value) - if lower == "" { - flag.Value = "true" - } else if lower == "true" || lower == "false" { - flag.Value = lower - } else { - return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) - } - - case stringType: - if index < 0 { - return fmt.Errorf("Missing a value on flag: %s", arg) - } - flag.Value = value - - default: - panic(fmt.Errorf("No idea what kind of flag we have! 
Should never get here!")) - } - - } - - return nil -} diff --git a/builder/dockerfile/bflag_test.go b/builder/dockerfile/bflag_test.go deleted file mode 100644 index 65cfceadd..000000000 --- a/builder/dockerfile/bflag_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package dockerfile - -import ( - "testing" -) - -func TestBuilderFlags(t *testing.T) { - var expected string - var err error - - // --- - - bf := NewBFlags() - bf.Args = []string{} - if err := bf.Parse(); err != nil { - t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err) - } - - // --- - - bf = NewBFlags() - bf.Args = []string{"--"} - if err := bf.Parse(); err != nil { - t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err) - } - - // --- - - bf = NewBFlags() - flStr1 := bf.AddString("str1", "") - flBool1 := bf.AddBool("bool1", false) - bf.Args = []string{} - if err = bf.Parse(); err != nil { - t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err) - } - - if flStr1.IsUsed() == true { - t.Fatalf("Test3 - str1 was not used!") - } - if flBool1.IsUsed() == true { - t.Fatalf("Test3 - bool1 was not used!") - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err) - } - - if flStr1.Value != "HI" { - t.Fatalf("Str1 was supposed to default to: HI") - } - if flBool1.IsTrue() { - t.Fatalf("Bool1 was supposed to default to: false") - } - if flStr1.IsUsed() == true { - t.Fatalf("Str1 was not used!") - } - if flBool1.IsUsed() == true { - t.Fatalf("Bool1 was not used!") - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - bf.Args = []string{"--str1"} - - if err = bf.Parse(); err == nil { - t.Fatalf("Test %q was supposed to fail", bf.Args) - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - bf.Args = []string{"--str1="} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - expected = "" - if flStr1.Value != expected { - t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - bf.Args = []string{"--str1=BYE"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - expected = "BYE" - if flStr1.Value != expected { - t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - if !flBool1.IsTrue() { - t.Fatalf("Test-b1 Bool1 was supposed to be true") - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1=true"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - if !flBool1.IsTrue() { - t.Fatalf("Test-b2 Bool1 was supposed to be true") - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1=false"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - if flBool1.IsTrue() { - t.Fatalf("Test-b3 Bool1 was supposed to be false") - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1=false1"} - - if err = bf.Parse(); err == nil { - t.Fatalf("Test %q was supposed to 
fail", bf.Args) - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool2"} - - if err = bf.Parse(); err == nil { - t.Fatalf("Test %q was supposed to fail", bf.Args) - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1", "--str1=BYE"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - if flStr1.Value != "BYE" { - t.Fatalf("Teset %s, str1 should be BYE", bf.Args) - } - if !flBool1.IsTrue() { - t.Fatalf("Teset %s, bool1 should be true", bf.Args) - } -} diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go deleted file mode 100644 index a7f96c6f1..000000000 --- a/builder/dockerfile/builder.go +++ /dev/null @@ -1,322 +0,0 @@ -package dockerfile - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerfile/parser" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "golang.org/x/net/context" -) - -var validCommitCommands = map[string]bool{ - "cmd": true, - "entrypoint": true, - "healthcheck": true, - "env": true, - "expose": true, - "label": true, - "onbuild": true, - "user": true, - "volume": true, - "workdir": true, -} - -// BuiltinAllowedBuildArgs is list of built-in allowed build args -var BuiltinAllowedBuildArgs = map[string]bool{ - "HTTP_PROXY": true, - "http_proxy": true, - "HTTPS_PROXY": true, - "https_proxy": true, - "FTP_PROXY": true, - "ftp_proxy": true, - "NO_PROXY": true, - "no_proxy": true, -} - -// Builder is a Dockerfile builder -// It implements the builder.Backend interface. -type Builder struct { - options *types.ImageBuildOptions - - Stdout io.Writer - Stderr io.Writer - Output io.Writer - - docker builder.Backend - context builder.Context - clientCtx context.Context - cancel context.CancelFunc - - dockerfile *parser.Node - runConfig *container.Config // runconfig for cmd, run, entrypoint etc. - flags *BFlags - tmpContainers map[string]struct{} - image string // imageID - noBaseImage bool - maintainer string - cmdSet bool - disableCommit bool - cacheBusted bool - allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'. - - // TODO: remove once docker.Commit can receive a tag - id string -} - -// BuildManager implements builder.Backend and is shared across all Builder objects. -type BuildManager struct { - backend builder.Backend -} - -// NewBuildManager creates a BuildManager. -func NewBuildManager(b builder.Backend) (bm *BuildManager) { - return &BuildManager{backend: b} -} - -// BuildFromContext builds a new image from a given context. 
-func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) { - buildContext, dockerfileName, err := builder.DetectContextFromRemoteURL(src, remote, pg.ProgressReaderFunc) - if err != nil { - return "", err - } - defer func() { - if err := buildContext.Close(); err != nil { - logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) - } - }() - if len(dockerfileName) > 0 { - buildOptions.Dockerfile = dockerfileName - } - b, err := NewBuilder(ctx, buildOptions, bm.backend, builder.DockerIgnoreContext{ModifiableContext: buildContext}, nil) - if err != nil { - return "", err - } - return b.build(pg.StdoutFormatter, pg.StderrFormatter, pg.Output) -} - -// NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config. -// If dockerfile is nil, the Dockerfile specified by Config.DockerfileName, -// will be read from the Context passed to Build(). -func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, backend builder.Backend, buildContext builder.Context, dockerfile io.ReadCloser) (b *Builder, err error) { - if config == nil { - config = new(types.ImageBuildOptions) - } - if config.BuildArgs == nil { - config.BuildArgs = make(map[string]string) - } - ctx, cancel := context.WithCancel(clientCtx) - b = &Builder{ - clientCtx: ctx, - cancel: cancel, - options: config, - Stdout: os.Stdout, - Stderr: os.Stderr, - docker: backend, - context: buildContext, - runConfig: new(container.Config), - tmpContainers: map[string]struct{}{}, - id: stringid.GenerateNonCryptoID(), - allowedBuildArgs: make(map[string]bool), - } - if dockerfile != nil { - b.dockerfile, err = parser.Parse(dockerfile) - if err != nil { - return nil, err - } - } - - return b, nil -} - -// sanitizeRepoAndTags parses the raw "t" parameter received from the client -// to a slice of repoAndTag. -// It also validates each repoName and tag. -func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { - var ( - repoAndTags []reference.Named - // This map is used for deduplicating the "-t" parameter. - uniqNames = make(map[string]struct{}) - ) - for _, repo := range names { - if repo == "" { - continue - } - - ref, err := reference.ParseNamed(repo) - if err != nil { - return nil, err - } - - ref = reference.WithDefaultTag(ref) - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return nil, errors.New("build tag cannot contain a digest") - } - - if _, isTagged := ref.(reference.NamedTagged); !isTagged { - ref, err = reference.WithTag(ref, reference.DefaultTag) - if err != nil { - return nil, err - } - } - - nameWithTag := ref.String() - - if _, exists := uniqNames[nameWithTag]; !exists { - uniqNames[nameWithTag] = struct{}{} - repoAndTags = append(repoAndTags, ref) - } - } - return repoAndTags, nil -} - -// build runs the Dockerfile builder from a context and a docker object that allows to make calls -// to Docker. -// -// This will (barring errors): -// -// * read the dockerfile from context -// * parse the dockerfile if not already parsed -// * walk the AST and execute it by dispatching to handlers. If Remove -// or ForceRemove is set, additional cleanup around containers happens after -// processing. -// * Tag image, if applicable. -// * Print a happy message and return the image ID. 
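Before those steps run, the raw -t values pass through sanitizeRepoAndTags above; a sketch of the rules it enforces, following the code as written:

    package dockerfile

    func tagExample() error {
        // Empty entries are skipped, an untagged name picks up the default
        // ":latest" tag and is then deduplicated against an explicit one,
        // and digest references are rejected outright.
        refs, err := sanitizeRepoAndTags([]string{
            "",                  // skipped
            "myrepo/app",        // becomes myrepo/app:latest
            "myrepo/app:latest", // duplicate of the above, dropped
            "myrepo/app:v1",     // kept as-is
        })
        if err != nil {
            return err
        }
        _ = refs // two entries: myrepo/app:latest and myrepo/app:v1
        return nil
    }
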
-// -func (b *Builder) build(stdout io.Writer, stderr io.Writer, out io.Writer) (string, error) { - b.Stdout = stdout - b.Stderr = stderr - b.Output = out - - // If Dockerfile was not parsed yet, extract it from the Context - if b.dockerfile == nil { - if err := b.readDockerfile(); err != nil { - return "", err - } - } - - repoAndTags, err := sanitizeRepoAndTags(b.options.Tags) - if err != nil { - return "", err - } - - if len(b.options.Labels) > 0 { - line := "LABEL " - for k, v := range b.options.Labels { - line += fmt.Sprintf("%q=%q ", k, v) - } - _, node, err := parser.ParseLine(line) - if err != nil { - return "", err - } - b.dockerfile.Children = append(b.dockerfile.Children, node) - } - - var shortImgID string - for i, n := range b.dockerfile.Children { - select { - case <-b.clientCtx.Done(): - logrus.Debug("Builder: build cancelled!") - fmt.Fprintf(b.Stdout, "Build cancelled") - return "", fmt.Errorf("Build cancelled") - default: - // Not cancelled yet, keep going... - } - if err := b.dispatch(i, n); err != nil { - if b.options.ForceRemove { - b.clearTmp() - } - return "", err - } - - shortImgID = stringid.TruncateID(b.image) - fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID) - if b.options.Remove { - b.clearTmp() - } - } - - // check if there are any leftover build-args that were passed but not - // consumed during build. Return an error, if there are any. - leftoverArgs := []string{} - for arg := range b.options.BuildArgs { - if !b.isBuildArgAllowed(arg) { - leftoverArgs = append(leftoverArgs, arg) - } - } - if len(leftoverArgs) > 0 { - return "", fmt.Errorf("One or more build-args %v were not consumed, failing build.", leftoverArgs) - } - - if b.image == "" { - return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?") - } - - imageID := image.ID(b.image) - for _, rt := range repoAndTags { - if err := b.docker.TagImageWithReference(imageID, rt); err != nil { - return "", err - } - } - - fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID) - return b.image, nil -} - -// Cancel cancels an ongoing Dockerfile build. -func (b *Builder) Cancel() { - b.cancel() -} - -// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile -// It will: -// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. -// - Do build by calling builder.dispatch() to call all entries' handling routines -// -// BuildFromConfig is used by the /commit endpoint, with the changes -// coming from the query parameter of the same name. -// -// TODO: Remove? 
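A minimal sketch of that /commit usage, with hypothetical change strings; only instructions present in validCommitCommands are accepted:

    package dockerfile

    import "github.com/docker/engine-api/types/container"

    func commitChangesExample(base *container.Config) (*container.Config, error) {
        // Each string is parsed as one Dockerfile instruction and dispatched
        // against the supplied config; nothing is committed to the daemon.
        return BuildFromConfig(base, []string{
            "ENV DEBUG=1", // hypothetical change
            "EXPOSE 8080", // hypothetical change
        })
    }
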
-func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { - ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) - if err != nil { - return nil, err - } - - // ensure that the commands are valid - for _, n := range ast.Children { - if !validCommitCommands[n.Value] { - return nil, fmt.Errorf("%s is not a valid change command", n.Value) - } - } - - b, err := NewBuilder(context.Background(), nil, nil, nil, nil) - if err != nil { - return nil, err - } - b.runConfig = config - b.Stdout = ioutil.Discard - b.Stderr = ioutil.Discard - b.disableCommit = true - - for i, n := range ast.Children { - if err := b.dispatch(i, n); err != nil { - return nil, err - } - } - - return b.runConfig, nil -} diff --git a/builder/dockerfile/builder_unix.go b/builder/dockerfile/builder_unix.go deleted file mode 100644 index 76a7ce74f..000000000 --- a/builder/dockerfile/builder_unix.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !windows - -package dockerfile - -var defaultShell = []string{"/bin/sh", "-c"} diff --git a/builder/dockerfile/builder_windows.go b/builder/dockerfile/builder_windows.go deleted file mode 100644 index 37e9fbcf4..000000000 --- a/builder/dockerfile/builder_windows.go +++ /dev/null @@ -1,3 +0,0 @@ -package dockerfile - -var defaultShell = []string{"cmd", "/S", "/C"} diff --git a/builder/dockerfile/command/command.go b/builder/dockerfile/command/command.go deleted file mode 100644 index f23c6874b..000000000 --- a/builder/dockerfile/command/command.go +++ /dev/null @@ -1,46 +0,0 @@ -// Package command contains the set of Dockerfile commands. -package command - -// Define constants for the command strings -const ( - Add = "add" - Arg = "arg" - Cmd = "cmd" - Copy = "copy" - Entrypoint = "entrypoint" - Env = "env" - Expose = "expose" - From = "from" - Healthcheck = "healthcheck" - Label = "label" - Maintainer = "maintainer" - Onbuild = "onbuild" - Run = "run" - Shell = "shell" - StopSignal = "stopsignal" - User = "user" - Volume = "volume" - Workdir = "workdir" -) - -// Commands is list of all Dockerfile commands -var Commands = map[string]struct{}{ - Add: {}, - Arg: {}, - Cmd: {}, - Copy: {}, - Entrypoint: {}, - Env: {}, - Expose: {}, - From: {}, - Healthcheck: {}, - Label: {}, - Maintainer: {}, - Onbuild: {}, - Run: {}, - Shell: {}, - StopSignal: {}, - User: {}, - Volume: {}, - Workdir: {}, -} diff --git a/builder/dockerfile/dispatchers.go b/builder/dockerfile/dispatchers.go deleted file mode 100644 index 5c9e89b94..000000000 --- a/builder/dockerfile/dispatchers.go +++ /dev/null @@ -1,760 +0,0 @@ -package dockerfile - -// This file contains the dispatchers for each command. Note that -// `nullDispatch` is not actually a command, but support for commands we parse -// but do nothing with. -// -// See evaluator.go for a higher level discussion of the whole evaluator -// package. - -import ( - "fmt" - "regexp" - "runtime" - "sort" - "strconv" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api" - "github.com/docker/docker/builder" - "github.com/docker/docker/pkg/signal" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// ENV foo bar -// -// Sets the environment variable foo to bar, also makes interpolation -// in the dockerfile available from the next statement on via ${foo}. 
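The caller-side contract is easiest to see from the tests later in this patch: by the time env runs, the parser has already split the instruction into alternating name/value pairs. A sketch mirroring TestEnv2Variables:

    package dockerfile

    import "github.com/docker/engine-api/types/container"

    func envExample() error {
        b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
        // ENV var1=val1 var2=val2 arrives as []string{"var1", "val1", "var2", "val2"}.
        if err := env(b, []string{"var1", "val1", "var2", "val2"}, nil, ""); err != nil {
            return err
        }
        // b.runConfig.Env is now []string{"var1=val1", "var2=val2"}; a repeated
        // name replaces the existing entry rather than appending a new one.
        return nil
    }
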
-// -func env(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return errAtLeastOneArgument("ENV") - } - - if len(args)%2 != 0 { - // should never get here, but just in case - return errTooManyArguments("ENV") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - // TODO/FIXME/NOT USED - // Just here to show how to use the builder flags stuff within the - // context of a builder command. Will remove once we actually add - // a builder command to something! - /* - flBool1 := b.flags.AddBool("bool1", false) - flStr1 := b.flags.AddString("str1", "HI") - - if err := b.flags.Parse(); err != nil { - return err - } - - fmt.Printf("Bool1:%v\n", flBool1) - fmt.Printf("Str1:%v\n", flStr1) - */ - - commitStr := "ENV" - - for j := 0; j < len(args); j++ { - // name ==> args[j] - // value ==> args[j+1] - newVar := args[j] + "=" + args[j+1] + "" - commitStr += " " + newVar - - gotOne := false - for i, envVar := range b.runConfig.Env { - envParts := strings.SplitN(envVar, "=", 2) - if envParts[0] == args[j] { - b.runConfig.Env[i] = newVar - gotOne = true - break - } - } - if !gotOne { - b.runConfig.Env = append(b.runConfig.Env, newVar) - } - j++ - } - - return b.commit("", b.runConfig.Cmd, commitStr) -} - -// MAINTAINER some text -// -// Sets the maintainer metadata. -func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("MAINTAINER") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - b.maintainer = args[0] - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) -} - -// LABEL some json data describing the image -// -// Sets the Label variable foo to bar, -// -func label(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return errAtLeastOneArgument("LABEL") - } - if len(args)%2 != 0 { - // should never get here, but just in case - return errTooManyArguments("LABEL") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - commitStr := "LABEL" - - if b.runConfig.Labels == nil { - b.runConfig.Labels = map[string]string{} - } - - for j := 0; j < len(args); j++ { - // name ==> args[j] - // value ==> args[j+1] - newVar := args[j] + "=" + args[j+1] + "" - commitStr += " " + newVar - - b.runConfig.Labels[args[j]] = args[j+1] - j++ - } - return b.commit("", b.runConfig.Cmd, commitStr) -} - -// ADD foo /path -// -// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling -// exist here. If you do not wish to have this automatic handling, use COPY. -// -func add(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) < 2 { - return errAtLeastOneArgument("ADD") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - return b.runContextCommand(args, true, true, "ADD") -} - -// COPY foo /path -// -// Same as 'ADD' but without the tar and remote url handling. -// -func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) < 2 { - return errAtLeastOneArgument("COPY") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - return b.runContextCommand(args, false, false, "COPY") -} - -// FROM imagename -// -// This sets the image the dockerfile will build on top of. 
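The one special case worth calling out is FROM scratch; this sketch mirrors TestFrom further down in this patch:

    package dockerfile

    import "github.com/docker/engine-api/types/container"

    func fromScratchExample() error {
        b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
        // On Windows this returns "Windows does not support FROM scratch";
        // elsewhere it clears b.image and sets b.noBaseImage to true.
        return from(b, []string{"scratch"}, nil, "")
    }
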
-// -func from(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("FROM") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - name := args[0] - - var ( - image builder.Image - err error - ) - - // Windows cannot support a container with no base image. - if name == api.NoBaseImageSpecifier { - if runtime.GOOS == "windows" { - return fmt.Errorf("Windows does not support FROM scratch") - } - b.image = "" - b.noBaseImage = true - } else { - // TODO: don't use `name`, instead resolve it to a digest - if !b.options.PullParent { - image, err = b.docker.GetImageOnBuild(name) - // TODO: shouldn't we error out if error is different from "not found" ? - } - if image == nil { - image, err = b.docker.PullOnBuild(b.clientCtx, name, b.options.AuthConfigs, b.Output) - if err != nil { - return err - } - } - } - - return b.processImageFrom(image) -} - -// ONBUILD RUN echo yo -// -// ONBUILD triggers run when the image is used in a FROM statement. -// -// ONBUILD handling has a lot of special-case functionality, the heading in -// evaluator.go and comments around dispatch() in the same file explain the -// special cases. search for 'OnBuild' in internals.go for additional special -// cases. -// -func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return errAtLeastOneArgument("ONBUILD") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) - switch triggerInstruction { - case "ONBUILD": - return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) - } - - original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") - - b.runConfig.OnBuild = append(b.runConfig.OnBuild, original) - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ONBUILD %s", original)) -} - -// WORKDIR /tmp -// -// Set the working directory for future RUN/CMD/etc statements. -// -func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("WORKDIR") - } - - err := b.flags.Parse() - if err != nil { - return err - } - - // This is from the Dockerfile and will not necessarily be in platform - // specific semantics, hence ensure it is converted. - b.runConfig.WorkingDir, err = normaliseWorkdir(b.runConfig.WorkingDir, args[0]) - if err != nil { - return err - } - - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("WORKDIR %v", b.runConfig.WorkingDir)) -} - -// RUN some command yo -// -// run a command and commit the image. Args are automatically prepended with -// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under -// Windows, in the event there is only one argument The difference in processing: -// -// RUN echo hi # sh -c echo hi (Linux) -// RUN echo hi # cmd /S /C echo hi (Windows) -// RUN [ "echo", "hi" ] # echo hi -// -func run(b *Builder, args []string, attributes map[string]bool, original string) error { - if b.image == "" && !b.noBaseImage { - return fmt.Errorf("Please provide a source image with `from` prior to run") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - args = handleJSONArgs(args, attributes) - - if !attributes["json"] { - args = append(getShell(b.runConfig), args...) 
- } - config := &container.Config{ - Cmd: strslice.StrSlice(args), - Image: b.image, - } - - // stash the cmd - cmd := b.runConfig.Cmd - if len(b.runConfig.Entrypoint) == 0 && len(b.runConfig.Cmd) == 0 { - b.runConfig.Cmd = config.Cmd - } - - // stash the config environment - env := b.runConfig.Env - - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) - defer func(env []string) { b.runConfig.Env = env }(env) - - // derive the net build-time environment for this run. We let config - // environment override the build time environment. - // This means that we take the b.buildArgs list of env vars and remove - // any of those variables that are defined as part of the container. In other - // words, anything in b.Config.Env. What's left is the list of build-time env - // vars that we need to add to each RUN command - note the list could be empty. - // - // We don't persist the build time environment with container's config - // environment, but just sort and prepend it to the command string at time - // of commit. - // This helps with tracing back the image's actual environment at the time - // of RUN, without leaking it to the final image. It also aids cache - // lookup for same image built with same build time environment. - cmdBuildEnv := []string{} - configEnv := runconfigopts.ConvertKVStringsToMap(b.runConfig.Env) - for key, val := range b.options.BuildArgs { - if !b.isBuildArgAllowed(key) { - // skip build-args that are not in allowed list, meaning they have - // not been defined by an "ARG" Dockerfile command yet. - // This is an error condition but only if there is no "ARG" in the entire - // Dockerfile, so we'll generate any necessary errors after we parsed - // the entire file (see 'leftoverArgs' processing in evaluator.go ) - continue - } - if _, ok := configEnv[key]; !ok { - cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, val)) - } - } - - // derive the command to use for probeCache() and to commit in this container. - // Note that we only do this if there are any build-time env vars. Also, we - // use the special argument "|#" at the start of the args array. This will - // avoid conflicts with any RUN command since commands can not - // start with | (vertical bar). The "#" (number of build envs) is there to - // help ensure proper cache matches. We don't want a RUN command - // that starts with "foo=abc" to be considered part of a build-time env var. - saveCmd := config.Cmd - if len(cmdBuildEnv) > 0 { - sort.Strings(cmdBuildEnv) - tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...) - saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...)) - } - - b.runConfig.Cmd = saveCmd - hit, err := b.probeCache() - if err != nil { - return err - } - if hit { - return nil - } - - // set Cmd manually, this is special case only for Dockerfiles - b.runConfig.Cmd = config.Cmd - // set build-time environment for 'run'. - b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...) - // set config as already being escaped, this prevents double escaping on windows - b.runConfig.ArgsEscaped = true - - logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd) - - cID, err := b.create() - if err != nil { - return err - } - - if err := b.run(cID); err != nil { - return err - } - - // revert to original config environment and set the command string to - // have the build-time env vars in it (if any) so that future cache look-ups - // properly match it. 
- b.runConfig.Env = env - b.runConfig.Cmd = saveCmd - return b.commit(cID, cmd, "run") -} - -// CMD foo -// -// Set the default command to run in the container (which may be empty). -// Argument handling is the same as RUN. -// -func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { - if err := b.flags.Parse(); err != nil { - return err - } - - cmdSlice := handleJSONArgs(args, attributes) - - if !attributes["json"] { - cmdSlice = append(getShell(b.runConfig), cmdSlice...) - } - - b.runConfig.Cmd = strslice.StrSlice(cmdSlice) - // set config as already being escaped, this prevents double escaping on windows - b.runConfig.ArgsEscaped = true - - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { - return err - } - - if len(args) != 0 { - b.cmdSet = true - } - - return nil -} - -// parseOptInterval(flag) is the duration of flag.Value, or 0 if -// empty. An error is reported if the value is given and is not positive. -func parseOptInterval(f *Flag) (time.Duration, error) { - s := f.Value - if s == "" { - return 0, nil - } - d, err := time.ParseDuration(s) - if err != nil { - return 0, err - } - if d <= 0 { - return 0, fmt.Errorf("Interval %#v must be positive", f.name) - } - return d, nil -} - -// HEALTHCHECK foo -// -// Set the default healthcheck command to run in the container (which may be empty). -// Argument handling is the same as RUN. -// -func healthcheck(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return fmt.Errorf("HEALTHCHECK requires an argument") - } - typ := strings.ToUpper(args[0]) - args = args[1:] - if typ == "NONE" { - if len(args) != 0 { - return fmt.Errorf("HEALTHCHECK NONE takes no arguments") - } - test := strslice.StrSlice{typ} - b.runConfig.Healthcheck = &container.HealthConfig{ - Test: test, - } - } else { - if b.runConfig.Healthcheck != nil { - oldCmd := b.runConfig.Healthcheck.Test - if len(oldCmd) > 0 && oldCmd[0] != "NONE" { - fmt.Fprintf(b.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) - } - } - - healthcheck := container.HealthConfig{} - - flInterval := b.flags.AddString("interval", "") - flTimeout := b.flags.AddString("timeout", "") - flRetries := b.flags.AddString("retries", "") - - if err := b.flags.Parse(); err != nil { - return err - } - - switch typ { - case "CMD": - cmdSlice := handleJSONArgs(args, attributes) - if len(cmdSlice) == 0 { - return fmt.Errorf("Missing command after HEALTHCHECK CMD") - } - - if !attributes["json"] { - typ = "CMD-SHELL" - } - - healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...)) - default: - return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) - } - - interval, err := parseOptInterval(flInterval) - if err != nil { - return err - } - healthcheck.Interval = interval - - timeout, err := parseOptInterval(flTimeout) - if err != nil { - return err - } - healthcheck.Timeout = timeout - - if flRetries.Value != "" { - retries, err := strconv.ParseInt(flRetries.Value, 10, 32) - if err != nil { - return err - } - if retries < 1 { - return fmt.Errorf("--retries must be at least 1 (not %d)", retries) - } - healthcheck.Retries = int(retries) - } else { - healthcheck.Retries = 0 - } - - b.runConfig.Healthcheck = &healthcheck - } - - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("HEALTHCHECK %q", b.runConfig.Healthcheck)); err != nil { - return err - } - - return nil -} - -// ENTRYPOINT /usr/sbin/nginx -// -// Set the entrypoint to /usr/sbin/nginx. 
Will accept the CMD as the arguments -// to /usr/sbin/nginx. Uses the default shell if not in JSON format. -// -// Handles command processing similar to CMD and RUN, only b.runConfig.Entrypoint -// is initialized at NewBuilder time instead of through argument parsing. -// -func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error { - if err := b.flags.Parse(); err != nil { - return err - } - - parsed := handleJSONArgs(args, attributes) - - switch { - case attributes["json"]: - // ENTRYPOINT ["echo", "hi"] - b.runConfig.Entrypoint = strslice.StrSlice(parsed) - case len(parsed) == 0: - // ENTRYPOINT [] - b.runConfig.Entrypoint = nil - default: - // ENTRYPOINT echo hi - b.runConfig.Entrypoint = strslice.StrSlice(append(getShell(b.runConfig), parsed[0])) - } - - // when setting the entrypoint if a CMD was not explicitly set then - // set the command to nil - if !b.cmdSet { - b.runConfig.Cmd = nil - } - - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.runConfig.Entrypoint)); err != nil { - return err - } - - return nil -} - -// EXPOSE 6667/tcp 7000/tcp -// -// Expose ports for links and port mappings. This all ends up in -// b.runConfig.ExposedPorts for runconfig. -// -func expose(b *Builder, args []string, attributes map[string]bool, original string) error { - portsTab := args - - if len(args) == 0 { - return errAtLeastOneArgument("EXPOSE") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - if b.runConfig.ExposedPorts == nil { - b.runConfig.ExposedPorts = make(nat.PortSet) - } - - ports, _, err := nat.ParsePortSpecs(portsTab) - if err != nil { - return err - } - - // instead of using ports directly, we build a list of ports and sort it so - // the order is consistent. This prevents cache burst where map ordering - // changes between builds - portList := make([]string, len(ports)) - var i int - for port := range ports { - if _, exists := b.runConfig.ExposedPorts[port]; !exists { - b.runConfig.ExposedPorts[port] = struct{}{} - } - portList[i] = string(port) - i++ - } - sort.Strings(portList) - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) -} - -// USER foo -// -// Set the user to 'foo' for future commands and when running the -// ENTRYPOINT/CMD at container run time. -// -func user(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("USER") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - b.runConfig.User = args[0] - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("USER %v", args)) -} - -// VOLUME /foo -// -// Expose the volume /foo for use. Will also accept the JSON array form. -// -func volume(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return errAtLeastOneArgument("VOLUME") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - if b.runConfig.Volumes == nil { - b.runConfig.Volumes = map[string]struct{}{} - } - for _, v := range args { - v = strings.TrimSpace(v) - if v == "" { - return fmt.Errorf("Volume specified can not be an empty string") - } - b.runConfig.Volumes[v] = struct{}{} - } - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { - return err - } - return nil -} - -// STOPSIGNAL signal -// -// Set the signal that will be used to kill the container. 
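A sketch of the validation path below; the signal name is arbitrary, and anything pkg/signal.ParseSignal rejects fails the instruction:

    package dockerfile

    import "github.com/docker/engine-api/types/container"

    func stopSignalExample() error {
        b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
        // "SIGTERM" passes signal.ParseSignal; a bogus name such as "SIGFOO"
        // would be returned as an error instead.
        if err := stopSignal(b, []string{"SIGTERM"}, nil, ""); err != nil {
            return err
        }
        // b.runConfig.StopSignal == "SIGTERM"
        return nil
    }
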
-func stopSignal(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return fmt.Errorf("STOPSIGNAL requires exactly one argument") - } - - sig := args[0] - _, err := signal.ParseSignal(sig) - if err != nil { - return err - } - - b.runConfig.StopSignal = sig - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("STOPSIGNAL %v", args)) -} - -// ARG name[=value] -// -// Adds the variable foo to the trusted list of variables that can be passed -// to builder using the --build-arg flag for expansion/substitution or passing to 'run'. -// Dockerfile author may optionally set a default value of this variable. -func arg(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return fmt.Errorf("ARG requires exactly one argument definition") - } - - var ( - name string - value string - hasDefault bool - ) - - arg := args[0] - // 'arg' can just be a name or name-value pair. Note that this is different - // from 'env' that handles the split of name and value at the parser level. - // The reason for doing it differently for 'arg' is that we support just - // defining an arg and not assigning it a value (while 'env' always expects a - // name-value pair). If possible, it will be good to harmonize the two. - if strings.Contains(arg, "=") { - parts := strings.SplitN(arg, "=", 2) - name = parts[0] - value = parts[1] - hasDefault = true - } else { - name = arg - hasDefault = false - } - // add the arg to the allowed list of build-time args from this step on. - b.allowedBuildArgs[name] = true - - // If there is a default value associated with this arg then add it to the - // b.buildArgs if one is not already passed to the builder. The args passed - // to builder override the default value of 'arg'. - if _, ok := b.options.BuildArgs[name]; !ok && hasDefault { - b.options.BuildArgs[name] = value - } - - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ARG %s", arg)) -} - -// SHELL powershell -command -// -// Set the non-default shell to use. 
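SHELL only accepts the JSON array form, as the implementation below shows; a sketch of both outcomes, using the same "json" attribute the dispatcher supplies:

    package dockerfile

    import (
        "fmt"

        "github.com/docker/engine-api/types/container"
    )

    func shellExample() error {
        b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
        // Shell form (no "json" attribute) is rejected via errNotJSON.
        if shell(b, []string{"powershell", "-command"}, map[string]bool{}, "SHELL powershell -command") == nil {
            return fmt.Errorf("expected the shell form to be rejected")
        }
        // The JSON form is recorded verbatim in b.runConfig.Shell.
        return shell(b, []string{"powershell", "-command"}, map[string]bool{"json": true}, `SHELL ["powershell", "-command"]`)
    }
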
-func shell(b *Builder, args []string, attributes map[string]bool, original string) error { - if err := b.flags.Parse(); err != nil { - return err - } - shellSlice := handleJSONArgs(args, attributes) - switch { - case len(shellSlice) == 0: - // SHELL [] - return errAtLeastOneArgument("SHELL") - case attributes["json"]: - // SHELL ["powershell", "-command"] - b.runConfig.Shell = strslice.StrSlice(shellSlice) - default: - // SHELL powershell -command - not JSON - return errNotJSON("SHELL", original) - } - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("SHELL %v", shellSlice)) -} - -func errAtLeastOneArgument(command string) error { - return fmt.Errorf("%s requires at least one argument", command) -} - -func errExactlyOneArgument(command string) error { - return fmt.Errorf("%s requires exactly one argument", command) -} - -func errTooManyArguments(command string) error { - return fmt.Errorf("Bad input to %s, too many arguments", command) -} - -// getShell is a helper function which gets the right shell for prefixing the -// shell-form of RUN, ENTRYPOINT and CMD instructions -func getShell(c *container.Config) []string { - if 0 == len(c.Shell) { - return defaultShell[:] - } - return c.Shell[:] -} diff --git a/builder/dockerfile/dispatchers_test.go b/builder/dockerfile/dispatchers_test.go deleted file mode 100644 index 60c40da4f..000000000 --- a/builder/dockerfile/dispatchers_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package dockerfile - -import ( - "fmt" - "runtime" - "strings" - "testing" - - "github.com/docker/engine-api/types/container" -) - -type commandWithFunction struct { - name string - function func(args []string) error -} - -func TestCommandsExactlyOneArgument(t *testing.T) { - commands := []commandWithFunction{ - {"MAINTAINER", func(args []string) error { return maintainer(nil, args, nil, "") }}, - {"FROM", func(args []string) error { return from(nil, args, nil, "") }}, - {"WORKDIR", func(args []string) error { return workdir(nil, args, nil, "") }}, - {"USER", func(args []string) error { return user(nil, args, nil, "") }}} - - for _, command := range commands { - err := command.function([]string{}) - - if err == nil { - t.Fatalf("Error should be present for %s command", command.name) - } - - expectedError := fmt.Sprintf("%s requires exactly one argument", command.name) - - if err.Error() != expectedError { - t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) - } - } -} - -func TestCommandsAtLeastOneArgument(t *testing.T) { - commands := []commandWithFunction{ - {"ENV", func(args []string) error { return env(nil, args, nil, "") }}, - {"LABEL", func(args []string) error { return label(nil, args, nil, "") }}, - {"ADD", func(args []string) error { return add(nil, args, nil, "") }}, - {"COPY", func(args []string) error { return dispatchCopy(nil, args, nil, "") }}, - {"ONBUILD", func(args []string) error { return onbuild(nil, args, nil, "") }}, - {"EXPOSE", func(args []string) error { return expose(nil, args, nil, "") }}, - {"VOLUME", func(args []string) error { return volume(nil, args, nil, "") }}} - - for _, command := range commands { - err := command.function([]string{}) - - if err == nil { - t.Fatalf("Error should be present for %s command", command.name) - } - - expectedError := fmt.Sprintf("%s requires at least one argument", command.name) - - if err.Error() != expectedError { - t.Fatalf("Wrong error message for %s. Got: %s. 
Should be: %s", command.name, err.Error(), expectedError) - } - } -} - -func TestCommandsTooManyArguments(t *testing.T) { - commands := []commandWithFunction{ - {"ENV", func(args []string) error { return env(nil, args, nil, "") }}, - {"LABEL", func(args []string) error { return label(nil, args, nil, "") }}} - - for _, command := range commands { - err := command.function([]string{"arg1", "arg2", "arg3"}) - - if err == nil { - t.Fatalf("Error should be present for %s command", command.name) - } - - expectedError := fmt.Sprintf("Bad input to %s, too many arguments", command.name) - - if err.Error() != expectedError { - t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) - } - } -} - -func TestEnv2Variables(t *testing.T) { - variables := []string{"var1", "val1", "var2", "val2"} - - bflags := &BFlags{} - config := &container.Config{} - - b := &Builder{flags: bflags, runConfig: config, disableCommit: true} - - if err := env(b, variables, nil, ""); err != nil { - t.Fatalf("Error when executing env: %s", err.Error()) - } - - expectedVar1 := fmt.Sprintf("%s=%s", variables[0], variables[1]) - expectedVar2 := fmt.Sprintf("%s=%s", variables[2], variables[3]) - - if b.runConfig.Env[0] != expectedVar1 { - t.Fatalf("Wrong env output for first variable. Got: %s. Should be: %s", b.runConfig.Env[0], expectedVar1) - } - - if b.runConfig.Env[1] != expectedVar2 { - t.Fatalf("Wrong env output for second variable. Got: %s, Should be: %s", b.runConfig.Env[1], expectedVar2) - } -} - -func TestMaintainer(t *testing.T) { - maintainerEntry := "Some Maintainer " - - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - - if err := maintainer(b, []string{maintainerEntry}, nil, ""); err != nil { - t.Fatalf("Error when executing maintainer: %s", err.Error()) - } - - if b.maintainer != maintainerEntry { - t.Fatalf("Maintainer in builder should be set to %s. Got: %s", maintainerEntry, b.maintainer) - } -} - -func TestLabel(t *testing.T) { - labelName := "label" - labelValue := "value" - - labelEntry := []string{labelName, labelValue} - - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - - if err := label(b, labelEntry, nil, ""); err != nil { - t.Fatalf("Error when executing label: %s", err.Error()) - } - - if val, ok := b.runConfig.Labels[labelName]; ok { - if val != labelValue { - t.Fatalf("Label %s should have value %s, had %s instead", labelName, labelValue, val) - } - } else { - t.Fatalf("Label %s should be present but it is not", labelName) - } -} - -func TestFrom(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - - err := from(b, []string{"scratch"}, nil, "") - - if runtime.GOOS == "windows" { - if err == nil { - t.Fatalf("Error not set on Windows") - } - - expectedError := "Windows does not support FROM scratch" - - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("Error message not correct on Windows. 
Should be: %s, got: %s", expectedError, err.Error()) - } - } else { - if err != nil { - t.Fatalf("Error when executing from: %s", err.Error()) - } - - if b.image != "" { - t.Fatalf("Image shoule be empty, got: %s", b.image) - } - - if b.noBaseImage != true { - t.Fatalf("Image should not have any base image, got: %s", b.noBaseImage) - } - } -} - -func TestOnbuildIllegalTriggers(t *testing.T) { - triggers := []struct{ command, expectedError string }{ - {"ONBUILD", "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed"}, - {"MAINTAINER", "MAINTAINER isn't allowed as an ONBUILD trigger"}, - {"FROM", "FROM isn't allowed as an ONBUILD trigger"}} - - for _, trigger := range triggers { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - - err := onbuild(b, []string{trigger.command}, nil, "") - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if !strings.Contains(err.Error(), trigger.expectedError) { - t.Fatalf("Error message not correct. Should be: %s, got: %s", trigger.expectedError, err.Error()) - } - } -} - -func TestOnbuild(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - - err := onbuild(b, []string{"ADD", ".", "/app/src"}, nil, "ONBUILD ADD . /app/src") - - if err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - expectedOnbuild := "ADD . /app/src" - - if b.runConfig.OnBuild[0] != expectedOnbuild { - t.Fatalf("Wrong ONBUILD command. Expected: %s, got: %s", expectedOnbuild, b.runConfig.OnBuild[0]) - } -} diff --git a/builder/dockerfile/dispatchers_unix.go b/builder/dockerfile/dispatchers_unix.go deleted file mode 100644 index 8b0dfc391..000000000 --- a/builder/dockerfile/dispatchers_unix.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !windows - -package dockerfile - -import ( - "fmt" - "os" - "path/filepath" -) - -// normaliseWorkdir normalises a user requested working directory in a -// platform sematically consistent way. -func normaliseWorkdir(current string, requested string) (string, error) { - if requested == "" { - return "", fmt.Errorf("cannot normalise nothing") - } - current = filepath.FromSlash(current) - requested = filepath.FromSlash(requested) - if !filepath.IsAbs(requested) { - return filepath.Join(string(os.PathSeparator), current, requested), nil - } - return requested, nil -} - -func errNotJSON(command, _ string) error { - return fmt.Errorf("%s requires the arguments to be in JSON form", command) -} diff --git a/builder/dockerfile/dispatchers_windows.go b/builder/dockerfile/dispatchers_windows.go deleted file mode 100644 index 5a40ae09d..000000000 --- a/builder/dockerfile/dispatchers_windows.go +++ /dev/null @@ -1,65 +0,0 @@ -package dockerfile - -import ( - "fmt" - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// normaliseWorkdir normalises a user requested working directory in a -// platform sematically consistent way. -func normaliseWorkdir(current string, requested string) (string, error) { - if requested == "" { - return "", fmt.Errorf("cannot normalise nothing") - } - - current = filepath.FromSlash(current) - requested = filepath.FromSlash(requested) - - // Target semantics is C:\somefolder, specifically in the format: - // UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already - // guaranteed that `current`, if set, is consistent. 
This allows us to - // cope correctly with any of the following in a Dockerfile: - // WORKDIR a --> C:\a - // WORKDIR c:\\foo --> C:\foo - // WORKDIR \\foo --> C:\foo - // WORKDIR /foo --> C:\foo - // WORKDIR c:\\foo \ WORKDIR bar --> C:\foo --> C:\foo\bar - // WORKDIR C:/foo \ WORKDIR bar --> C:\foo --> C:\foo\bar - // WORKDIR C:/foo \ WORKDIR \\bar --> C:\foo --> C:\bar - // WORKDIR /foo \ WORKDIR c:/bar --> C:\foo --> C:\bar - if len(current) == 0 || system.IsAbs(requested) { - if (requested[0] == os.PathSeparator) || - (len(requested) > 1 && string(requested[1]) != ":") || - (len(requested) == 1) { - requested = filepath.Join(`C:\`, requested) - } - } else { - requested = filepath.Join(current, requested) - } - // Upper-case drive letter - return (strings.ToUpper(string(requested[0])) + requested[1:]), nil -} - -func errNotJSON(command, original string) error { - // For Windows users, give a hint if it looks like it might contain - // a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"], - // as JSON must be escaped. Unfortunate... - // - // Specifically looking for quote-driveletter-colon-backslash, there's no - // double backslash and a [] pair. No, this is not perfect, but it doesn't - // have to be. It's simply a hint to make life a little easier. - extra := "" - original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1))) - if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 && - !strings.Contains(original, `\\`) && - strings.Contains(original, "[") && - strings.Contains(original, "]") { - extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original) - } - return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra) -} diff --git a/builder/dockerfile/dispatchers_windows_test.go b/builder/dockerfile/dispatchers_windows_test.go deleted file mode 100644 index 4c5371319..000000000 --- a/builder/dockerfile/dispatchers_windows_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build windows - -package dockerfile - -import "testing" - -func TestNormaliseWorkdir(t *testing.T) { - tests := []struct{ current, requested, expected, etext string }{ - {``, ``, ``, `cannot normalise nothing`}, - {``, `a`, `C:\a`, ``}, - {``, `c:\foo`, `C:\foo`, ``}, - {``, `\foo`, `C:\foo`, ``}, - {``, `/foo`, `C:\foo`, ``}, - {``, `C:/foo`, `C:\foo`, ``}, - {`C:\foo`, `bar`, `C:\foo\bar`, ``}, - {`C:\foo`, `/bar`, `C:\bar`, ``}, - {`C:\foo`, `\bar`, `C:\bar`, ``}, - } - for _, i := range tests { - r, e := normaliseWorkdir(i.current, i.requested) - - if i.etext != "" && e == nil { - t.Fatalf("TestNormaliseWorkingDir Expected error %s", i.etext) - } - - if i.etext != "" && e.Error() != i.etext { - t.Fatalf("TestNormaliseWorkingDir Expected error %s, got %s", i.etext, e.Error()) - } - - if r != i.expected { - t.Fatalf("TestNormaliseWorkingDir Expected %s for %s %s", i.expected, i.current, i.requested) - } - } -} diff --git a/builder/dockerfile/envVarTest b/builder/dockerfile/envVarTest deleted file mode 100644 index 1a7fe975a..000000000 --- a/builder/dockerfile/envVarTest +++ /dev/null @@ -1,112 +0,0 @@ -hello | hello -he'll'o | hello -he'llo | hello -he\'llo | he'llo -he\\'llo | he\llo -abc\tdef | abctdef -"abc\tdef" | abc\tdef -'abc\tdef' | abc\tdef -hello\ | hello -hello\\ | hello\ -"hello | hello -"hello\" | hello" -"hel'lo" | hel'lo -'hello | 
hello -'hello\' | hello\ -"''" | '' -$. | $. -$1 | -he$1x | hex -he$.x | he$.x -he$pwd. | he. -he$PWD | he/home -he\$PWD | he$PWD -he\\$PWD | he\/home -he\${} | he${} -he\${}xx | he${}xx -he${} | he -he${}xx | hexx -he${hi} | he -he${hi}xx | hexx -he${PWD} | he/home -he${.} | error -he${XXX:-000}xx | he000xx -he${PWD:-000}xx | he/homexx -he${XXX:-$PWD}xx | he/homexx -he${XXX:-${PWD:-yyy}}xx | he/homexx -he${XXX:-${YYY:-yyy}}xx | heyyyxx -he${XXX:YYY} | error -he${XXX:+${PWD}}xx | hexx -he${PWD:+${XXX}}xx | hexx -he${PWD:+${SHELL}}xx | hebashxx -he${XXX:+000}xx | hexx -he${PWD:+000}xx | he000xx -'he${XX}' | he${XX} -"he${PWD}" | he/home -"he'$PWD'" | he'/home' -"$PWD" | /home -'$PWD' | $PWD -'\$PWD' | \$PWD -'"hello"' | "hello" -he\$PWD | he$PWD -"he\$PWD" | he$PWD -'he\$PWD' | he\$PWD -he${PWD | error -he${PWD:=000}xx | error -he${PWD:+${PWD}:}xx | he/home:xx -he${XXX:-\$PWD:}xx | he$PWD:xx -he${XXX:-\${PWD}z}xx | he${PWDz}xx -안녕하세요 | 안녕하세요 -안'녕'하세요 | 안녕하세요 -안'녕하세요 | 안녕하세요 -안녕\'하세요 | 안녕'하세요 -안\\'녕하세요 | 안\녕하세요 -안녕\t하세요 | 안녕t하세요 -"안녕\t하세요" | 안녕\t하세요 -'안녕\t하세요 | 안녕\t하세요 -안녕하세요\ | 안녕하세요 -안녕하세요\\ | 안녕하세요\ -"안녕하세요 | 안녕하세요 -"안녕하세요\" | 안녕하세요" -"안녕'하세요" | 안녕'하세요 -'안녕하세요 | 안녕하세요 -'안녕하세요\' | 안녕하세요\ -안녕$1x | 안녕x -안녕$.x | 안녕$.x -안녕$pwd. | 안녕. -안녕$PWD | 안녕/home -안녕\$PWD | 안녕$PWD -안녕\\$PWD | 안녕\/home -안녕\${} | 안녕${} -안녕\${}xx | 안녕${}xx -안녕${} | 안녕 -안녕${}xx | 안녕xx -안녕${hi} | 안녕 -안녕${hi}xx | 안녕xx -안녕${PWD} | 안녕/home -안녕${.} | error -안녕${XXX:-000}xx | 안녕000xx -안녕${PWD:-000}xx | 안녕/homexx -안녕${XXX:-$PWD}xx | 안녕/homexx -안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx -안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx -안녕${XXX:YYY} | error -안녕${XXX:+${PWD}}xx | 안녕xx -안녕${PWD:+${XXX}}xx | 안녕xx -안녕${PWD:+${SHELL}}xx | 안녕bashxx -안녕${XXX:+000}xx | 안녕xx -안녕${PWD:+000}xx | 안녕000xx -'안녕${XX}' | 안녕${XX} -"안녕${PWD}" | 안녕/home -"안녕'$PWD'" | 안녕'/home' -'"안녕"' | "안녕" -안녕\$PWD | 안녕$PWD -"안녕\$PWD" | 안녕$PWD -'안녕\$PWD' | 안녕\$PWD -안녕${PWD | error -안녕${PWD:=000}xx | error -안녕${PWD:+${PWD}:}xx | 안녕/home:xx -안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx -안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx -$KOREAN | 한국어 -안녕$KOREAN | 안녕한국어 diff --git a/builder/dockerfile/evaluator.go b/builder/dockerfile/evaluator.go deleted file mode 100644 index 4c9d425f9..000000000 --- a/builder/dockerfile/evaluator.go +++ /dev/null @@ -1,203 +0,0 @@ -// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. -// -// It incorporates a dispatch table based on the parser.Node values (see the -// parser package for more information) that are yielded from the parser itself. -// Calling NewBuilder with the BuildOpts struct can be used to customize the -// experience for execution purposes only. Parsing is controlled in the parser -// package, and this division of responsibility should be respected. -// -// Please see the jump table targets for the actual invocations, most of which -// will call out to the functions in internals.go to deal with their tasks. -// -// ONBUILD is a special case, which is covered in the onbuild() func in -// dispatchers.go. -// -// The evaluator uses the concept of "steps", which are usually each processable -// line in the Dockerfile. Each step is numbered and certain actions are taken -// before and after each step, such as creating an image ID and removing temporary -// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which -// includes its own set of steps (usually only one of them). 
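The parse-then-dispatch loop that package comment describes is the same one build() and BuildFromConfig use earlier in this patch; a condensed sketch:

    package dockerfile

    import (
        "bytes"

        "github.com/docker/docker/builder/dockerfile/parser"
    )

    func dispatchExample(b *Builder) error {
        // Parse one instruction into an AST, then hand each top-level node
        // to the step dispatcher, exactly as build() and BuildFromConfig do.
        ast, err := parser.Parse(bytes.NewBufferString("ENV foo bar"))
        if err != nil {
            return err
        }
        for i, n := range ast.Children {
            if err := b.dispatch(i, n); err != nil {
                return err
            }
        }
        return nil
    }
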
-package dockerfile - -import ( - "fmt" - "strings" - - "github.com/docker/docker/builder/dockerfile/command" - "github.com/docker/docker/builder/dockerfile/parser" -) - -// Environment variable interpolation will happen on these statements only. -var replaceEnvAllowed = map[string]bool{ - command.Env: true, - command.Label: true, - command.Add: true, - command.Copy: true, - command.Workdir: true, - command.Expose: true, - command.Volume: true, - command.User: true, - command.StopSignal: true, - command.Arg: true, -} - -// Certain commands are allowed to have their args split into more -// words after env var replacements. Meaning: -// ENV foo="123 456" -// EXPOSE $foo -// should result in the same thing as: -// EXPOSE 123 456 -// and not treat "123 456" as a single word. -// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. -// Quotes will cause it to still be treated as single word. -var allowWordExpansion = map[string]bool{ - command.Expose: true, -} - -var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error - -func init() { - evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{ - command.Add: add, - command.Arg: arg, - command.Cmd: cmd, - command.Copy: dispatchCopy, // copy() is a go builtin - command.Entrypoint: entrypoint, - command.Env: env, - command.Expose: expose, - command.From: from, - command.Healthcheck: healthcheck, - command.Label: label, - command.Maintainer: maintainer, - command.Onbuild: onbuild, - command.Run: run, - command.Shell: shell, - command.StopSignal: stopSignal, - command.User: user, - command.Volume: volume, - command.Workdir: workdir, - } -} - -// This method is the entrypoint to all statement handling routines. -// -// Almost all nodes will have this structure: -// Child[Node, Node, Node] where Child is from parser.Node.Children and each -// node comes from parser.Node.Next. This forms a "line" with a statement and -// arguments and we process them in this normalized form by hitting -// evaluateTable with the leaf nodes of the command and the Builder object. -// -// ONBUILD is a special case; in this case the parser will emit: -// Child[Node, Child[Node, Node...]] where the first node is the literal -// "onbuild" and the child entrypoint is the command of the ONBUILD statement, -// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to -// deal with that, at least until it becomes more of a general concern with new -// features. -func (b *Builder) dispatch(stepN int, ast *parser.Node) error { - cmd := ast.Value - upperCasedCmd := strings.ToUpper(cmd) - - // To ensure the user is given a decent error message if the platform - // on which the daemon is running does not support a builder command. - if err := platformSupports(strings.ToLower(cmd)); err != nil { - return err - } - - attrs := ast.Attributes - original := ast.Original - flags := ast.Flags - strList := []string{} - msg := fmt.Sprintf("Step %d : %s", stepN+1, upperCasedCmd) - - if len(ast.Flags) > 0 { - msg += " " + strings.Join(ast.Flags, " ") - } - - if cmd == "onbuild" { - if ast.Next == nil { - return fmt.Errorf("ONBUILD requires at least one argument") - } - ast = ast.Next.Children[0] - strList = append(strList, ast.Value) - msg += " " + ast.Value - - if len(ast.Flags) > 0 { - msg += " " + strings.Join(ast.Flags, " ") - } - - } - - // count the number of nodes that we are going to traverse first - // so we can pre-create the argument and message array. 
This speeds up the - // allocation of those lists a lot when they have a lot of arguments - cursor := ast - var n int - for cursor.Next != nil { - cursor = cursor.Next - n++ - } - msgList := make([]string, n) - - var i int - // Append the build-time args to config-environment. - // This allows builder config to override the variables, making the behavior similar to - // a shell script i.e. `ENV foo bar` overrides the value of `foo` passed in build - // context. But `ENV foo $foo` will use the value from build context if one - // hasn't already been defined by a previous ENV primitive. - // Note, we get this behavior because we know that ProcessWord() will - // stop on the first occurrence of a variable name and not notice - // a subsequent one. So, putting the buildArgs list after the Config.Env - // list, in 'envs', is safe. - envs := b.runConfig.Env - for key, val := range b.options.BuildArgs { - if !b.isBuildArgAllowed(key) { - // skip build-args that are not in the allowed list, meaning they have - // not been defined by an "ARG" Dockerfile command yet. - // This is an error condition but only if there is no "ARG" in the entire - // Dockerfile, so we'll generate any necessary errors after we've parsed - // the entire file (see 'leftoverArgs' processing in evaluator.go) - continue - } - envs = append(envs, fmt.Sprintf("%s=%s", key, val)) - } - for ast.Next != nil { - ast = ast.Next - str := ast.Value - if replaceEnvAllowed[cmd] { - var err error - var words []string - - if allowWordExpansion[cmd] { - words, err = ProcessWords(str, envs) - if err != nil { - return err - } - strList = append(strList, words...) - } else { - str, err = ProcessWord(str, envs) - if err != nil { - return err - } - strList = append(strList, str) - } - } else { - strList = append(strList, str) - } - msgList[i] = ast.Value - i++ - } - - msg += " " + strings.Join(msgList, " ") - fmt.Fprintln(b.Stdout, msg) - - // XXX yes, we skip any cmds that are not valid; the parser should have - // picked these out already.
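// At this point strList holds the fully substituted arguments. As an
// illustration (hypothetical values): with ENV foo="123 456" in effect,
// EXPOSE $foo arrives at the dispatch call below as ["123", "456"] because
// EXPOSE permits word expansion, while LABEL bar=$foo keeps "123 456" as a
// single word.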
- if f, ok := evaluateTable[cmd]; ok { - b.flags = NewBFlags() - b.flags.Args = flags - return f(b, strList, attrs, original) - } - - return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) -} diff --git a/builder/dockerfile/evaluator_test.go b/builder/dockerfile/evaluator_test.go deleted file mode 100644 index 5b9f7ec8e..000000000 --- a/builder/dockerfile/evaluator_test.go +++ /dev/null @@ -1,195 +0,0 @@ -package dockerfile - -import ( - "io/ioutil" - "strings" - "testing" - - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerfile/parser" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" -) - -type dispatchTestCase struct { - name, dockerfile, expectedError string - files map[string]string -} - -func init() { - reexec.Init() -} - -func initDispatchTestCases() []dispatchTestCase { - dispatchTestCases := []dispatchTestCase{{ - name: "copyEmptyWhitespace", - dockerfile: `COPY - quux \ - bar`, - expectedError: "COPY requires at least one argument", - }, - { - name: "ONBUILD forbidden FROM", - dockerfile: "ONBUILD FROM scratch", - expectedError: "FROM isn't allowed as an ONBUILD trigger", - files: nil, - }, - { - name: "ONBUILD forbidden MAINTAINER", - dockerfile: "ONBUILD MAINTAINER docker.io", - expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger", - files: nil, - }, - { - name: "ARG two arguments", - dockerfile: "ARG foo bar", - expectedError: "ARG requires exactly one argument definition", - files: nil, - }, - { - name: "MAINTAINER unknown flag", - dockerfile: "MAINTAINER --boo joe@example.com", - expectedError: "Unknown flag: boo", - files: nil, - }, - { - name: "ADD multiple files to file", - dockerfile: "ADD file1.txt file2.txt test", - expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "JSON ADD multiple files to file", - dockerfile: `ADD ["file1.txt", "file2.txt", "test"]`, - expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "Wildcard ADD multiple files to file", - dockerfile: "ADD file*.txt test", - expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "Wildcard JSON ADD multiple files to file", - dockerfile: `ADD ["file*.txt", "test"]`, - expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "COPY multiple files to file", - dockerfile: "COPY file1.txt file2.txt test", - expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "JSON COPY multiple files to file", - dockerfile: `COPY ["file1.txt", "file2.txt", "test"]`, - expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "ADD multiple files to file with whitespace", - 
dockerfile: `ADD [ "test file1.txt", "test file2.txt", "test" ]`, - expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, - }, - { - name: "COPY multiple files to file with whitespace", - dockerfile: `COPY [ "test file1.txt", "test file2.txt", "test" ]`, - expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, - }, - { - name: "COPY wildcard no files", - dockerfile: `COPY file*.txt /tmp/`, - expectedError: "No source files were specified", - files: nil, - }, - { - name: "COPY url", - dockerfile: `COPY https://index.docker.io/robots.txt /`, - expectedError: "Source can't be a URL for COPY", - files: nil, - }, - { - name: "Chaining ONBUILD", - dockerfile: `ONBUILD ONBUILD RUN touch foobar`, - expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", - files: nil, - }, - { - name: "Invalid instruction", - dockerfile: `foo bar`, - expectedError: "Unknown instruction: FOO", - files: nil, - }} - - return dispatchTestCases -} - -func TestDispatch(t *testing.T) { - testCases := initDispatchTestCases() - - for _, testCase := range testCases { - executeTestCase(t, testCase) - } -} - -func executeTestCase(t *testing.T, testCase dispatchTestCase) { - contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") - defer cleanup() - - for filename, content := range testCase.files { - createTestTempFile(t, contextDir, filename, content, 0777) - } - - tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("Error when creating tar stream: %s", err) - } - - defer func() { - if err = tarStream.Close(); err != nil { - t.Fatalf("Error when closing tar stream: %s", err) - } - }() - - context, err := builder.MakeTarSumContext(tarStream) - - if err != nil { - t.Fatalf("Error when creating tar context: %s", err) - } - - defer func() { - if err = context.Close(); err != nil { - t.Fatalf("Error when closing tar context: %s", err) - } - }() - - r := strings.NewReader(testCase.dockerfile) - n, err := parser.Parse(r) - - if err != nil { - t.Fatalf("Error when parsing Dockerfile: %s", err) - } - - config := &container.Config{} - options := &types.ImageBuildOptions{} - - b := &Builder{runConfig: config, options: options, Stdout: ioutil.Discard, context: context} - - err = b.dispatch(0, n.Children[0]) - - if err == nil { - t.Fatalf("No error when executing test %s", testCase.name) - } - - if !strings.Contains(err.Error(), testCase.expectedError) { - t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", testCase.expectedError, err.Error()) - } - -} diff --git a/builder/dockerfile/evaluator_unix.go b/builder/dockerfile/evaluator_unix.go deleted file mode 100644 index 28fd5b156..000000000 --- a/builder/dockerfile/evaluator_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !windows - -package dockerfile - -// platformSupports is a short-term function to give users a quality error -// message if a Dockerfile uses a command not supported on the platform. 
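// On Unix every builder command is supported, so this implementation always
// returns nil; the Windows variant below rejects "user" and "stopsignal".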
-func platformSupports(command string) error { - return nil -} diff --git a/builder/dockerfile/evaluator_windows.go b/builder/dockerfile/evaluator_windows.go deleted file mode 100644 index 43a0b70be..000000000 --- a/builder/dockerfile/evaluator_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -package dockerfile - -import "fmt" - -// platformSupports gives users a quality error message if a Dockerfile uses -// a command not supported on the platform. -func platformSupports(command string) error { - switch command { - case "user", "stopsignal": - return fmt.Errorf("The daemon on this platform does not support the command '%s'", command) - } - return nil -} diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go deleted file mode 100644 index 50755f264..000000000 --- a/builder/dockerfile/internals.go +++ /dev/null @@ -1,669 +0,0 @@ -package dockerfile - -// internals for handling commands. Covers many areas and a lot of -// non-contiguous functionality. Please read the comments. - -import ( - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "sort" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerfile/parser" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/strslice" -) - -func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error { - if b.disableCommit { - return nil - } - if b.image == "" && !b.noBaseImage { - return fmt.Errorf("Please provide a source image with `from` prior to commit") - } - b.runConfig.Image = b.image - - if id == "" { - cmd := b.runConfig.Cmd - b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) ", comment)) - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) - - hit, err := b.probeCache() - if err != nil { - return err - } else if hit { - return nil - } - id, err = b.create() - if err != nil { - return err - } - } - - // Note: Actually copy the struct - autoConfig := *b.runConfig - autoConfig.Cmd = autoCmd - - commitCfg := &backend.ContainerCommitConfig{ - ContainerCommitConfig: types.ContainerCommitConfig{ - Author: b.maintainer, - Pause: true, - Config: &autoConfig, - }, - } - - // Commit the container - imageID, err := b.docker.Commit(id, commitCfg) - if err != nil { - return err - } - - b.image = imageID - return nil -} - -type copyInfo struct { - builder.FileInfo - decompress bool -} - -func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error { - if b.context == nil { - return fmt.Errorf("No context given. 
Impossible to use %s", cmdName) - } - - if len(args) < 2 { - return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) - } - - // Work in daemon-specific filepath semantics - dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest - - b.runConfig.Image = b.image - - var infos []copyInfo - - // Loop through each src file and calculate the info we need to - // do the copy (e.g. hash value if cached). Don't actually do - // the copy until we've looked at all src files - var err error - for _, orig := range args[0 : len(args)-1] { - var fi builder.FileInfo - decompress := allowLocalDecompression - if urlutil.IsURL(orig) { - if !allowRemote { - return fmt.Errorf("Source can't be a URL for %s", cmdName) - } - fi, err = b.download(orig) - if err != nil { - return err - } - defer os.RemoveAll(filepath.Dir(fi.Path())) - decompress = false - infos = append(infos, copyInfo{fi, decompress}) - continue - } - // not a URL - subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true) - if err != nil { - return err - } - - infos = append(infos, subInfos...) - } - - if len(infos) == 0 { - return fmt.Errorf("No source files were specified") - } - if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { - return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) - } - - // For backwards compat, if there's just one info then use it as the - // cache look-up string, otherwise hash 'em all into one - var srcHash string - var origPaths string - - if len(infos) == 1 { - fi := infos[0].FileInfo - origPaths = fi.Name() - if hfi, ok := fi.(builder.Hashed); ok { - srcHash = hfi.Hash() - } - } else { - var hashs []string - var origs []string - for _, info := range infos { - fi := info.FileInfo - origs = append(origs, fi.Name()) - if hfi, ok := fi.(builder.Hashed); ok { - hashs = append(hashs, hfi.Hash()) - } - } - hasher := sha256.New() - hasher.Write([]byte(strings.Join(hashs, ","))) - srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) - origPaths = strings.Join(origs, " ") - } - - cmd := b.runConfig.Cmd - b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), fmt.Sprintf("#(nop) %s %s in %s ", cmdName, srcHash, dest))) - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) - - if hit, err := b.probeCache(); err != nil { - return err - } else if hit { - return nil - } - - container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}, true) - if err != nil { - return err - } - b.tmpContainers[container.ID] = struct{}{} - - comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest) - - // Twiddle the destination when its a relative path - meaning, make it - // relative to the WORKINGDIR - if dest, err = normaliseDest(cmdName, b.runConfig.WorkingDir, dest); err != nil { - return err - } - - for _, info := range infos { - if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil { - return err - } - } - - return b.commit(container.ID, cmd, comment) -} - -func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) { - // get filename from URL - u, err := url.Parse(srcURL) - if err != nil { - return - } - path := filepath.FromSlash(u.Path) // Ensure in platform semantics - if strings.HasSuffix(path, string(os.PathSeparator)) { - path = path[:len(path)-1] - } - parts := strings.Split(path, string(os.PathSeparator)) - filename := parts[len(parts)-1] - if filename 
== "" { - err = fmt.Errorf("cannot determine filename from url: %s", u) - return - } - - // Initiate the download - resp, err := httputils.Download(srcURL) - if err != nil { - return - } - - // Prepare file in a tmp dir - tmpDir, err := ioutils.TempDir("", "docker-remote") - if err != nil { - return - } - defer func() { - if err != nil { - os.RemoveAll(tmpDir) - } - }() - tmpFileName := filepath.Join(tmpDir, filename) - tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if err != nil { - return - } - - stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter) - progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true) - progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") - // Download and dump result to tmp file - if _, err = io.Copy(tmpFile, progressReader); err != nil { - tmpFile.Close() - return - } - fmt.Fprintln(b.Stdout) - // ignoring error because the file was already opened successfully - tmpFileSt, err := tmpFile.Stat() - if err != nil { - return - } - tmpFile.Close() - - // Set the mtime to the Last-Modified header value if present - // Otherwise just remove atime and mtime - mTime := time.Time{} - - lastMod := resp.Header.Get("Last-Modified") - if lastMod != "" { - // If we can't parse it then just let it default to 'zero' - // otherwise use the parsed time value - if parsedMTime, err := http.ParseTime(lastMod); err == nil { - mTime = parsedMTime - } - } - - if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { - return - } - - // Calc the checksum, even if we're using the cache - r, err := archive.Tar(tmpFileName, archive.Uncompressed) - if err != nil { - return - } - tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) - if err != nil { - return - } - if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { - return - } - hash := tarSum.Sum(nil) - r.Close() - return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil -} - -func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) { - - // Work in daemon-specific OS filepath semantics - origPath = filepath.FromSlash(origPath) - - if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { - origPath = origPath[1:] - } - origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) - - // Deal with wildcards - if allowWildcards && containsWildcards(origPath) { - var copyInfos []copyInfo - if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error { - if err != nil { - return err - } - if info.Name() == "" { - // Why are we doing this check? - return nil - } - if match, _ := filepath.Match(origPath, path); !match { - return nil - } - - // Note we set allowWildcards to false in case the name has - // a * in it - subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false) - if err != nil { - return err - } - copyInfos = append(copyInfos, subInfos...) 
- return nil - }); err != nil { - return nil, err - } - return copyInfos, nil - } - - // Must be a dir or a file - - statPath, fi, err := b.context.Stat(origPath) - if err != nil { - return nil, err - } - - copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}} - - hfi, handleHash := fi.(builder.Hashed) - if !handleHash { - return copyInfos, nil - } - - // Deal with the single file case - if !fi.IsDir() { - hfi.SetHash("file:" + hfi.Hash()) - return copyInfos, nil - } - // Must be a dir - var subfiles []string - err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { - if err != nil { - return err - } - // we already checked handleHash above - subfiles = append(subfiles, info.(builder.Hashed).Hash()) - return nil - }) - if err != nil { - return nil, err - } - - sort.Strings(subfiles) - hasher := sha256.New() - hasher.Write([]byte(strings.Join(subfiles, ","))) - hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil))) - - return copyInfos, nil -} - -func containsWildcards(name string) bool { - for i := 0; i < len(name); i++ { - ch := name[i] - if ch == '\\' { - i++ - } else if ch == '*' || ch == '?' || ch == '[' { - return true - } - } - return false -} - -func (b *Builder) processImageFrom(img builder.Image) error { - if img != nil { - b.image = img.ImageID() - - if img.RunConfig() != nil { - b.runConfig = img.RunConfig() - } - } - - // Check to see if we have a default PATH; note that Windows won't - // have one, as it's set by HCS - if system.DefaultPathEnv != "" { - // Convert the slice of strings that represent the current list - // of env vars into a map so we can see if PATH is already set. - // If it's not set then go ahead and give it our default value - configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env) - if _, ok := configEnv["PATH"]; !ok { - b.runConfig.Env = append(b.runConfig.Env, - "PATH="+system.DefaultPathEnv) - } - } - - if img == nil { - // Typically this means they used "FROM scratch" - return nil - } - - // Process ONBUILD triggers if they exist - if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 { - word := "trigger" - if nTriggers > 1 { - word = "triggers" - } - fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word) - } - - // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. - onBuildTriggers := b.runConfig.OnBuild - b.runConfig.OnBuild = []string{} - - // parse the ONBUILD triggers by invoking the parser - for _, step := range onBuildTriggers { - ast, err := parser.Parse(strings.NewReader(step)) - if err != nil { - return err - } - - for i, n := range ast.Children { - switch strings.ToUpper(n.Value) { - case "ONBUILD": - return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) - } - - if err := b.dispatch(i, n); err != nil { - return err - } - } - } - - return nil -} - -// probeCache checks if `b.docker` implements builder.ImageCache and image-caching -// is enabled (i.e. `b.options.NoCache` is false). -// If so, it attempts to look up the current `b.image` and `b.runConfig` pair with `b.docker`. -// If an image is found, probeCache returns `(true, nil)`. -// If no image is found, it returns `(false, nil)`. -// If there is any error, it returns `(false, err)`. 
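// A typical call sequence, as used by commit() and runContextCommand() above:
//
//	if hit, err := b.probeCache(); err != nil {
//		return err
//	} else if hit {
//		return nil // reuse the cached image instead of creating a container
//	}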
-func (b *Builder) probeCache() (bool, error) { - c, ok := b.docker.(builder.ImageCache) - if !ok || b.options.NoCache || b.cacheBusted { - return false, nil - } - cache, err := c.GetCachedImageOnBuild(b.image, b.runConfig) - if err != nil { - return false, err - } - if len(cache) == 0 { - logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd) - b.cacheBusted = true - return false, nil - } - - fmt.Fprintf(b.Stdout, " ---> Using cache\n") - logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd) - b.image = string(cache) - - return true, nil -} - -func (b *Builder) create() (string, error) { - if b.image == "" && !b.noBaseImage { - return "", fmt.Errorf("Please provide a source image with `from` prior to run") - } - b.runConfig.Image = b.image - - resources := container.Resources{ - CgroupParent: b.options.CgroupParent, - CPUShares: b.options.CPUShares, - CPUPeriod: b.options.CPUPeriod, - CPUQuota: b.options.CPUQuota, - CpusetCpus: b.options.CPUSetCPUs, - CpusetMems: b.options.CPUSetMems, - Memory: b.options.Memory, - MemorySwap: b.options.MemorySwap, - Ulimits: b.options.Ulimits, - } - - // TODO: why not embed a hostconfig in builder? - hostConfig := &container.HostConfig{ - Isolation: b.options.Isolation, - ShmSize: b.options.ShmSize, - Resources: resources, - } - - config := *b.runConfig - - // Create the container - c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ - Config: b.runConfig, - HostConfig: hostConfig, - }, true) - if err != nil { - return "", err - } - for _, warning := range c.Warnings { - fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) - } - - b.tmpContainers[c.ID] = struct{}{} - fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID)) - - // override the entry point that may have been picked up from the base image - if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd); err != nil { - return "", err - } - - return c.ID, nil -} - -var errCancelled = errors.New("build cancelled") - -func (b *Builder) run(cID string) (err error) { - errCh := make(chan error) - go func() { - errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true) - }() - - finished := make(chan struct{}) - var once sync.Once - finish := func() { close(finished) } - cancelErrCh := make(chan error, 1) - defer once.Do(finish) - go func() { - select { - case <-b.clientCtx.Done(): - logrus.Debugln("Build cancelled, killing and removing container:", cID) - b.docker.ContainerKill(cID, 0) - b.removeContainer(cID) - cancelErrCh <- errCancelled - case <-finished: - cancelErrCh <- nil - } - }() - - if err := b.docker.ContainerStart(cID, nil, true); err != nil { - return err - } - - // Block on reading output from container, stop on err or chan closed - if err := <-errCh; err != nil { - return err - } - - if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 { - // TODO: change error type, because jsonmessage.JSONError assumes HTTP - return &jsonmessage.JSONError{ - Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(b.runConfig.Cmd, " "), ret), - Code: ret, - } - } - once.Do(finish) - return <-cancelErrCh -} - -func (b *Builder) removeContainer(c string) error { - rmConfig := &types.ContainerRmConfig{ - ForceRemove: true, - RemoveVolume: true, - } - if err := b.docker.ContainerRm(c, rmConfig); err != nil { - fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) - return err - } - return nil -} - -func (b *Builder) clearTmp() { - for c := range b.tmpContainers { - 
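// Stop at the first failed removal; removeContainer has already written
// the error to b.Stdout, and the container is kept in tmpContainers.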
if err := b.removeContainer(c); err != nil { - return - } - delete(b.tmpContainers, c) - fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c)) - } -} - -// readDockerfile reads a Dockerfile from the current context. -func (b *Builder) readDockerfile() error { - // If no -f was specified then look for 'Dockerfile'. If we can't find - // that then look for 'dockerfile'. If neither are found then default - // back to 'Dockerfile' and use that in the error message. - if b.options.Dockerfile == "" { - b.options.Dockerfile = builder.DefaultDockerfileName - if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) { - lowercase := strings.ToLower(b.options.Dockerfile) - if _, _, err := b.context.Stat(lowercase); err == nil { - b.options.Dockerfile = lowercase - } - } - } - - err := b.parseDockerfile() - - if err != nil { - return err - } - - // After the Dockerfile has been parsed, we need to check the .dockerignore - // file for either "Dockerfile" or ".dockerignore", and if either are - // present then erase them from the build context. These files should never - // have been sent from the client but we did send them to make sure that - // we had the Dockerfile to actually parse, and then we also need the - // .dockerignore file to know whether either file should be removed. - // Note that this assumes the Dockerfile has been read into memory and - // is now safe to be removed. - if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok { - dockerIgnore.Process([]string{b.options.Dockerfile}) - } - return nil -} - -func (b *Builder) parseDockerfile() error { - f, err := b.context.Open(b.options.Dockerfile) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile) - } - return err - } - defer f.Close() - if f, ok := f.(*os.File); ok { - // ignoring error because Open already succeeded - fi, err := f.Stat() - if err != nil { - return fmt.Errorf("Unexpected error reading Dockerfile: %v", err) - } - if fi.Size() == 0 { - return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile) - } - } - b.dockerfile, err = parser.Parse(f) - if err != nil { - return err - } - - return nil -} - -// determine if build arg is part of built-in args or user -// defined args in Dockerfile at any point in time. 
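// For example (an assumption about BuiltinAllowedBuildArgs, which is not
// shown here), proxy settings such as HTTP_PROXY would always be allowed,
// while a user-defined argument is only allowed once an ARG instruction has
// added it to b.allowedBuildArgs.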
-func (b *Builder) isBuildArgAllowed(arg string) bool { - if _, ok := BuiltinAllowedBuildArgs[arg]; ok { - return true - } - if _, ok := b.allowedBuildArgs[arg]; ok { - return true - } - return false -} diff --git a/builder/dockerfile/internals_test.go b/builder/dockerfile/internals_test.go deleted file mode 100644 index 7ab22156e..000000000 --- a/builder/dockerfile/internals_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package dockerfile - -import ( - "fmt" - "strings" - "testing" - - "github.com/docker/docker/builder" - "github.com/docker/docker/pkg/archive" - "github.com/docker/engine-api/types" -) - -func TestEmptyDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") - defer cleanup() - - createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777) - - readAndCheckDockerfile(t, "emptyDockefile", contextDir, "", "The Dockerfile (Dockerfile) cannot be empty") -} - -func TestSymlinkDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") - defer cleanup() - - createTestSymlink(t, contextDir, builder.DefaultDockerfileName, "/etc/passwd") - - // The reason the error is "Cannot locate specified Dockerfile" is because - // in the builder, the symlink is resolved within the context, therefore - // Dockerfile -> /etc/passwd becomes etc/passwd from the context which is - // a nonexistent file. - expectedError := fmt.Sprintf("Cannot locate specified Dockerfile: %s", builder.DefaultDockerfileName) - - readAndCheckDockerfile(t, "symlinkDockerfile", contextDir, builder.DefaultDockerfileName, expectedError) -} - -func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, expectedError string) { - tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("Error when creating tar stream: %s", err) - } - - defer func() { - if err = tarStream.Close(); err != nil { - t.Fatalf("Error when closing tar stream: %s", err) - } - }() - - context, err := builder.MakeTarSumContext(tarStream) - - if err != nil { - t.Fatalf("Error when creating tar context: %s", err) - } - - defer func() { - if err = context.Close(); err != nil { - t.Fatalf("Error when closing tar context: %s", err) - } - }() - - options := &types.ImageBuildOptions{ - Dockerfile: dockerfilePath, - } - - b := &Builder{options: options, context: context} - - err = b.readDockerfile() - - if err == nil { - t.Fatalf("No error when executing test: %s", testName) - } - - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("Wrong error message. Should be \"%s\". 
Got \"%s\"", expectedError, err.Error()) - } -} - -func TestDockerfileOutsideTheBuildContext(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") - defer cleanup() - - tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("Error when creating tar stream: %s", err) - } - - defer func() { - if err = tarStream.Close(); err != nil { - t.Fatalf("Error when closing tar stream: %s", err) - } - }() - - context, err := builder.MakeTarSumContext(tarStream) - - if err != nil { - t.Fatalf("Error when creating tar context: %s", err) - } - - defer func() { - if err = context.Close(); err != nil { - t.Fatalf("Error when closing tar context: %s", err) - } - }() - - options := &types.ImageBuildOptions{ - Dockerfile: "../../Dockerfile", - } - - b := &Builder{options: options, context: context} - - err = b.readDockerfile() - - if err == nil { - t.Fatalf("No error when executing test for Dockerfile outside the build context") - } - - expectedError := "Forbidden path outside the build context" - - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", expectedError, err.Error()) - } -} diff --git a/builder/dockerfile/internals_unix.go b/builder/dockerfile/internals_unix.go deleted file mode 100644 index 6cd990d89..000000000 --- a/builder/dockerfile/internals_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !windows - -package dockerfile - -import ( - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// normaliseDest normalises the destination of a COPY/ADD command in a -// platform semantically consistent way. -func normaliseDest(cmdName, workingDir, requested string) (string, error) { - dest := filepath.FromSlash(requested) - endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator)) - if !system.IsAbs(requested) { - dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest) - // Make sure we preserve any trailing slash - if endsInSlash { - dest += string(os.PathSeparator) - } - } - return dest, nil -} diff --git a/builder/dockerfile/internals_windows.go b/builder/dockerfile/internals_windows.go deleted file mode 100644 index f70360300..000000000 --- a/builder/dockerfile/internals_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -package dockerfile - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// normaliseDest normalises the destination of a COPY/ADD command in a -// platform semantically consistent way. -func normaliseDest(cmdName, workingDir, requested string) (string, error) { - dest := filepath.FromSlash(requested) - endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator)) - - // We are guaranteed that the working directory is already consistent, - // However, Windows also has, for now, the limitation that ADD/COPY can - // only be done to the system drive, not any drives that might be present - // as a result of a bind mount. - // - // So... if the path requested is Linux-style absolute (/foo or \\foo), - // we assume it is the system drive. If it is a Windows-style absolute - // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we - // strip any configured working directories drive letter so that it - // can be subsequently legitimately converted to a Windows volume-style - // pathname. 
- - // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as - // we only want to validate where the DriveColon part has been supplied. - if filepath.IsAbs(dest) { - if strings.ToUpper(string(dest[0])) != "C" { - return "", fmt.Errorf("Windows does not support %s with a destinations not on the system drive (C:)", cmdName) - } - dest = dest[2:] // Strip the drive letter - } - - // Cannot handle relative where WorkingDir is not the system drive. - if len(workingDir) > 0 { - if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { - return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) - } - if !system.IsAbs(dest) { - if string(workingDir[0]) != "C" { - return "", fmt.Errorf("Windows does not support %s with relative paths when WORKDIR is not the system drive", cmdName) - } - dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) - // Make sure we preserve any trailing slash - if endsInSlash { - dest += string(os.PathSeparator) - } - } - } - return dest, nil -} diff --git a/builder/dockerfile/internals_windows_test.go b/builder/dockerfile/internals_windows_test.go deleted file mode 100644 index 868a6671a..000000000 --- a/builder/dockerfile/internals_windows_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build windows - -package dockerfile - -import "testing" - -func TestNormaliseDest(t *testing.T) { - tests := []struct{ current, requested, expected, etext string }{ - {``, `D:\`, ``, `Windows does not support TEST with a destinations not on the system drive (C:)`}, - {``, `e:/`, ``, `Windows does not support TEST with a destinations not on the system drive (C:)`}, - {`invalid`, `./c1`, ``, `Current WorkingDir invalid is not platform consistent`}, - {`C:`, ``, ``, `Current WorkingDir C: is not platform consistent`}, - {`C`, ``, ``, `Current WorkingDir C is not platform consistent`}, - {`D:\`, `.`, ``, "Windows does not support TEST with relative paths when WORKDIR is not the system drive"}, - {``, `D`, `D`, ``}, - {``, `./a1`, `.\a1`, ``}, - {``, `.\b1`, `.\b1`, ``}, - {``, `/`, `\`, ``}, - {``, `\`, `\`, ``}, - {``, `c:/`, `\`, ``}, - {``, `c:\`, `\`, ``}, - {``, `.`, `.`, ``}, - {`C:\wdd`, `./a1`, `\wdd\a1`, ``}, - {`C:\wde`, `.\b1`, `\wde\b1`, ``}, - {`C:\wdf`, `/`, `\`, ``}, - {`C:\wdg`, `\`, `\`, ``}, - {`C:\wdh`, `c:/`, `\`, ``}, - {`C:\wdi`, `c:\`, `\`, ``}, - {`C:\wdj`, `.`, `\wdj`, ``}, - {`C:\wdk`, `foo/bar`, `\wdk\foo\bar`, ``}, - {`C:\wdl`, `foo\bar`, `\wdl\foo\bar`, ``}, - {`C:\wdm`, `foo/bar/`, `\wdm\foo\bar\`, ``}, - {`C:\wdn`, `foo\bar/`, `\wdn\foo\bar\`, ``}, - } - for _, i := range tests { - got, err := normaliseDest("TEST", i.current, i.requested) - if err != nil && i.etext == "" { - t.Fatalf("TestNormaliseDest Got unexpected error %q for %s %s. ", err.Error(), i.current, i.requested) - } - if i.etext != "" && ((err == nil) || (err != nil && err.Error() != i.etext)) { - if err == nil { - t.Fatalf("TestNormaliseDest Expected an error for %s %s but didn't get one", i.current, i.requested) - } else { - t.Fatalf("TestNormaliseDest Wrong error text for %s %s - %s", i.current, i.requested, err.Error()) - } - } - if i.etext == "" && got != i.expected { - t.Fatalf("TestNormaliseDest Expected %q for %q and %q. 
Got %q", i.expected, i.current, i.requested, got) - } - } -} diff --git a/builder/dockerfile/parser/dumper/main.go b/builder/dockerfile/parser/dumper/main.go deleted file mode 100644 index 8c357f4c9..000000000 --- a/builder/dockerfile/parser/dumper/main.go +++ /dev/null @@ -1,32 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/docker/docker/builder/dockerfile/parser" -) - -func main() { - var f *os.File - var err error - - if len(os.Args) < 2 { - fmt.Println("please supply filename(s)") - os.Exit(1) - } - - for _, fn := range os.Args[1:] { - f, err = os.Open(fn) - if err != nil { - panic(err) - } - - ast, err := parser.Parse(f) - if err != nil { - panic(err) - } else { - fmt.Println(ast.Dump()) - } - } -} diff --git a/builder/dockerfile/parser/json_test.go b/builder/dockerfile/parser/json_test.go deleted file mode 100644 index a256f845d..000000000 --- a/builder/dockerfile/parser/json_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package parser - -import ( - "testing" -) - -var invalidJSONArraysOfStrings = []string{ - `["a",42,"b"]`, - `["a",123.456,"b"]`, - `["a",{},"b"]`, - `["a",{"c": "d"},"b"]`, - `["a",["c"],"b"]`, - `["a",true,"b"]`, - `["a",false,"b"]`, - `["a",null,"b"]`, -} - -var validJSONArraysOfStrings = map[string][]string{ - `[]`: {}, - `[""]`: {""}, - `["a"]`: {"a"}, - `["a","b"]`: {"a", "b"}, - `[ "a", "b" ]`: {"a", "b"}, - `[ "a", "b" ]`: {"a", "b"}, - ` [ "a", "b" ] `: {"a", "b"}, - `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"}, -} - -func TestJSONArraysOfStrings(t *testing.T) { - for json, expected := range validJSONArraysOfStrings { - if node, _, err := parseJSON(json); err != nil { - t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err) - } else { - i := 0 - for node != nil { - if i >= len(expected) { - t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json) - } - if node.Value != expected[i] { - t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i) - } - node = node.Next - i++ - } - if i != len(expected) { - t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json) - } - } - } - for _, json := range invalidJSONArraysOfStrings { - if _, _, err := parseJSON(json); err != errDockerfileNotStringArray { - t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) - } - } -} diff --git a/builder/dockerfile/parser/line_parsers.go b/builder/dockerfile/parser/line_parsers.go deleted file mode 100644 index 5f484e499..000000000 --- a/builder/dockerfile/parser/line_parsers.go +++ /dev/null @@ -1,361 +0,0 @@ -package parser - -// line parsers are dispatch calls that parse a single unit of text into a -// Node object which contains the whole statement. Dockerfiles have varied -// (but not usually unique, see ONBUILD for a unique example) parsing rules -// per-command, and these unify the processing in a way that makes it -// manageable. - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -var ( - errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") -) - -// ignore the current argument. This will still leave a command parsed, but -// will not incorporate the arguments into the ast. -func parseIgnore(rest string) (*Node, map[string]bool, error) { - return &Node{}, nil, nil -} - -// used for onbuild. 
Could potentially be used for anything that represents a -// statement with sub-statements. -// -// ONBUILD RUN foo bar -> (onbuild (run foo bar)) -// -func parseSubCommand(rest string) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - _, child, err := ParseLine(rest) - if err != nil { - return nil, nil, err - } - - return &Node{Children: []*Node{child}}, nil, nil -} - -// helper to parse words (i.e space delimited or quoted strings) in a statement. -// The quotes are preserved as part of this function and they are stripped later -// as part of processWords(). -func parseWords(rest string) []string { - const ( - inSpaces = iota // looking for start of a word - inWord - inQuote - ) - - words := []string{} - phase := inSpaces - word := "" - quote := '\000' - blankOK := false - var ch rune - var chWidth int - - for pos := 0; pos <= len(rest); pos += chWidth { - if pos != len(rest) { - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - - if phase == inSpaces { // Looking for start of word - if pos == len(rest) { // end of input - break - } - if unicode.IsSpace(ch) { // skip spaces - continue - } - phase = inWord // found it, fall through - } - if (phase == inWord || phase == inQuote) && (pos == len(rest)) { - if blankOK || len(word) > 0 { - words = append(words, word) - } - break - } - if phase == inWord { - if unicode.IsSpace(ch) { - phase = inSpaces - if blankOK || len(word) > 0 { - words = append(words, word) - } - word = "" - blankOK = false - continue - } - if ch == '\'' || ch == '"' { - quote = ch - blankOK = true - phase = inQuote - } - if ch == tokenEscape { - if pos+chWidth == len(rest) { - continue // just skip an escape token at end of line - } - // If we're not quoted and we see an escape token, then always just - // add the escape token plus the char to the word, even if the char - // is a quote. - word += string(ch) - pos += chWidth - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - word += string(ch) - continue - } - if phase == inQuote { - if ch == quote { - phase = inWord - } - // The escape token is special except for ' quotes - can't escape anything for ' - if ch == tokenEscape && quote != '\'' { - if pos+chWidth == len(rest) { - phase = inWord - continue // just skip the escape token at end - } - pos += chWidth - word += string(ch) - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - word += string(ch) - } - } - - return words -} - -// parse environment like statements. Note that this does *not* handle -// variable interpolation, which will be handled in the evaluator. -func parseNameVal(rest string, key string) (*Node, map[string]bool, error) { - // This is kind of tricky because we need to support the old - // variant: KEY name value - // as well as the new one: KEY name=value ... - // The trigger to know which one is being used will be whether we hit - // a space or = first. space ==> old, "=" ==> new - - words := parseWords(rest) - if len(words) == 0 { - return nil, nil, nil - } - - var rootnode *Node - - // Old format (KEY name value) - if !strings.Contains(words[0], "=") { - node := &Node{} - rootnode = node - strs := tokenWhitespace.Split(rest, 2) - - if len(strs) < 2 { - return nil, nil, fmt.Errorf(key + " must have two arguments") - } - - node.Value = strs[0] - node.Next = &Node{} - node.Next.Value = strs[1] - } else { - var prevNode *Node - for i, word := range words { - if !strings.Contains(word, "=") { - return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. 
Must be of the form: name=value", word) - } - parts := strings.SplitN(word, "=", 2) - - name := &Node{} - value := &Node{} - - name.Next = value - name.Value = parts[0] - value.Value = parts[1] - - if i == 0 { - rootnode = name - } else { - prevNode.Next = name - } - prevNode = value - } - } - - return rootnode, nil, nil -} - -func parseEnv(rest string) (*Node, map[string]bool, error) { - return parseNameVal(rest, "ENV") -} - -func parseLabel(rest string) (*Node, map[string]bool, error) { - return parseNameVal(rest, "LABEL") -} - -// parses a statement containing one or more keyword definition(s) and/or -// value assignments, like `name1 name2= name3="" name4=value`. -// Note that this is a stricter format than the old format of assignment, -// allowed by parseNameVal(), in a way that this only allows assignment of the -// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. -// In addition, a keyword definition alone is of the form `keyword` like `name1` -// above. And the assignments `name2=` and `name3=""` are equivalent and -// assign an empty value to the respective keywords. -func parseNameOrNameVal(rest string) (*Node, map[string]bool, error) { - words := parseWords(rest) - if len(words) == 0 { - return nil, nil, nil - } - - var ( - rootnode *Node - prevNode *Node - ) - for i, word := range words { - node := &Node{} - node.Value = word - if i == 0 { - rootnode = node - } else { - prevNode.Next = node - } - prevNode = node - } - - return rootnode, nil, nil -} - -// parses a whitespace-delimited set of arguments. The result is effectively a -// linked list of string arguments. -func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - node := &Node{} - rootnode := node - prevnode := node - for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp - prevnode = node - node.Value = str - node.Next = &Node{} - node = node.Next - } - - // XXX to get around regexp.Split *always* providing an empty string at the - // end due to how our loop is constructed, nil out the last node in the - // chain. - prevnode.Next = nil - - return rootnode, nil, nil -} - -// parsestring just wraps the string in quotes and returns a working node. -func parseString(rest string) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - n := &Node{} - n.Value = rest - return n, nil, nil -} - -// parseJSON converts JSON arrays to an AST. -func parseJSON(rest string) (*Node, map[string]bool, error) { - rest = strings.TrimLeftFunc(rest, unicode.IsSpace) - if !strings.HasPrefix(rest, "[") { - return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) - } - - var myJSON []interface{} - if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { - return nil, nil, err - } - - var top, prev *Node - for _, str := range myJSON { - s, ok := str.(string) - if !ok { - return nil, nil, errDockerfileNotStringArray - } - - node := &Node{Value: s} - if prev == nil { - top = node - } else { - prev.Next = node - } - prev = node - } - - return top, map[string]bool{"json": true}, nil -} - -// parseMaybeJSON determines if the argument appears to be a JSON array. If -// so, passes to parseJSON; if not, quotes the result and returns a single -// node. 
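// For example (illustrative values):
//
//	parseMaybeJSON(`["/bin/sh", "-c", "ls"]`) -> nodes "/bin/sh", "-c", "ls" with attrs["json"] == true
//	parseMaybeJSON(`/bin/sh -c ls`)           -> one node whose Value is the raw string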
-func parseMaybeJSON(rest string) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - node, attrs, err := parseJSON(rest) - - if err == nil { - return node, attrs, nil - } - if err == errDockerfileNotStringArray { - return nil, nil, err - } - - node = &Node{} - node.Value = rest - return node, nil, nil -} - -// parseMaybeJSONToList determines if the argument appears to be a JSON array. If -// so, passes to parseJSON; if not, attempts to parse it as a whitespace -// delimited string. -func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) { - node, attrs, err := parseJSON(rest) - - if err == nil { - return node, attrs, nil - } - if err == errDockerfileNotStringArray { - return nil, nil, err - } - - return parseStringsWhitespaceDelimited(rest) -} - -// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument. -func parseHealthConfig(rest string) (*Node, map[string]bool, error) { - // Find end of first argument - var sep int - for ; sep < len(rest); sep++ { - if unicode.IsSpace(rune(rest[sep])) { - break - } - } - next := sep - for ; next < len(rest); next++ { - if !unicode.IsSpace(rune(rest[next])) { - break - } - } - - if sep == 0 { - return nil, nil, nil - } - - typ := rest[:sep] - cmd, attrs, err := parseMaybeJSON(rest[next:]) - if err != nil { - return nil, nil, err - } - - return &Node{Value: typ, Next: cmd, Attributes: attrs}, nil, err -} diff --git a/builder/dockerfile/parser/parser.go b/builder/dockerfile/parser/parser.go deleted file mode 100644 index eefbf0cf3..000000000 --- a/builder/dockerfile/parser/parser.go +++ /dev/null @@ -1,213 +0,0 @@ -// Package parser implements a parser and parse tree dumper for Dockerfiles. -package parser - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strings" - "unicode" - - "github.com/docker/docker/builder/dockerfile/command" -) - -// Node is a structure used to represent a parse tree. -// -// In the node there are three fields, Value, Next, and Children. Value is the -// current token's string value. Next is always the next non-child token, and -// children contains all the children. Here's an example: -// -// (value next (child child-next child-next-next) next-next) -// -// This data structure is frankly pretty lousy for handling complex languages, -// but lucky for us the Dockerfile isn't very complicated. This structure -// works a little more effectively than a "proper" parse tree for our needs. -// -type Node struct { - Value string // actual content - Next *Node // the next item in the current sexp - Children []*Node // the children of this sexp - Attributes map[string]bool // special attributes for this node - Original string // original line used before parsing - Flags []string // only top Node should have this set - StartLine int // the line in the original dockerfile where the node begins - EndLine int // the line in the original dockerfile where the node ends -} - -var ( - dispatch map[string]func(string) (*Node, map[string]bool, error) - tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) - tokenLineContinuation *regexp.Regexp - tokenEscape rune - tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`) - tokenComment = regexp.MustCompile(`^#.*$`) - lookingForDirectives bool - directiveEscapeSeen bool -) - -const defaultTokenEscape = "\\" - -// setTokenEscape sets the default token for escaping characters in a Dockerfile. -func setTokenEscape(s string) error { - if s != "`" && s != "\\" { - return fmt.Errorf("invalid ESCAPE '%s'. 
Must be ` or \\", s) - } - tokenEscape = rune(s[0]) - tokenLineContinuation = regexp.MustCompile(`\` + s + `[ \t]*$`) - return nil -} - -func init() { - // Dispatch table; see line_parsers.go for the parse functions. - // The command is parsed and mapped to the line parser. The line parser - // receives the arguments but not the command, and returns an AST after - // reformulating the arguments according to the rules in the parser - // functions. Errors are propagated up by Parse() and the resulting AST can - // be incorporated directly into the existing AST as a next. - dispatch = map[string]func(string) (*Node, map[string]bool, error){ - command.Add: parseMaybeJSONToList, - command.Arg: parseNameOrNameVal, - command.Cmd: parseMaybeJSON, - command.Copy: parseMaybeJSONToList, - command.Entrypoint: parseMaybeJSON, - command.Env: parseEnv, - command.Expose: parseStringsWhitespaceDelimited, - command.From: parseString, - command.Healthcheck: parseHealthConfig, - command.Label: parseLabel, - command.Maintainer: parseString, - command.Onbuild: parseSubCommand, - command.Run: parseMaybeJSON, - command.Shell: parseMaybeJSON, - command.StopSignal: parseString, - command.User: parseString, - command.Volume: parseMaybeJSONToList, - command.Workdir: parseString, - } -} - -// ParseLine parses a line and returns the remainder. -func ParseLine(line string) (string, *Node, error) { - - // Handle the parser directive '# escape='. Parser directives must precede - // any builder instruction or other comments, and cannot be repeated. - if lookingForDirectives { - tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line)) - if len(tecMatch) > 0 { - if directiveEscapeSeen { - return "", nil, fmt.Errorf("only one escape parser directive can be used") - } - for i, n := range tokenEscapeCommand.SubexpNames() { - if n == "escapechar" { - if err := setTokenEscape(tecMatch[i]); err != nil { - return "", nil, err - } - directiveEscapeSeen = true - return "", nil, nil - } - } - } - } - - lookingForDirectives = false - - if line = stripComments(line); line == "" { - return "", nil, nil - } - - if tokenLineContinuation.MatchString(line) { - line = tokenLineContinuation.ReplaceAllString(line, "") - return line, nil, nil - } - - cmd, flags, args, err := splitCommand(line) - if err != nil { - return "", nil, err - } - - node := &Node{} - node.Value = cmd - - sexp, attrs, err := fullDispatch(cmd, args) - if err != nil { - return "", nil, err - } - - node.Next = sexp - node.Attributes = attrs - node.Original = line - node.Flags = flags - - return "", node, nil -} - -// Parse is the main parse routine. -// It reads a Dockerfile from an io.Reader and returns the root of the AST. 
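// Illustrative usage (not taken from the original sources):
//
//	ast, err := parser.Parse(strings.NewReader("FROM busybox\nRUN ls /\n"))
//	// On success, ast.Children holds one *Node per instruction, with
//	// StartLine and EndLine recording where each one appeared.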
-func Parse(rwc io.Reader) (*Node, error) { - directiveEscapeSeen = false - lookingForDirectives = true - setTokenEscape(defaultTokenEscape) // Assume the default token for escape - currentLine := 0 - root := &Node{} - root.StartLine = -1 - scanner := bufio.NewScanner(rwc) - - utf8bom := []byte{0xEF, 0xBB, 0xBF} - for scanner.Scan() { - scannedBytes := scanner.Bytes() - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - scannedLine := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) - currentLine++ - line, child, err := ParseLine(scannedLine) - if err != nil { - return nil, err - } - startLine := currentLine - - if line != "" && child == nil { - for scanner.Scan() { - newline := scanner.Text() - currentLine++ - - if stripComments(strings.TrimSpace(newline)) == "" { - continue - } - - line, child, err = ParseLine(line + newline) - if err != nil { - return nil, err - } - - if child != nil { - break - } - } - if child == nil && line != "" { - _, child, err = ParseLine(line) - if err != nil { - return nil, err - } - } - } - - if child != nil { - // Update the line information for the current child. - child.StartLine = startLine - child.EndLine = currentLine - // Update the line information for the root. The starting line of the root is always the - // starting line of the first child and the ending line is the ending line of the last child. - if root.StartLine < 0 { - root.StartLine = currentLine - } - root.EndLine = currentLine - root.Children = append(root.Children, child) - } - } - - return root, nil -} diff --git a/builder/dockerfile/parser/parser_test.go b/builder/dockerfile/parser/parser_test.go deleted file mode 100644 index 1f5aaf5a6..000000000 --- a/builder/dockerfile/parser/parser_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package parser - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "testing" -) - -const testDir = "testfiles" -const negativeTestDir = "testfiles-negative" -const testFileLineInfo = "testfile-line/Dockerfile" - -func getDirs(t *testing.T, dir string) []string { - f, err := os.Open(dir) - if err != nil { - t.Fatal(err) - } - - defer f.Close() - - dirs, err := f.Readdirnames(0) - if err != nil { - t.Fatal(err) - } - - return dirs -} - -func TestTestNegative(t *testing.T) { - for _, dir := range getDirs(t, negativeTestDir) { - dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") - - df, err := os.Open(dockerfile) - if err != nil { - t.Fatalf("Dockerfile missing for %s: %v", dir, err) - } - - _, err = Parse(df) - if err == nil { - t.Fatalf("No error parsing broken dockerfile for %s", dir) - } - - df.Close() - } -} - -func TestTestData(t *testing.T) { - for _, dir := range getDirs(t, testDir) { - dockerfile := filepath.Join(testDir, dir, "Dockerfile") - resultfile := filepath.Join(testDir, dir, "result") - - df, err := os.Open(dockerfile) - if err != nil { - t.Fatalf("Dockerfile missing for %s: %v", dir, err) - } - defer df.Close() - - ast, err := Parse(df) - if err != nil { - t.Fatalf("Error parsing %s's dockerfile: %v", dir, err) - } - - content, err := ioutil.ReadFile(resultfile) - if err != nil { - t.Fatalf("Error reading %s's result file: %v", dir, err) - } - - if runtime.GOOS == "windows" { - // CRLF --> LF to match Unix behavior - content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1) - } - - if ast.Dump()+"\n" != string(content) { - fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) - fmt.Fprintln(os.Stderr, 
"Expected:\n"+string(content)) - t.Fatalf("%s: AST dump of dockerfile does not match result", dir) - } - } -} - -func TestParseWords(t *testing.T) { - tests := []map[string][]string{ - { - "input": {"foo"}, - "expect": {"foo"}, - }, - { - "input": {"foo bar"}, - "expect": {"foo", "bar"}, - }, - { - "input": {"foo\\ bar"}, - "expect": {"foo\\ bar"}, - }, - { - "input": {"foo=bar"}, - "expect": {"foo=bar"}, - }, - { - "input": {"foo bar 'abc xyz'"}, - "expect": {"foo", "bar", "'abc xyz'"}, - }, - { - "input": {`foo bar "abc xyz"`}, - "expect": {"foo", "bar", `"abc xyz"`}, - }, - { - "input": {"àöû"}, - "expect": {"àöû"}, - }, - { - "input": {`föo bàr "âbc xÿz"`}, - "expect": {"föo", "bàr", `"âbc xÿz"`}, - }, - } - - for _, test := range tests { - words := parseWords(test["input"][0]) - if len(words) != len(test["expect"]) { - t.Fatalf("length check failed. input: %v, expect: %v, output: %v", test["input"][0], test["expect"], words) - } - for i, word := range words { - if word != test["expect"][i] { - t.Fatalf("word check failed for word: %q. input: %v, expect: %v, output: %v", word, test["input"][0], test["expect"], words) - } - } - } -} - -func TestLineInformation(t *testing.T) { - df, err := os.Open(testFileLineInfo) - if err != nil { - t.Fatalf("Dockerfile missing for %s: %v", testFileLineInfo, err) - } - defer df.Close() - - ast, err := Parse(df) - if err != nil { - t.Fatalf("Error parsing dockerfile %s: %v", testFileLineInfo, err) - } - - if ast.StartLine != 5 || ast.EndLine != 31 { - fmt.Fprintf(os.Stderr, "Wrong root line information: expected(%d-%d), actual(%d-%d)\n", 5, 31, ast.StartLine, ast.EndLine) - t.Fatalf("Root line information doesn't match result.") - } - if len(ast.Children) != 3 { - fmt.Fprintf(os.Stderr, "Wrong number of children: expected(%d), actual(%d)\n", 3, len(ast.Children)) - t.Fatalf("Number of children doesn't match result for %s", testFileLineInfo) - } - expected := [][]int{ - {5, 5}, - {11, 12}, - {17, 31}, - } - for i, child := range ast.Children { - if child.StartLine != expected[i][0] || child.EndLine != expected[i][1] { - t.Logf("Wrong line information for child %d: expected(%d-%d), actual(%d-%d)\n", - i, expected[i][0], expected[i][1], child.StartLine, child.EndLine) - t.Fatalf("Child line information doesn't match result.") - } - } -} diff --git a/builder/dockerfile/parser/testfile-line/Dockerfile b/builder/dockerfile/parser/testfile-line/Dockerfile deleted file mode 100644 index c7601c9f6..000000000 --- a/builder/dockerfile/parser/testfile-line/Dockerfile +++ /dev/null @@ -1,35 +0,0 @@ -# ESCAPE=\ - - - -FROM brimstone/ubuntu:14.04 - - -# TORUN -v /var/run/docker.sock:/var/run/docker.sock - - -ENV GOPATH \ -/go - - - -# Install the packages we need, clean up after them and us -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - - - && apt-get install -y --no-install-recommends git golang ca-certificates \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && go get -v github.com/brimstone/consuldock \ - && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* \ - && rm -rf $GOPATH - - - - diff --git a/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile b/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile deleted file mode 100644 index 1d6557879..000000000 --- 
a/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM busybox - -ENV PATH diff --git a/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile b/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile deleted file mode 100644 index d1be4596c..000000000 --- a/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD [ "echo", [ "nested json" ] ] diff --git a/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile b/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile deleted file mode 100644 index 00b444cba..000000000 --- a/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Seongyeol Lim - -COPY . /go/src/github.com/docker/docker -ADD . / -ADD null / -COPY nullfile /tmp -ADD [ "vimrc", "/tmp" ] -COPY [ "bashrc", "/tmp" ] -COPY [ "test file", "/tmp" ] -ADD [ "test file", "/tmp/test file" ] diff --git a/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result b/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result deleted file mode 100644 index 85aee6401..000000000 --- a/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result +++ /dev/null @@ -1,10 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Seongyeol Lim ") -(copy "." "/go/src/github.com/docker/docker") -(add "." "/") -(add "null" "/") -(copy "nullfile" "/tmp") -(add "vimrc" "/tmp") -(copy "bashrc" "/tmp") -(copy "test file" "/tmp") -(add "test file" "/tmp/test file") diff --git a/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile b/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile deleted file mode 100644 index 0364ef9d9..000000000 --- a/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -#escape=\ -FROM brimstone/ubuntu:14.04 - -MAINTAINER brimstone@the.narro.ws - -# TORUN -v /var/run/docker.sock:/var/run/docker.sock - -ENV GOPATH /go - -# Set our command -ENTRYPOINT ["/usr/local/bin/consuldock"] - -# Install the packages we need, clean up after them and us -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - && apt-get install -y --no-install-recommends git golang ca-certificates \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && go get -v github.com/brimstone/consuldock \ - && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* \ - && rm -rf $GOPATH diff --git a/builder/dockerfile/parser/testfiles/brimstone-consuldock/result b/builder/dockerfile/parser/testfiles/brimstone-consuldock/result deleted file mode 100644 index 227f748cd..000000000 --- a/builder/dockerfile/parser/testfiles/brimstone-consuldock/result +++ /dev/null @@ -1,5 +0,0 @@ -(from "brimstone/ubuntu:14.04") -(maintainer "brimstone@the.narro.ws") -(env "GOPATH" "/go") -(entrypoint "/usr/local/bin/consuldock") -(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean 
/tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile b/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile deleted file mode 100644 index 25ae35216..000000000 --- a/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile +++ /dev/null @@ -1,52 +0,0 @@ -FROM brimstone/ubuntu:14.04 - -CMD [] - -ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] - -EXPOSE 8500 8600 8400 8301 8302 - -RUN apt-get update \ - && apt-get install -y unzip wget \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists - -RUN cd /tmp \ - && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ - -O web_ui.zip \ - && unzip web_ui.zip \ - && mv dist /webui \ - && rm web_ui.zip - -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - && apt-get install -y --no-install-recommends unzip wget \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && cd /tmp \ - && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ - -O web_ui.zip \ - && unzip web_ui.zip \ - && mv dist /webui \ - && rm web_ui.zip \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* - -ENV GOPATH /go - -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && go get -v github.com/hashicorp/consul \ - && mv $GOPATH/bin/consul /usr/bin/consul \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* \ - && rm -rf $GOPATH diff --git a/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result b/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result deleted file mode 100644 index 16492e516..000000000 --- a/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result +++ /dev/null @@ -1,9 +0,0 @@ -(from "brimstone/ubuntu:14.04") -(cmd) -(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") -(expose "8500" "8600" "8400" "8301" "8302") -(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") -(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") -(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*") -(env "GOPATH" "/go") -(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ 
{print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile b/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile deleted file mode 100644 index 42b324e77..000000000 --- a/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -FROM ubuntu:14.04 - -RUN echo hello\ - world\ - goodnight \ - moon\ - light\ -ning -RUN echo hello \ - world -RUN echo hello \ -world -RUN echo hello \ -goodbye\ -frog -RUN echo hello \ -world -RUN echo hi \ - \ - world \ -\ - good\ -\ -night -RUN echo goodbye\ -frog -RUN echo good\ -bye\ -frog - -RUN echo hello \ -# this is a comment - -# this is a comment with a blank line surrounding it - -this is some more useful stuff diff --git a/builder/dockerfile/parser/testfiles/continueIndent/result b/builder/dockerfile/parser/testfiles/continueIndent/result deleted file mode 100644 index 268ae073c..000000000 --- a/builder/dockerfile/parser/testfiles/continueIndent/result +++ /dev/null @@ -1,10 +0,0 @@ -(from "ubuntu:14.04") -(run "echo hello world goodnight moon lightning") -(run "echo hello world") -(run "echo hello world") -(run "echo hello goodbyefrog") -(run "echo hello world") -(run "echo hi world goodnight") -(run "echo goodbyefrog") -(run "echo goodbyefrog") -(run "echo hello this is some more useful stuff") diff --git a/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile b/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile deleted file mode 100644 index 8ccb71a57..000000000 --- a/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile +++ /dev/null @@ -1,54 +0,0 @@ -FROM cpuguy83/ubuntu -ENV NAGIOS_HOME /opt/nagios -ENV NAGIOS_USER nagios -ENV NAGIOS_GROUP nagios -ENV NAGIOS_CMDUSER nagios -ENV NAGIOS_CMDGROUP nagios -ENV NAGIOSADMIN_USER nagiosadmin -ENV NAGIOSADMIN_PASS nagios -ENV APACHE_RUN_USER nagios -ENV APACHE_RUN_GROUP nagios -ENV NAGIOS_TIMEZONE UTC - -RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list -RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx -RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) -RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) - -ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz -RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf -ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ -RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install 
- -RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars -RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default - -RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo - -RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf - -RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs -RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg -RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg -RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf - -RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ - sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg -RUN cp /etc/services /var/spool/postfix/etc/ - -RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix -ADD nagios.init /etc/sv/nagios/run -ADD apache.init /etc/sv/apache/run -ADD postfix.init /etc/sv/postfix/run -ADD postfix.stop /etc/sv/postfix/finish - -ADD start.sh /usr/local/bin/start_nagios - -ENV APACHE_LOCK_DIR /var/run -ENV APACHE_LOG_DIR /var/log/apache2 - -EXPOSE 80 - -VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] - -CMD ["/usr/local/bin/start_nagios"] diff --git a/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result b/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result deleted file mode 100644 index 25dd3ddfe..000000000 --- a/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result +++ /dev/null @@ -1,40 +0,0 @@ -(from "cpuguy83/ubuntu") -(env "NAGIOS_HOME" "/opt/nagios") -(env "NAGIOS_USER" "nagios") -(env "NAGIOS_GROUP" "nagios") -(env "NAGIOS_CMDUSER" "nagios") -(env "NAGIOS_CMDGROUP" "nagios") -(env "NAGIOSADMIN_USER" "nagiosadmin") -(env "NAGIOSADMIN_PASS" "nagios") -(env "APACHE_RUN_USER" "nagios") -(env "APACHE_RUN_GROUP" "nagios") -(env "NAGIOS_TIMEZONE" "UTC") -(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") -(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") -(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") -(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") -(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") -(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") 
-(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") -(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") -(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") -(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") -(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") -(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") -(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") -(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") -(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") -(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") -(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") -(run "cp /etc/services /var/spool/postfix/etc/") -(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") -(add "nagios.init" "/etc/sv/nagios/run") -(add "apache.init" "/etc/sv/apache/run") -(add "postfix.init" "/etc/sv/postfix/run") -(add "postfix.stop" "/etc/sv/postfix/finish") -(add "start.sh" "/usr/local/bin/start_nagios") -(env "APACHE_LOCK_DIR" "/var/run") -(env "APACHE_LOG_DIR" "/var/log/apache2") -(expose "80") -(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") -(cmd "/usr/local/bin/start_nagios") diff --git a/builder/dockerfile/parser/testfiles/docker/Dockerfile b/builder/dockerfile/parser/testfiles/docker/Dockerfile deleted file mode 100644 index 9717adbd6..000000000 --- a/builder/dockerfile/parser/testfiles/docker/Dockerfile +++ /dev/null @@ -1,103 +0,0 @@ -# This file describes the standard way to build Docker, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker . -# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test -# -# # Publish a release: -# docker run --privileged \ -# -e AWS_S3_BUCKET=baz \ -# -e AWS_ACCESS_KEY=foo \ -# -e AWS_SECRET_KEY=bar \ -# -e GPG_PASSPHRASE=gloubiboulga \ -# docker hack/release.sh -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. 
-# - -FROM ubuntu:14.04 -MAINTAINER Tianon Gravi (@tianon) - -# Packaged dependencies -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ - apt-utils \ - aufs-tools \ - automake \ - btrfs-tools \ - build-essential \ - curl \ - dpkg-sig \ - git \ - iptables \ - libapparmor-dev \ - libcap-dev \ - libsqlite3-dev \ - mercurial \ - pandoc \ - parallel \ - reprepro \ - ruby1.9.1 \ - ruby1.9.1-dev \ - s3cmd=1.1.0* \ - --no-install-recommends - -# Get lvm2 source for compiling statically -RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags -# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# Install Go -RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz -ENV PATH /usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor -RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 - -# Compile Go for cross compilation -ENV DOCKER_CROSSPLATFORMS \ - linux/386 linux/arm \ - darwin/amd64 darwin/386 \ - freebsd/amd64 freebsd/386 freebsd/arm -# (set an explicit GOARM of 5 for maximum compatibility) -ENV GOARM 5 -RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' - -# Grab Go's cover tool for dead-simple code coverage testing -RUN go get golang.org/x/tools/cmd/cover - -# TODO replace FPM with some very minimal debhelper stuff -RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 - -# Get the "busybox" image source so we can build locally instead of pulling -RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox - -# Setup s3cmd config -RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor selinux - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . 
/go/src/github.com/docker/docker diff --git a/builder/dockerfile/parser/testfiles/docker/result b/builder/dockerfile/parser/testfiles/docker/result deleted file mode 100644 index d032f9bac..000000000 --- a/builder/dockerfile/parser/testfiles/docker/result +++ /dev/null @@ -1,24 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Tianon Gravi (@tianon)") -(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tlibsqlite3-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") -(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") -(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") -(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") -(env "PATH" "/usr/local/go/bin:$PATH") -(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") -(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") -(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") -(env "GOARM" "5") -(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") -(run "go get golang.org/x/tools/cmd/cover") -(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") -(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") -(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") -(run "git config --global user.email 'docker-dummy@example.com'") -(run "groupadd -r docker") -(run "useradd --create-home --gid docker unprivilegeduser") -(volume "/var/lib/docker") -(workdir "/go/src/github.com/docker/docker") -(env "DOCKER_BUILDTAGS" "apparmor selinux") -(entrypoint "hack/dind") -(copy "." 
"/go/src/github.com/docker/docker") diff --git a/builder/dockerfile/parser/testfiles/env/Dockerfile b/builder/dockerfile/parser/testfiles/env/Dockerfile deleted file mode 100644 index 08fa18ace..000000000 --- a/builder/dockerfile/parser/testfiles/env/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM ubuntu -ENV name value -ENV name=value -ENV name=value name2=value2 -ENV name="value value1" -ENV name=value\ value2 -ENV name="value'quote space'value2" -ENV name='value"double quote"value2' -ENV name=value\ value2 name2=value2\ value3 -ENV name="a\"b" -ENV name="a\'b" -ENV name='a\'b' -ENV name='a\'b'' -ENV name='a\"b' -ENV name="''" -# don't put anything after the next line - it must be the last line of the -# Dockerfile and it must end with \ -ENV name=value \ - name1=value1 \ - name2="value2a \ - value2b" \ - name3="value3a\n\"value3b\"" \ - name4="value4a\\nvalue4b" \ diff --git a/builder/dockerfile/parser/testfiles/env/result b/builder/dockerfile/parser/testfiles/env/result deleted file mode 100644 index ba0a6dd7c..000000000 --- a/builder/dockerfile/parser/testfiles/env/result +++ /dev/null @@ -1,16 +0,0 @@ -(from "ubuntu") -(env "name" "value") -(env "name" "value") -(env "name" "value" "name2" "value2") -(env "name" "\"value value1\"") -(env "name" "value\\ value2") -(env "name" "\"value'quote space'value2\"") -(env "name" "'value\"double quote\"value2'") -(env "name" "value\\ value2" "name2" "value2\\ value3") -(env "name" "\"a\\\"b\"") -(env "name" "\"a\\'b\"") -(env "name" "'a\\'b'") -(env "name" "'a\\'b''") -(env "name" "'a\\\"b'") -(env "name" "\"''\"") -(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") diff --git a/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile b/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile deleted file mode 100644 index 6def7efdc..000000000 --- a/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -# Comment here. Should not be looking for the following parser directive. -# Hence the following line will be ignored, and the subsequent backslash -# continuation will be the default. -# escape = ` - -FROM image -MAINTAINER foo@bar.com -ENV GOPATH \ -\go \ No newline at end of file diff --git a/builder/dockerfile/parser/testfiles/escape-after-comment/result b/builder/dockerfile/parser/testfiles/escape-after-comment/result deleted file mode 100644 index 21522a880..000000000 --- a/builder/dockerfile/parser/testfiles/escape-after-comment/result +++ /dev/null @@ -1,3 +0,0 @@ -(from "image") -(maintainer "foo@bar.com") -(env "GOPATH" "\\go") diff --git a/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile b/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile deleted file mode 100644 index 08a8cc432..000000000 --- a/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -# escape = `` -# There is no white space line after the directives. This still succeeds, but goes -# against best practices. 
-FROM image -MAINTAINER foo@bar.com -ENV GOPATH ` -\go \ No newline at end of file diff --git a/builder/dockerfile/parser/testfiles/escape-nonewline/result b/builder/dockerfile/parser/testfiles/escape-nonewline/result deleted file mode 100644 index 21522a880..000000000 --- a/builder/dockerfile/parser/testfiles/escape-nonewline/result +++ /dev/null @@ -1,3 +0,0 @@ -(from "image") -(maintainer "foo@bar.com") -(env "GOPATH" "\\go") diff --git a/builder/dockerfile/parser/testfiles/escape/Dockerfile b/builder/dockerfile/parser/testfiles/escape/Dockerfile deleted file mode 100644 index ef30414a5..000000000 --- a/builder/dockerfile/parser/testfiles/escape/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -#escape = ` - -FROM image -MAINTAINER foo@bar.com -ENV GOPATH ` -\go \ No newline at end of file diff --git a/builder/dockerfile/parser/testfiles/escape/result b/builder/dockerfile/parser/testfiles/escape/result deleted file mode 100644 index 21522a880..000000000 --- a/builder/dockerfile/parser/testfiles/escape/result +++ /dev/null @@ -1,3 +0,0 @@ -(from "image") -(maintainer "foo@bar.com") -(env "GOPATH" "\\go") diff --git a/builder/dockerfile/parser/testfiles/escapes/Dockerfile b/builder/dockerfile/parser/testfiles/escapes/Dockerfile deleted file mode 100644 index 1ffb17ef0..000000000 --- a/builder/dockerfile/parser/testfiles/escapes/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Erik \\Hollensbe \" - -RUN apt-get \update && \ - apt-get \"install znc -y -ADD \conf\\" /.znc - -RUN foo \ - -bar \ - -baz - -CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git a/builder/dockerfile/parser/testfiles/escapes/result b/builder/dockerfile/parser/testfiles/escapes/result deleted file mode 100644 index 13e409cb1..000000000 --- a/builder/dockerfile/parser/testfiles/escapes/result +++ /dev/null @@ -1,6 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Erik \\\\Hollensbe \\\"") -(run "apt-get \\update && apt-get \\\"install znc -y") -(add "\\conf\\\\\"" "/.znc") -(run "foo bar baz") -(cmd "/usr\\\"/bin/znc" "-f" "-r") diff --git a/builder/dockerfile/parser/testfiles/flags/Dockerfile b/builder/dockerfile/parser/testfiles/flags/Dockerfile deleted file mode 100644 index 2418e0f06..000000000 --- a/builder/dockerfile/parser/testfiles/flags/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM scratch -COPY foo /tmp/ -COPY --user=me foo /tmp/ -COPY --doit=true foo /tmp/ -COPY --user=me --doit=true foo /tmp/ -COPY --doit=true -- foo /tmp/ -COPY -- foo /tmp/ -CMD --doit [ "a", "b" ] -CMD --doit=true -- [ "a", "b" ] -CMD --doit -- [ ] diff --git a/builder/dockerfile/parser/testfiles/flags/result b/builder/dockerfile/parser/testfiles/flags/result deleted file mode 100644 index 4578f4cba..000000000 --- a/builder/dockerfile/parser/testfiles/flags/result +++ /dev/null @@ -1,10 +0,0 @@ -(from "scratch") -(copy "foo" "/tmp/") -(copy ["--user=me"] "foo" "/tmp/") -(copy ["--doit=true"] "foo" "/tmp/") -(copy ["--user=me" "--doit=true"] "foo" "/tmp/") -(copy ["--doit=true"] "foo" "/tmp/") -(copy "foo" "/tmp/") -(cmd ["--doit"] "a" "b") -(cmd ["--doit=true"] "a" "b") -(cmd ["--doit"]) diff --git a/builder/dockerfile/parser/testfiles/health/Dockerfile b/builder/dockerfile/parser/testfiles/health/Dockerfile deleted file mode 100644 index 081e44288..000000000 --- a/builder/dockerfile/parser/testfiles/health/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM debian -ADD check.sh main.sh /app/ -CMD /app/main.sh -HEALTHCHECK -HEALTHCHECK --interval=5s --timeout=3s --retries=3 \ - CMD /app/check.sh --quiet -HEALTHCHECK CMD 
-HEALTHCHECK CMD a b -HEALTHCHECK --timeout=3s CMD ["foo"] -HEALTHCHECK CONNECT TCP 7000 diff --git a/builder/dockerfile/parser/testfiles/health/result b/builder/dockerfile/parser/testfiles/health/result deleted file mode 100644 index 092924f88..000000000 --- a/builder/dockerfile/parser/testfiles/health/result +++ /dev/null @@ -1,9 +0,0 @@ -(from "debian") -(add "check.sh" "main.sh" "/app/") -(cmd "/app/main.sh") -(healthcheck) -(healthcheck ["--interval=5s" "--timeout=3s" "--retries=3"] "CMD" "/app/check.sh --quiet") -(healthcheck "CMD") -(healthcheck "CMD" "a b") -(healthcheck ["--timeout=3s"] "CMD" "foo") -(healthcheck "CONNECT" "TCP 7000") diff --git a/builder/dockerfile/parser/testfiles/influxdb/Dockerfile b/builder/dockerfile/parser/testfiles/influxdb/Dockerfile deleted file mode 100644 index 587fb9b54..000000000 --- a/builder/dockerfile/parser/testfiles/influxdb/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update && apt-get install wget -y -RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb -RUN dpkg -i influxdb_latest_amd64.deb -RUN rm -r /opt/influxdb/shared - -VOLUME /opt/influxdb/shared - -CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml - -EXPOSE 8083 -EXPOSE 8086 -EXPOSE 8090 -EXPOSE 8099 diff --git a/builder/dockerfile/parser/testfiles/influxdb/result b/builder/dockerfile/parser/testfiles/influxdb/result deleted file mode 100644 index 0998e87e6..000000000 --- a/builder/dockerfile/parser/testfiles/influxdb/result +++ /dev/null @@ -1,11 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update && apt-get install wget -y") -(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") -(run "dpkg -i influxdb_latest_amd64.deb") -(run "rm -r /opt/influxdb/shared") -(volume "/opt/influxdb/shared") -(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") -(expose "8083") -(expose "8086") -(expose "8090") -(expose "8099") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile deleted file mode 100644 index 39fe27d99..000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result deleted file mode 100644 index afc220c2a..000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile deleted file mode 100644 index eaae081a0..000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result deleted file mode 100644 index 
484804e2b..000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile deleted file mode 100644 index c3ac63c07..000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD ['echo','single quotes are invalid JSON'] diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result deleted file mode 100644 index 614789120..000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "['echo','single quotes are invalid JSON']") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile deleted file mode 100644 index 5fd4afa52..000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD ["echo", "Please, close the brackets when you're done" diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result deleted file mode 100644 index 1ffbb8ff8..000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "[\"echo\", \"Please, close the brackets when you're done\"") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile deleted file mode 100644 index 30cc4bb48..000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD ["echo", "look ma, no quote!] 
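
The jeztah-invalid-json fixtures above all exercise the same fallback in parseMaybeJSON: an argument that does not decode as a JSON array of strings is kept verbatim as a single shell-form value, which is exactly what each paired result file records. Below is a minimal standalone sketch of that decision, using a hypothetical tryJSONArray helper; note the real parser is stricter in one case, since a well-formed JSON array containing non-strings (the shykes-nested-json fixture) is rejected outright via errDockerfileNotStringArray rather than falling back:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// tryJSONArray mimics the fallback in parseMaybeJSON: a JSON array of
// strings becomes an exec-form command; anything that fails to decode
// is kept verbatim as a single shell-form word.
func tryJSONArray(rest string) ([]string, bool) {
	var words []string
	if err := json.Unmarshal([]byte(rest), &words); err != nil {
		return nil, false // not a JSON string array: fall back to shell form
	}
	return words, true
}

func main() {
	for _, arg := range []string{
		`["echo", "ok"]`,                            // valid exec form
		`['echo','single quotes are invalid JSON']`, // falls back to shell form
		`["echo", "look ma, no quote!]`,             // unterminated: shell form
	} {
		if words, ok := tryJSONArray(arg); ok {
			fmt.Printf("exec form:  %q\n", words)
		} else {
			fmt.Printf("shell form: %q\n", arg)
		}
	}
}
```
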
diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result deleted file mode 100644 index 32048147b..000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "[\"echo\", \"look ma, no quote!]") diff --git a/builder/dockerfile/parser/testfiles/json/Dockerfile b/builder/dockerfile/parser/testfiles/json/Dockerfile deleted file mode 100644 index a58691711..000000000 --- a/builder/dockerfile/parser/testfiles/json/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -CMD [] -CMD [""] -CMD ["a"] -CMD ["a","b"] -CMD [ "a", "b" ] -CMD [ "a", "b" ] -CMD [ "a", "b" ] -CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"] diff --git a/builder/dockerfile/parser/testfiles/json/result b/builder/dockerfile/parser/testfiles/json/result deleted file mode 100644 index c6553e6e1..000000000 --- a/builder/dockerfile/parser/testfiles/json/result +++ /dev/null @@ -1,8 +0,0 @@ -(cmd) -(cmd "") -(cmd "a") -(cmd "a" "b") -(cmd "a" "b") -(cmd "a" "b") -(cmd "a" "b") -(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00") diff --git a/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile deleted file mode 100644 index 35f9c24aa..000000000 --- a/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER James Turnbull "james@example.com" -ENV REFRESHED_AT 2014-06-01 -RUN apt-get update -RUN apt-get -y install redis-server redis-tools -EXPOSE 6379 -ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result b/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result deleted file mode 100644 index b5ac6fe44..000000000 --- a/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result +++ /dev/null @@ -1,7 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "James Turnbull \"james@example.com\"") -(env "REFRESHED_AT" "2014-06-01") -(run "apt-get update") -(run "apt-get -y install redis-server redis-tools") -(expose "6379") -(entrypoint "/usr/bin/redis-server") diff --git a/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile deleted file mode 100644 index 188395fe8..000000000 --- a/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile +++ /dev/null @@ -1,48 +0,0 @@ -FROM busybox:buildroot-2014.02 - -MAINTAINER docker - -ONBUILD RUN ["echo", "test"] -ONBUILD RUN echo test -ONBUILD COPY . / - - -# RUN Commands \ -# linebreak in comment \ -RUN ["ls", "-la"] -RUN ["echo", "'1234'"] -RUN echo "1234" -RUN echo 1234 -RUN echo '1234' && \ - echo "456" && \ - echo 789 -RUN sh -c 'echo root:testpass \ - > /tmp/passwd' -RUN mkdir -p /test /test2 /test3/test - -# ENV \ -ENV SCUBA 1 DUBA 3 -ENV SCUBA "1 DUBA 3" - -# CMD \ -CMD ["echo", "test"] -CMD echo test -CMD echo "test" -CMD echo 'test' -CMD echo 'test' | wc - - -#EXPOSE\ -EXPOSE 3000 -EXPOSE 9000 5000 6000 - -USER docker -USER docker:root - -VOLUME ["/test"] -VOLUME ["/test", "/test2"] -VOLUME /test3 - -WORKDIR /test - -ADD . / -COPY . 
copy diff --git a/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result b/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result deleted file mode 100644 index 6f7d57a39..000000000 --- a/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result +++ /dev/null @@ -1,29 +0,0 @@ -(from "busybox:buildroot-2014.02") -(maintainer "docker ") -(onbuild (run "echo" "test")) -(onbuild (run "echo test")) -(onbuild (copy "." "/")) -(run "ls" "-la") -(run "echo" "'1234'") -(run "echo \"1234\"") -(run "echo 1234") -(run "echo '1234' && echo \"456\" && echo 789") -(run "sh -c 'echo root:testpass > /tmp/passwd'") -(run "mkdir -p /test /test2 /test3/test") -(env "SCUBA" "1 DUBA 3") -(env "SCUBA" "\"1 DUBA 3\"") -(cmd "echo" "test") -(cmd "echo test") -(cmd "echo \"test\"") -(cmd "echo 'test'") -(cmd "echo 'test' | wc -") -(expose "3000") -(expose "9000" "5000" "6000") -(user "docker") -(user "docker:root") -(volume "/test") -(volume "/test" "/test2") -(volume "/test3") -(workdir "/test") -(add "." "/") -(copy "." "copy") diff --git a/builder/dockerfile/parser/testfiles/mail/Dockerfile b/builder/dockerfile/parser/testfiles/mail/Dockerfile deleted file mode 100644 index f64c1168c..000000000 --- a/builder/dockerfile/parser/testfiles/mail/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y -ADD .muttrc / -ADD .offlineimaprc / -ADD .tmux.conf / -ADD mutt /.mutt -ADD vim /.vim -ADD vimrc /.vimrc -ADD crontab /etc/crontab -RUN chmod 644 /etc/crontab -RUN mkdir /Mail -RUN mkdir /.offlineimap -RUN echo "export TERM=screen-256color" >/.zshenv - -CMD setsid cron; tmux -2 diff --git a/builder/dockerfile/parser/testfiles/mail/result b/builder/dockerfile/parser/testfiles/mail/result deleted file mode 100644 index a0efcf04b..000000000 --- a/builder/dockerfile/parser/testfiles/mail/result +++ /dev/null @@ -1,14 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") -(add ".muttrc" "/") -(add ".offlineimaprc" "/") -(add ".tmux.conf" "/") -(add "mutt" "/.mutt") -(add "vim" "/.vim") -(add "vimrc" "/.vimrc") -(add "crontab" "/etc/crontab") -(run "chmod 644 /etc/crontab") -(run "mkdir /Mail") -(run "mkdir /.offlineimap") -(run "echo \"export TERM=screen-256color\" >/.zshenv") -(cmd "setsid cron; tmux -2") diff --git a/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile b/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile deleted file mode 100644 index 57bb5976a..000000000 --- a/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM foo - -VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs diff --git a/builder/dockerfile/parser/testfiles/multiple-volumes/result b/builder/dockerfile/parser/testfiles/multiple-volumes/result deleted file mode 100644 index 18dbdeeaa..000000000 --- a/builder/dockerfile/parser/testfiles/multiple-volumes/result +++ /dev/null @@ -1,2 +0,0 @@ -(from "foo") -(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") diff --git a/builder/dockerfile/parser/testfiles/mumble/Dockerfile b/builder/dockerfile/parser/testfiles/mumble/Dockerfile deleted file mode 100644 index 5b9ec06a6..000000000 --- a/builder/dockerfile/parser/testfiles/mumble/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM ubuntu:14.04 - 
-RUN apt-get update && apt-get install libcap2-bin mumble-server -y - -ADD ./mumble-server.ini /etc/mumble-server.ini - -CMD /usr/sbin/murmurd diff --git a/builder/dockerfile/parser/testfiles/mumble/result b/builder/dockerfile/parser/testfiles/mumble/result deleted file mode 100644 index a0036a943..000000000 --- a/builder/dockerfile/parser/testfiles/mumble/result +++ /dev/null @@ -1,4 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update && apt-get install libcap2-bin mumble-server -y") -(add "./mumble-server.ini" "/etc/mumble-server.ini") -(cmd "/usr/sbin/murmurd") diff --git a/builder/dockerfile/parser/testfiles/nginx/Dockerfile b/builder/dockerfile/parser/testfiles/nginx/Dockerfile deleted file mode 100644 index bf8368e1c..000000000 --- a/builder/dockerfile/parser/testfiles/nginx/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Erik Hollensbe - -RUN apt-get update && apt-get install nginx-full -y -RUN rm -rf /etc/nginx -ADD etc /etc/nginx -RUN chown -R root:root /etc/nginx -RUN /usr/sbin/nginx -qt -RUN mkdir /www - -CMD ["/usr/sbin/nginx"] - -VOLUME /www -EXPOSE 80 diff --git a/builder/dockerfile/parser/testfiles/nginx/result b/builder/dockerfile/parser/testfiles/nginx/result deleted file mode 100644 index 56ddb6f25..000000000 --- a/builder/dockerfile/parser/testfiles/nginx/result +++ /dev/null @@ -1,11 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Erik Hollensbe ") -(run "apt-get update && apt-get install nginx-full -y") -(run "rm -rf /etc/nginx") -(add "etc" "/etc/nginx") -(run "chown -R root:root /etc/nginx") -(run "/usr/sbin/nginx -qt") -(run "mkdir /www") -(cmd "/usr/sbin/nginx") -(volume "/www") -(expose "80") diff --git a/builder/dockerfile/parser/testfiles/tf2/Dockerfile b/builder/dockerfile/parser/testfiles/tf2/Dockerfile deleted file mode 100644 index 72b79bdd7..000000000 --- a/builder/dockerfile/parser/testfiles/tf2/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM ubuntu:12.04 - -EXPOSE 27015 -EXPOSE 27005 -EXPOSE 26901 -EXPOSE 27020 - -RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y -RUN mkdir -p /steam -RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam -ADD ./script /steam/script -RUN /steam/steamcmd.sh +runscript /steam/script -RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf -RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf -ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg -ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg -ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg -RUN rm -r /steam/tf2/tf/addons/sourcemod/configs -ADD ./configs /steam/tf2/tf/addons/sourcemod/configs -RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en -RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en - -CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/builder/dockerfile/parser/testfiles/tf2/result b/builder/dockerfile/parser/testfiles/tf2/result deleted file mode 100644 index d4f94cd8b..000000000 --- a/builder/dockerfile/parser/testfiles/tf2/result +++ /dev/null @@ -1,20 +0,0 @@ -(from "ubuntu:12.04") -(expose "27015") -(expose "27005") -(expose "26901") -(expose "27020") -(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") -(run "mkdir -p /steam") -(run "curl 
http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") -(add "./script" "/steam/script") -(run "/steam/steamcmd.sh +runscript /steam/script") -(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") -(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") -(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") -(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") -(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") -(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") -(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") -(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") -(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") -(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") diff --git a/builder/dockerfile/parser/testfiles/weechat/Dockerfile b/builder/dockerfile/parser/testfiles/weechat/Dockerfile deleted file mode 100644 index 484208816..000000000 --- a/builder/dockerfile/parser/testfiles/weechat/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y - -ADD .weechat /.weechat -ADD .tmux.conf / -RUN echo "export TERM=screen-256color" >/.zshenv - -CMD zsh -c weechat diff --git a/builder/dockerfile/parser/testfiles/weechat/result b/builder/dockerfile/parser/testfiles/weechat/result deleted file mode 100644 index c3abb4c54..000000000 --- a/builder/dockerfile/parser/testfiles/weechat/result +++ /dev/null @@ -1,6 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") -(add ".weechat" "/.weechat") -(add ".tmux.conf" "/") -(run "echo \"export TERM=screen-256color\" >/.zshenv") -(cmd "zsh -c weechat") diff --git a/builder/dockerfile/parser/testfiles/znc/Dockerfile b/builder/dockerfile/parser/testfiles/znc/Dockerfile deleted file mode 100644 index 3a4da6e91..000000000 --- a/builder/dockerfile/parser/testfiles/znc/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Erik Hollensbe - -RUN apt-get update && apt-get install znc -y -ADD conf /.znc - -CMD [ "/usr/bin/znc", "-f", "-r" ] diff --git a/builder/dockerfile/parser/testfiles/znc/result b/builder/dockerfile/parser/testfiles/znc/result deleted file mode 100644 index 5493b255f..000000000 --- a/builder/dockerfile/parser/testfiles/znc/result +++ /dev/null @@ -1,5 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Erik Hollensbe ") -(run "apt-get update && apt-get install znc -y") -(add "conf" "/.znc") -(cmd "/usr/bin/znc" "-f" "-r") diff --git a/builder/dockerfile/parser/utils.go b/builder/dockerfile/parser/utils.go deleted file mode 100644 index b21eb62ae..000000000 --- a/builder/dockerfile/parser/utils.go +++ /dev/null @@ -1,176 +0,0 @@ -package parser - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -// Dump dumps the AST defined by `node` as a list of sexps. -// Returns a string suitable for printing. 
-func (node *Node) Dump() string { - str := "" - str += node.Value - - if len(node.Flags) > 0 { - str += fmt.Sprintf(" %q", node.Flags) - } - - for _, n := range node.Children { - str += "(" + n.Dump() + ")\n" - } - - if node.Next != nil { - for n := node.Next; n != nil; n = n.Next { - if len(n.Children) > 0 { - str += " " + n.Dump() - } else { - str += " " + strconv.Quote(n.Value) - } - } - } - - return strings.TrimSpace(str) -} - -// performs the dispatch based on the two primal strings, cmd and args. Please -// look at the dispatch table in parser.go to see how these dispatchers work. -func fullDispatch(cmd, args string) (*Node, map[string]bool, error) { - fn := dispatch[cmd] - - // Ignore invalid Dockerfile instructions - if fn == nil { - fn = parseIgnore - } - - sexp, attrs, err := fn(args) - if err != nil { - return nil, nil, err - } - - return sexp, attrs, nil -} - -// splitCommand takes a single line of text and parses out the cmd and args, -// which are used for dispatching to more exact parsing functions. -func splitCommand(line string) (string, []string, string, error) { - var args string - var flags []string - - // Make sure we get the same results irrespective of leading/trailing spaces - cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) - cmd := strings.ToLower(cmdline[0]) - - if len(cmdline) == 2 { - var err error - args, flags, err = extractBuilderFlags(cmdline[1]) - if err != nil { - return "", nil, "", err - } - } - - return cmd, flags, strings.TrimSpace(args), nil -} - -// covers comments and empty lines. Lines should be trimmed before passing to -// this function. -func stripComments(line string) string { - // string is already trimmed at this point - if tokenComment.MatchString(line) { - return tokenComment.ReplaceAllString(line, "") - } - - return line -} - -func extractBuilderFlags(line string) (string, []string, error) { - // Parses the BuilderFlags and returns the remaining part of the line - - const ( - inSpaces = iota // looking for start of a word - inWord - inQuote - ) - - words := []string{} - phase := inSpaces - word := "" - quote := '\000' - blankOK := false - var ch rune - - for pos := 0; pos <= len(line); pos++ { - if pos != len(line) { - ch = rune(line[pos]) - } - - if phase == inSpaces { // Looking for start of word - if pos == len(line) { // end of input - break - } - if unicode.IsSpace(ch) { // skip spaces - continue - } - - // Only keep going if the next word starts with -- - if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { - return line[pos:], words, nil - } - - phase = inWord // found something with "--", fall through - } - if (phase == inWord || phase == inQuote) && (pos == len(line)) { - if word != "--" && (blankOK || len(word) > 0) { - words = append(words, word) - } - break - } - if phase == inWord { - if unicode.IsSpace(ch) { - phase = inSpaces - if word == "--" { - return line[pos:], words, nil - } - if blankOK || len(word) > 0 { - words = append(words, word) - } - word = "" - blankOK = false - continue - } - if ch == '\'' || ch == '"' { - quote = ch - blankOK = true - phase = inQuote - continue - } - if ch == '\\' { - if pos+1 == len(line) { - continue // just skip \ at end - } - pos++ - ch = rune(line[pos]) - } - word += string(ch) - continue - } - if phase == inQuote { - if ch == quote { - phase = inWord - continue - } - if ch == '\\' { - if pos+1 == len(line) { - phase = inWord - continue // just skip \ at end - } - pos++ - ch = rune(line[pos]) - } - word += string(ch) - } - } - - return "", words, nil -} diff 
--git a/builder/dockerfile/shell_parser.go b/builder/dockerfile/shell_parser.go deleted file mode 100644 index c71426677..000000000 --- a/builder/dockerfile/shell_parser.go +++ /dev/null @@ -1,314 +0,0 @@ -package dockerfile - -// This will take a single word and an array of env variables and -// process all quotes (" and ') as well as $xxx and ${xxx} env variable -// tokens. Tries to mimic bash shell process. -// It doesn't support all flavors of ${xx:...} formats but new ones can -// be added by adding code to the "special ${} format processing" section - -import ( - "fmt" - "strings" - "text/scanner" - "unicode" -) - -type shellWord struct { - word string - scanner scanner.Scanner - envs []string - pos int -} - -// ProcessWord will use the 'env' list of environment variables, -// and replace any env var references in 'word'. -func ProcessWord(word string, env []string) (string, error) { - sw := &shellWord{ - word: word, - envs: env, - pos: 0, - } - sw.scanner.Init(strings.NewReader(word)) - word, _, err := sw.process() - return word, err -} - -// ProcessWords will use the 'env' list of environment variables, -// and replace any env var references in 'word' then it will also -// return a slice of strings which represents the 'word' -// split up based on spaces - taking into account quotes. Note that -// this splitting is done **after** the env var substitutions are done. -// Note, each one is trimmed to remove leading and trailing spaces (unless -// they are quoted", but ProcessWord retains spaces between words. -func ProcessWords(word string, env []string) ([]string, error) { - sw := &shellWord{ - word: word, - envs: env, - pos: 0, - } - sw.scanner.Init(strings.NewReader(word)) - _, words, err := sw.process() - return words, err -} - -func (sw *shellWord) process() (string, []string, error) { - return sw.processStopOn(scanner.EOF) -} - -type wordsStruct struct { - word string - words []string - inWord bool -} - -func (w *wordsStruct) addChar(ch rune) { - if unicode.IsSpace(ch) && w.inWord { - if len(w.word) != 0 { - w.words = append(w.words, w.word) - w.word = "" - w.inWord = false - } - } else if !unicode.IsSpace(ch) { - w.addRawChar(ch) - } -} - -func (w *wordsStruct) addRawChar(ch rune) { - w.word += string(ch) - w.inWord = true -} - -func (w *wordsStruct) addString(str string) { - var scan scanner.Scanner - scan.Init(strings.NewReader(str)) - for scan.Peek() != scanner.EOF { - w.addChar(scan.Next()) - } -} - -func (w *wordsStruct) addRawString(str string) { - w.word += str - w.inWord = true -} - -func (w *wordsStruct) getWords() []string { - if len(w.word) > 0 { - w.words = append(w.words, w.word) - - // Just in case we're called again by mistake - w.word = "" - w.inWord = false - } - return w.words -} - -// Process the word, starting at 'pos', and stop when we get to the -// end of the word or the 'stopChar' character -func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { - var result string - var words wordsStruct - - var charFuncMapping = map[rune]func() (string, error){ - '\'': sw.processSingleQuote, - '"': sw.processDoubleQuote, - '$': sw.processDollar, - } - - for sw.scanner.Peek() != scanner.EOF { - ch := sw.scanner.Peek() - - if stopChar != scanner.EOF && ch == stopChar { - sw.scanner.Next() - break - } - if fn, ok := charFuncMapping[ch]; ok { - // Call special processing func for certain chars - tmp, err := fn() - if err != nil { - return "", []string{}, err - } - result += tmp - - if ch == rune('$') { - words.addString(tmp) - } else { - 
words.addRawString(tmp) - } - } else { - // Not special, just add it to the result - ch = sw.scanner.Next() - - if ch == '\\' { - // '\' escapes, except end of line - - ch = sw.scanner.Next() - - if ch == scanner.EOF { - break - } - - words.addRawChar(ch) - } else { - words.addChar(ch) - } - - result += string(ch) - } - } - - return result, words.getWords(), nil -} - -func (sw *shellWord) processSingleQuote() (string, error) { - // All chars between single quotes are taken as-is - // Note, you can't escape ' - var result string - - sw.scanner.Next() - - for { - ch := sw.scanner.Next() - if ch == '\'' || ch == scanner.EOF { - break - } - result += string(ch) - } - - return result, nil -} - -func (sw *shellWord) processDoubleQuote() (string, error) { - // All chars up to the next " are taken as-is, even ', except any $ chars - // But you can escape " with a \ - var result string - - sw.scanner.Next() - - for sw.scanner.Peek() != scanner.EOF { - ch := sw.scanner.Peek() - if ch == '"' { - sw.scanner.Next() - break - } - if ch == '$' { - tmp, err := sw.processDollar() - if err != nil { - return "", err - } - result += tmp - } else { - ch = sw.scanner.Next() - if ch == '\\' { - chNext := sw.scanner.Peek() - - if chNext == scanner.EOF { - // Ignore \ at end of word - continue - } - - if chNext == '"' || chNext == '$' { - // \" and \$ can be escaped, all other \'s are left as-is - ch = sw.scanner.Next() - } - } - result += string(ch) - } - } - - return result, nil -} - -func (sw *shellWord) processDollar() (string, error) { - sw.scanner.Next() - ch := sw.scanner.Peek() - if ch == '{' { - sw.scanner.Next() - name := sw.processName() - ch = sw.scanner.Peek() - if ch == '}' { - // Normal ${xx} case - sw.scanner.Next() - return sw.getEnv(name), nil - } - if ch == ':' { - // Special ${xx:...} format processing - // Yes it allows for recursive $'s in the ... 
spot - - sw.scanner.Next() // skip over : - modifier := sw.scanner.Next() - - word, _, err := sw.processStopOn('}') - if err != nil { - return "", err - } - - // Grab the current value of the variable in question so we - // can use to to determine what to do based on the modifier - newValue := sw.getEnv(name) - - switch modifier { - case '+': - if newValue != "" { - newValue = word - } - return newValue, nil - - case '-': - if newValue == "" { - newValue = word - } - return newValue, nil - - default: - return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) - } - } - return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) - } - // $xxx case - name := sw.processName() - if name == "" { - return "$", nil - } - return sw.getEnv(name), nil -} - -func (sw *shellWord) processName() string { - // Read in a name (alphanumeric or _) - // If it starts with a numeric then just return $# - var name string - - for sw.scanner.Peek() != scanner.EOF { - ch := sw.scanner.Peek() - if len(name) == 0 && unicode.IsDigit(ch) { - ch = sw.scanner.Next() - return string(ch) - } - if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { - break - } - ch = sw.scanner.Next() - name += string(ch) - } - - return name -} - -func (sw *shellWord) getEnv(name string) string { - for _, env := range sw.envs { - i := strings.Index(env, "=") - if i < 0 { - if name == env { - // Should probably never get here, but just in case treat - // it like "var" and "var=" are the same - return "" - } - continue - } - if name != env[:i] { - continue - } - return env[i+1:] - } - return "" -} diff --git a/builder/dockerfile/shell_parser_test.go b/builder/dockerfile/shell_parser_test.go deleted file mode 100644 index 81ac591e9..000000000 --- a/builder/dockerfile/shell_parser_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package dockerfile - -import ( - "bufio" - "os" - "strings" - "testing" -) - -func TestShellParser4EnvVars(t *testing.T) { - fn := "envVarTest" - - file, err := os.Open(fn) - if err != nil { - t.Fatalf("Can't open '%s': %s", err, fn) - } - defer file.Close() - - scanner := bufio.NewScanner(file) - envs := []string{"PWD=/home", "SHELL=bash", "KOREAN=한국어"} - for scanner.Scan() { - line := scanner.Text() - - // Trim comments and blank lines - i := strings.Index(line, "#") - if i >= 0 { - line = line[:i] - } - line = strings.TrimSpace(line) - - if line == "" { - continue - } - - words := strings.Split(line, "|") - if len(words) != 2 { - t.Fatalf("Error in '%s' - should be exactly one | in:%q", fn, line) - } - - words[0] = strings.TrimSpace(words[0]) - words[1] = strings.TrimSpace(words[1]) - - newWord, err := ProcessWord(words[0], envs) - - if err != nil { - newWord = "error" - } - - if newWord != words[1] { - t.Fatalf("Error. 
Src: %s Calc: %s Expected: %s", words[0], newWord, words[1]) - } - } -} - -func TestShellParser4Words(t *testing.T) { - fn := "wordsTest" - - file, err := os.Open(fn) - if err != nil { - t.Fatalf("Can't open '%s': %s", err, fn) - } - defer file.Close() - - envs := []string{} - scanner := bufio.NewScanner(file) - for scanner.Scan() { - line := scanner.Text() - - if strings.HasPrefix(line, "#") { - continue - } - - if strings.HasPrefix(line, "ENV ") { - line = strings.TrimLeft(line[3:], " ") - envs = append(envs, line) - continue - } - - words := strings.Split(line, "|") - if len(words) != 2 { - t.Fatalf("Error in '%s' - should be exactly one | in: %q", fn, line) - } - test := strings.TrimSpace(words[0]) - expected := strings.Split(strings.TrimLeft(words[1], " "), ",") - - result, err := ProcessWords(test, envs) - - if err != nil { - result = []string{"error"} - } - - if len(result) != len(expected) { - t.Fatalf("Error. %q was suppose to result in %q, but got %q instead", test, expected, result) - } - for i, w := range expected { - if w != result[i] { - t.Fatalf("Error. %q was suppose to result in %q, but got %q instead", test, expected, result) - } - } - } -} - -func TestGetEnv(t *testing.T) { - sw := &shellWord{ - word: "", - envs: nil, - pos: 0, - } - - sw.envs = []string{} - if sw.getEnv("foo") != "" { - t.Fatalf("2 - 'foo' should map to ''") - } - - sw.envs = []string{"foo"} - if sw.getEnv("foo") != "" { - t.Fatalf("3 - 'foo' should map to ''") - } - - sw.envs = []string{"foo="} - if sw.getEnv("foo") != "" { - t.Fatalf("4 - 'foo' should map to ''") - } - - sw.envs = []string{"foo=bar"} - if sw.getEnv("foo") != "bar" { - t.Fatalf("5 - 'foo' should map to 'bar'") - } - - sw.envs = []string{"foo=bar", "car=hat"} - if sw.getEnv("foo") != "bar" { - t.Fatalf("6 - 'foo' should map to 'bar'") - } - if sw.getEnv("car") != "hat" { - t.Fatalf("7 - 'car' should map to 'hat'") - } - - // Make sure we grab the first 'car' in the list - sw.envs = []string{"foo=bar", "car=hat", "car=bike"} - if sw.getEnv("car") != "hat" { - t.Fatalf("8 - 'car' should map to 'hat'") - } -} diff --git a/builder/dockerfile/support.go b/builder/dockerfile/support.go deleted file mode 100644 index e87588910..000000000 --- a/builder/dockerfile/support.go +++ /dev/null @@ -1,19 +0,0 @@ -package dockerfile - -import "strings" - -// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile -// for exec form it returns untouched args slice -// for shell form it returns concatenated args as the first element of a slice -func handleJSONArgs(args []string, attributes map[string]bool) []string { - if len(args) == 0 { - return []string{} - } - - if attributes != nil && attributes["json"] { - return args - } - - // literal string command, not an exec array - return []string{strings.Join(args, " ")} -} diff --git a/builder/dockerfile/support_test.go b/builder/dockerfile/support_test.go deleted file mode 100644 index 7cc6fe9dc..000000000 --- a/builder/dockerfile/support_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package dockerfile - -import "testing" - -type testCase struct { - name string - args []string - attributes map[string]bool - expected []string -} - -func initTestCases() []testCase { - testCases := []testCase{} - - testCases = append(testCases, testCase{ - name: "empty args", - args: []string{}, - attributes: make(map[string]bool), - expected: []string{}, - }) - - jsonAttributes := make(map[string]bool) - jsonAttributes["json"] = true - - testCases = append(testCases, testCase{ - name: "json 
attribute with one element", - args: []string{"foo"}, - attributes: jsonAttributes, - expected: []string{"foo"}, - }) - - testCases = append(testCases, testCase{ - name: "json attribute with two elements", - args: []string{"foo", "bar"}, - attributes: jsonAttributes, - expected: []string{"foo", "bar"}, - }) - - testCases = append(testCases, testCase{ - name: "no attributes", - args: []string{"foo", "bar"}, - attributes: nil, - expected: []string{"foo bar"}, - }) - - return testCases -} - -func TestHandleJSONArgs(t *testing.T) { - testCases := initTestCases() - - for _, test := range testCases { - arguments := handleJSONArgs(test.args, test.attributes) - - if len(arguments) != len(test.expected) { - t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments)) - } - - for i := range test.expected { - if arguments[i] != test.expected[i] { - t.Fatalf("In test \"%s\": element as position %d is incorrect. Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i]) - } - } - } -} diff --git a/builder/dockerfile/utils_test.go b/builder/dockerfile/utils_test.go deleted file mode 100644 index 80a3f1bab..000000000 --- a/builder/dockerfile/utils_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package dockerfile - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -// createTestTempDir creates a temporary directory for testing. -// It returns the created path and a cleanup function which is meant to be used as deferred call. -// When an error occurs, it terminates the test. -func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { - path, err := ioutil.TempDir(dir, prefix) - - if err != nil { - t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) - } - - return path, func() { - err = os.RemoveAll(path) - - if err != nil { - t.Fatalf("Error when removing directory %s: %s", path, err) - } - } -} - -// createTestTempFile creates a temporary file within dir with specific contents and permissions. -// When an error occurs, it terminates the test -func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { - filePath := filepath.Join(dir, filename) - err := ioutil.WriteFile(filePath, []byte(contents), perm) - - if err != nil { - t.Fatalf("Error when creating %s file: %s", filename, err) - } - - return filePath -} - -// createTestSymlink creates a symlink file within dir which points to oldname -func createTestSymlink(t *testing.T, dir, filename, oldname string) string { - filePath := filepath.Join(dir, filename) - if err := os.Symlink(oldname, filePath); err != nil { - t.Fatalf("Error when creating %s symlink to %s: %s", filename, oldname, err) - } - - return filePath -} diff --git a/builder/dockerfile/wordsTest b/builder/dockerfile/wordsTest deleted file mode 100644 index fa916c67f..000000000 --- a/builder/dockerfile/wordsTest +++ /dev/null @@ -1,25 +0,0 @@ -hello | hello -hello${hi}bye | hellobye -ENV hi=hi -hello${hi}bye | hellohibye -ENV space=abc def -hello${space}bye | helloabc,defbye -hello"${space}"bye | helloabc defbye -hello "${space}"bye | hello,abc defbye -ENV leading= ab c -hello${leading}def | hello,ab,cdef -hello"${leading}" def | hello ab c,def -hello"${leading}" | hello ab c -hello${leading} | hello,ab,c -# next line MUST have 3 trailing spaces, don't erase them! 
-ENV trailing=ab c -hello${trailing} | helloab,c -hello${trailing}d | helloab,c,d -hello"${trailing}"d | helloab c d -# next line MUST have 3 trailing spaces, don't erase them! -hel"lo${trailing}" | helloab c -hello" there " | hello there -hello there | hello,there -hello\ there | hello there -hello" there | hello there -hello\" there | hello",there diff --git a/builder/dockerignore.go b/builder/dockerignore.go deleted file mode 100644 index 2990770a4..000000000 --- a/builder/dockerignore.go +++ /dev/null @@ -1,47 +0,0 @@ -package builder - -import ( - "os" - - "github.com/docker/docker/builder/dockerignore" - "github.com/docker/docker/pkg/fileutils" -) - -// DockerIgnoreContext wraps a ModifiableContext to add a method -// for handling the .dockerignore file at the root of the context. -type DockerIgnoreContext struct { - ModifiableContext -} - -// Process reads the .dockerignore file at the root of the embedded context. -// If .dockerignore does not exist in the context, then nil is returned. -// -// It can take a list of files to be removed after .dockerignore is removed. -// This is used for server-side implementations of builders that need to send -// the .dockerignore file as well as the special files specified in filesToRemove, -// but expect them to be excluded from the context after they were processed. -// -// For example, server-side Dockerfile builders are expected to pass in the name -// of the Dockerfile to be removed after it was parsed. -// -// TODO: Don't require a ModifiableContext (use Context instead) and don't remove -// files, instead handle a list of files to be excluded from the context. -func (c DockerIgnoreContext) Process(filesToRemove []string) error { - f, err := c.Open(".dockerignore") - // Note that a missing .dockerignore file isn't treated as an error - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - excludes, _ := dockerignore.ReadAll(f) - filesToRemove = append([]string{".dockerignore"}, filesToRemove...) - for _, fileToRemove := range filesToRemove { - rm, _ := fileutils.Matches(fileToRemove, excludes) - if rm { - c.Remove(fileToRemove) - } - } - return nil -} diff --git a/builder/dockerignore/dockerignore.go b/builder/dockerignore/dockerignore.go deleted file mode 100644 index 9ddf5dd51..000000000 --- a/builder/dockerignore/dockerignore.go +++ /dev/null @@ -1,49 +0,0 @@ -package dockerignore - -import ( - "bufio" - "bytes" - "fmt" - "io" - "path/filepath" - "strings" -) - -// ReadAll reads a .dockerignore file and returns the list of file patterns -// to ignore. Note this will trim whitespace from each line as well -// as use GO's "clean" func to get the shortest/cleanest path for each. 
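// A usage sketch, grounded in the test further down (error handling elided;
// note that ReadAll closes the reader it is handed):
//
//	f, _ := os.Open(".dockerignore") // contains: test1, /test2, /a/file/here, lastfile
//	excludes, _ := ReadAll(f)
//	// excludes == []string{"test1", "/test2", "/a/file/here", "lastfile"}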
-func ReadAll(reader io.ReadCloser) ([]string, error) { - if reader == nil { - return nil, nil - } - defer reader.Close() - scanner := bufio.NewScanner(reader) - var excludes []string - currentLine := 0 - - utf8bom := []byte{0xEF, 0xBB, 0xBF} - for scanner.Scan() { - scannedBytes := scanner.Bytes() - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - pattern := string(scannedBytes) - currentLine++ - // Lines starting with # (comments) are ignored before processing - if strings.HasPrefix(pattern, "#") { - continue - } - pattern = strings.TrimSpace(pattern) - if pattern == "" { - continue - } - pattern = filepath.Clean(pattern) - pattern = filepath.ToSlash(pattern) - excludes = append(excludes, pattern) - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("Error reading .dockerignore: %v", err) - } - return excludes, nil -} diff --git a/builder/dockerignore/dockerignore_test.go b/builder/dockerignore/dockerignore_test.go deleted file mode 100644 index 361b04191..000000000 --- a/builder/dockerignore/dockerignore_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package dockerignore - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestReadAll(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "dockerignore-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - di, err := ReadAll(nil) - if err != nil { - t.Fatalf("Expected not to have error, got %v", err) - } - - if diLen := len(di); diLen != 0 { - t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen) - } - - diName := filepath.Join(tmpDir, ".dockerignore") - content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile") - err = ioutil.WriteFile(diName, []byte(content), 0777) - if err != nil { - t.Fatal(err) - } - - diFd, err := os.Open(diName) - if err != nil { - t.Fatal(err) - } - di, err = ReadAll(diFd) - if err != nil { - t.Fatal(err) - } - - if di[0] != "test1" { - t.Fatalf("First element is not test1") - } - if di[1] != "/test2" { - t.Fatalf("Second element is not /test2") - } - if di[2] != "/a/file/here" { - t.Fatalf("Third element is not /a/file/here") - } - if di[3] != "lastfile" { - t.Fatalf("Fourth element is not lastfile") - } -} diff --git a/builder/dockerignore_test.go b/builder/dockerignore_test.go deleted file mode 100644 index 3c0ceda4c..000000000 --- a/builder/dockerignore_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package builder - -import ( - "io/ioutil" - "log" - "os" - "sort" - "testing" -) - -const shouldStayFilename = "should_stay" - -func extractFilenames(files []os.FileInfo) []string { - filenames := make([]string, len(files), len(files)) - - for i, file := range files { - filenames[i] = file.Name() - } - - return filenames -} - -func checkDirectory(t *testing.T, dir string, expectedFiles []string) { - files, err := ioutil.ReadDir(dir) - - if err != nil { - t.Fatalf("Could not read directory: %s", err) - } - - if len(files) != len(expectedFiles) { - log.Fatalf("Directory should contain exactly %d file(s), got %d", len(expectedFiles), len(files)) - } - - filenames := extractFilenames(files) - sort.Strings(filenames) - sort.Strings(expectedFiles) - - for i, filename := range filenames { - if filename != expectedFiles[i] { - t.Fatalf("File %s should be in the directory, got: %s", expectedFiles[i], filename) - } - } -} - -func executeProcess(t *testing.T, contextDir string) { - modifiableCtx := &tarSumContext{root: contextDir} - ctx := DockerIgnoreContext{ModifiableContext: modifiableCtx} - - 
err := ctx.Process([]string{DefaultDockerfileName}) - - if err != nil { - t.Fatalf("Error when executing Process: %s", err) - } -} - -func TestProcessShouldRemoveDockerfileDockerignore(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") - defer cleanup() - - createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) - createTestTempFile(t, contextDir, dockerignoreFilename, "Dockerfile\n.dockerignore", 0777) - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - executeProcess(t, contextDir) - - checkDirectory(t, contextDir, []string{shouldStayFilename}) - -} - -func TestProcessNoDockerignore(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") - defer cleanup() - - createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - executeProcess(t, contextDir) - - checkDirectory(t, contextDir, []string{shouldStayFilename, DefaultDockerfileName}) - -} - -func TestProcessShouldLeaveAllFiles(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") - defer cleanup() - - createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - createTestTempFile(t, contextDir, dockerignoreFilename, "input1\ninput2", 0777) - - executeProcess(t, contextDir) - - checkDirectory(t, contextDir, []string{shouldStayFilename, DefaultDockerfileName, dockerignoreFilename}) - -} diff --git a/builder/git.go b/builder/git.go deleted file mode 100644 index 74df24461..000000000 --- a/builder/git.go +++ /dev/null @@ -1,28 +0,0 @@ -package builder - -import ( - "os" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/gitutils" -) - -// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. -func MakeGitContext(gitURL string) (ModifiableContext, error) { - root, err := gitutils.Clone(gitURL) - if err != nil { - return nil, err - } - - c, err := archive.Tar(root, archive.Uncompressed) - if err != nil { - return nil, err - } - - defer func() { - // TODO: print errors? - c.Close() - os.RemoveAll(root) - }() - return MakeTarSumContext(c) -} diff --git a/builder/remote.go b/builder/remote.go deleted file mode 100644 index 12f34c7b6..000000000 --- a/builder/remote.go +++ /dev/null @@ -1,152 +0,0 @@ -package builder - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "regexp" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/urlutil" -) - -// When downloading remote contexts, limit the amount (in bytes) -// to be read from the response body in order to detect its Content-Type -const maxPreambleLength = 100 - -const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` - -var mimeRe = regexp.MustCompile(acceptableRemoteMIME) - -// MakeRemoteContext downloads a context from remoteURL and returns it. -// -// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of -// maxPreambleLength bytes from the body to help detecting the MIME type. -// Look at acceptableRemoteMIME for more details. 
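// A hypothetical caller that treats plain-text bodies as a lone Dockerfile
// (DetectContextFromRemoteURL below does exactly this) might pass:
//
//	handlers := map[string]func(io.ReadCloser) (io.ReadCloser, error){
//		httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
//			df, err := ioutil.ReadAll(rc)
//			if err != nil {
//				return nil, err
//			}
//			return archive.Generate(DefaultDockerfileName, string(df))
//		},
//	}
//	context, err := MakeRemoteContext(remoteURL, handlers)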
-// -// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected -// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). -// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned. -func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) { - f, err := httputils.Download(remoteURL) - if err != nil { - return nil, fmt.Errorf("Error downloading remote context %s: %v", remoteURL, err) - } - defer f.Body.Close() - - var contextReader io.ReadCloser - if contentTypeHandlers != nil { - contentType := f.Header.Get("Content-Type") - clen := f.ContentLength - - contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) - if err != nil { - return nil, fmt.Errorf("Error detecting content type for remote %s: %v", remoteURL, err) - } - defer contextReader.Close() - - // This loop tries to find a content-type handler for the detected content-type. - // If it could not find one from the caller-supplied map, it tries the empty content-type `""` - // which is interpreted as a fallback handler (usually used for raw tar contexts). - for _, ct := range []string{contentType, ""} { - if fn, ok := contentTypeHandlers[ct]; ok { - defer contextReader.Close() - if contextReader, err = fn(contextReader); err != nil { - return nil, err - } - break - } - } - } - - // Pass through - this is a pre-packaged context, presumably - // with a Dockerfile with the right name inside it. - return MakeTarSumContext(contextReader) -} - -// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used -// irrespective of user input. -// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint). -func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context ModifiableContext, dockerfileName string, err error) { - switch { - case remoteURL == "": - context, err = MakeTarSumContext(r) - case urlutil.IsGitURL(remoteURL): - context, err = MakeGitContext(remoteURL) - case urlutil.IsURL(remoteURL): - context, err = MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ - httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { - dockerfile, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - - // dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller - // should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input. - dockerfileName = DefaultDockerfileName - - // TODO: return a context without tarsum - return archive.Generate(dockerfileName, string(dockerfile)) - }, - // fallback handler (tar context) - "": func(rc io.ReadCloser) (io.ReadCloser, error) { - return createProgressReader(rc), nil - }, - }) - default: - err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) - } - return -} - -// inspectResponse looks into the http response data at r to determine whether its -// content-type is on the list of acceptable content types for remote build contexts. 
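// Examples taken from the tests below: an empty Content-Type with a
// Dockerfile body is sniffed as "text/plain"; "application/octet-stream"
// carrying the xz magic bytes stays "application/octet-stream"; and
// "application/json" yields an error while the body is still returned
// intact through the re-joined preamble reader.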
-// This function returns: -// - a string representation of the detected content-type -// - an io.Reader for the response body -// - an error value which will be non-nil either when something goes wrong while -// reading bytes from r or when the detected content-type is not acceptable. -func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) { - plen := clen - if plen <= 0 || plen > maxPreambleLength { - plen = maxPreambleLength - } - - preamble := make([]byte, plen, plen) - rlen, err := r.Read(preamble) - if rlen == 0 { - return ct, r, errors.New("Empty response") - } - if err != nil && err != io.EOF { - return ct, r, err - } - - preambleR := bytes.NewReader(preamble) - bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) - // Some web servers will use application/octet-stream as the default - // content type for files without an extension (e.g. 'Dockerfile') - // so if we receive this value we better check for text content - contentType := ct - if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream { - contentType, _, err = httputils.DetectContentType(preamble) - if err != nil { - return contentType, bodyReader, err - } - } - - contentType = selectAcceptableMIME(contentType) - var cterr error - if len(contentType) == 0 { - cterr = fmt.Errorf("unsupported Content-Type %q", ct) - contentType = ct - } - - return contentType, bodyReader, cterr -} - -func selectAcceptableMIME(ct string) string { - return mimeRe.FindString(ct) -} diff --git a/builder/remote_test.go b/builder/remote_test.go deleted file mode 100644 index 20ee02a57..000000000 --- a/builder/remote_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package builder - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" -) - -var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic - -func TestSelectAcceptableMIME(t *testing.T) { - validMimeStrings := []string{ - "application/x-bzip2", - "application/bzip2", - "application/gzip", - "application/x-gzip", - "application/x-xz", - "application/xz", - "application/tar", - "application/x-tar", - "application/octet-stream", - "text/plain", - } - - invalidMimeStrings := []string{ - "", - "application/octet", - "application/json", - } - - for _, m := range invalidMimeStrings { - if len(selectAcceptableMIME(m)) > 0 { - t.Fatalf("Should not have accepted %q", m) - } - } - - for _, m := range validMimeStrings { - if str := selectAcceptableMIME(m); str == "" { - t.Fatalf("Should have accepted %q", m) - } - } -} - -func TestInspectEmptyResponse(t *testing.T) { - ct := "application/octet-stream" - br := ioutil.NopCloser(bytes.NewReader([]byte(""))) - contentType, bReader, err := inspectResponse(ct, br, 0) - if err == nil { - t.Fatalf("Should have generated an error for an empty response") - } - if contentType != "application/octet-stream" { - t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) - } - body, err := ioutil.ReadAll(bReader) - if err != nil { - t.Fatal(err) - } - if len(body) != 0 { - t.Fatal("response body should remain empty") - } -} - -func TestInspectResponseBinary(t *testing.T) { - ct := "application/octet-stream" - br := ioutil.NopCloser(bytes.NewReader(binaryContext)) - contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext))) - if err != nil { - t.Fatal(err) - } - if contentType != "application/octet-stream" { - t.Fatalf("Content type should 
be 'application/octet-stream' but is %q", contentType) - } - body, err := ioutil.ReadAll(bReader) - if err != nil { - t.Fatal(err) - } - if len(body) != len(binaryContext) { - t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body)) - } - for i := range body { - if body[i] != binaryContext[i] { - t.Fatalf("Corrupted response body at byte index %d", i) - } - } -} - -func TestResponseUnsupportedContentType(t *testing.T) { - content := []byte(dockerfileContents) - ct := "application/json" - br := ioutil.NopCloser(bytes.NewReader(content)) - contentType, bReader, err := inspectResponse(ct, br, int64(len(dockerfileContents))) - - if err == nil { - t.Fatal("Should have returned an error on content-type 'application/json'") - } - if contentType != ct { - t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType) - } - body, err := ioutil.ReadAll(bReader) - if err != nil { - t.Fatal(err) - } - if string(body) != dockerfileContents { - t.Fatalf("Corrupted response body %s", body) - } -} - -func TestInspectResponseTextSimple(t *testing.T) { - content := []byte(dockerfileContents) - ct := "text/plain" - br := ioutil.NopCloser(bytes.NewReader(content)) - contentType, bReader, err := inspectResponse(ct, br, int64(len(content))) - if err != nil { - t.Fatal(err) - } - if contentType != "text/plain" { - t.Fatalf("Content type should be 'text/plain' but is %q", contentType) - } - body, err := ioutil.ReadAll(bReader) - if err != nil { - t.Fatal(err) - } - if string(body) != dockerfileContents { - t.Fatalf("Corrupted response body %s", body) - } -} - -func TestInspectResponseEmptyContentType(t *testing.T) { - content := []byte(dockerfileContents) - br := ioutil.NopCloser(bytes.NewReader(content)) - contentType, bodyReader, err := inspectResponse("", br, int64(len(content))) - if err != nil { - t.Fatal(err) - } - if contentType != "text/plain" { - t.Fatalf("Content type should be 'text/plain' but is %q", contentType) - } - body, err := ioutil.ReadAll(bodyReader) - if err != nil { - t.Fatal(err) - } - if string(body) != dockerfileContents { - t.Fatalf("Corrupted response body %s", body) - } -} - -func TestMakeRemoteContext(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - serverURL, _ := url.Parse(server.URL) - - serverURL.Path = "/" + DefaultDockerfileName - remoteURL := serverURL.String() - - mux.Handle("/", http.FileServer(http.Dir(contextDir))) - - remoteContext, err := MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ - httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { - dockerfile, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - return archive.Generate(DefaultDockerfileName, string(dockerfile)) - }, - }) - - if err != nil { - t.Fatalf("Error when executing DetectContextFromRemoteURL: %s", err) - } - - if remoteContext == nil { - t.Fatalf("Remote context should not be nil") - } - - tarSumCtx, ok := remoteContext.(*tarSumContext) - - if !ok { - t.Fatalf("Cast error, remote context should be casted to tarSumContext") - } - - fileInfoSums := tarSumCtx.sums - - if fileInfoSums.Len() != 1 { - t.Fatalf("Size of file info sums should be 1, got: %d", fileInfoSums.Len()) - } - - fileInfo := fileInfoSums.GetFile(DefaultDockerfileName) - - if fileInfo == nil { - 
t.Fatalf("There should be file named %s in fileInfoSums", DefaultDockerfileName) - } - - if fileInfo.Pos() != 0 { - t.Fatalf("File %s should have position 0, got %d", DefaultDockerfileName, fileInfo.Pos()) - } -} diff --git a/builder/tarsum.go b/builder/tarsum.go deleted file mode 100644 index 48372cb01..000000000 --- a/builder/tarsum.go +++ /dev/null @@ -1,158 +0,0 @@ -package builder - -import ( - "fmt" - "io" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/tarsum" -) - -type tarSumContext struct { - root string - sums tarsum.FileInfoSums -} - -func (c *tarSumContext) Close() error { - return os.RemoveAll(c.root) -} - -func convertPathError(err error, cleanpath string) error { - if err, ok := err.(*os.PathError); ok { - err.Path = cleanpath - return err - } - return err -} - -func (c *tarSumContext) Open(path string) (io.ReadCloser, error) { - cleanpath, fullpath, err := c.normalize(path) - if err != nil { - return nil, err - } - r, err := os.Open(fullpath) - if err != nil { - return nil, convertPathError(err, cleanpath) - } - return r, nil -} - -func (c *tarSumContext) Stat(path string) (string, FileInfo, error) { - cleanpath, fullpath, err := c.normalize(path) - if err != nil { - return "", nil, err - } - - st, err := os.Lstat(fullpath) - if err != nil { - return "", nil, convertPathError(err, cleanpath) - } - - rel, err := filepath.Rel(c.root, fullpath) - if err != nil { - return "", nil, convertPathError(err, cleanpath) - } - - // We set sum to path by default for the case where GetFile returns nil. - // The usual case is if relative path is empty. - sum := path - // Use the checksum of the followed path(not the possible symlink) because - // this is the file that is actually copied. - if tsInfo := c.sums.GetFile(rel); tsInfo != nil { - sum = tsInfo.Sum() - } - fi := &HashedFileInfo{PathFileInfo{st, fullpath, filepath.Base(cleanpath)}, sum} - return rel, fi, nil -} - -// MakeTarSumContext returns a build Context from a tar stream. -// -// It extracts the tar stream to a temporary folder that is deleted as soon as -// the Context is closed. -// As the extraction happens, a tarsum is calculated for every file, and the set of -// all those sums then becomes the source of truth for all operations on this Context. -// -// Closing tarStream has to be done by the caller. -func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) { - root, err := ioutils.TempDir("", "docker-builder") - if err != nil { - return nil, err - } - - tsc := &tarSumContext{root: root} - - // Make sure we clean-up upon error. 
In the happy case the caller - // is expected to manage the clean-up - defer func() { - if err != nil { - tsc.Close() - } - }() - - decompressedStream, err := archive.DecompressStream(tarStream) - if err != nil { - return nil, err - } - - sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) - if err != nil { - return nil, err - } - - if err := chrootarchive.Untar(sum, root, nil); err != nil { - return nil, err - } - - tsc.sums = sum.GetSums() - - return tsc, nil -} - -func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err error) { - cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] - fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root) - if err != nil { - return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath) - } - _, err = os.Lstat(fullpath) - if err != nil { - return "", "", convertPathError(err, path) - } - return -} - -func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error { - root = filepath.Join(c.root, filepath.Join(string(filepath.Separator), root)) - return filepath.Walk(root, func(fullpath string, info os.FileInfo, err error) error { - rel, err := filepath.Rel(c.root, fullpath) - if err != nil { - return err - } - if rel == "." { - return nil - } - - sum := rel - if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { - sum = tsInfo.Sum() - } - fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum} - if err := walkFn(rel, fi, nil); err != nil { - return err - } - return nil - }) -} - -func (c *tarSumContext) Remove(path string) error { - _, fullpath, err := c.normalize(path) - if err != nil { - return err - } - return os.RemoveAll(fullpath) -} diff --git a/builder/tarsum_test.go b/builder/tarsum_test.go deleted file mode 100644 index cc3f6f515..000000000 --- a/builder/tarsum_test.go +++ /dev/null @@ -1,265 +0,0 @@ -package builder - -import ( - "bufio" - "bytes" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" -) - -const ( - filename = "test" - contents = "contents test" -) - -func init() { - reexec.Init() -} - -func TestCloseRootDirectory(t *testing.T) { - contextDir, err := ioutil.TempDir("", "builder-tarsum-test") - - if err != nil { - t.Fatalf("Error with creating temporary directory: %s", err) - } - - tarsum := &tarSumContext{root: contextDir} - - err = tarsum.Close() - - if err != nil { - t.Fatalf("Error while executing Close: %s", err) - } - - _, err = os.Stat(contextDir) - - if !os.IsNotExist(err) { - t.Fatalf("Directory should not exist at this point") - defer os.RemoveAll(contextDir) - } -} - -func TestOpenFile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - createTestTempFile(t, contextDir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - file, err := tarSum.Open(filename) - - if err != nil { - t.Fatalf("Error when executing Open: %s", err) - } - - defer file.Close() - - scanner := bufio.NewScanner(file) - buff := bytes.NewBufferString("") - - for scanner.Scan() { - buff.WriteString(scanner.Text()) - } - - if contents != buff.String() { - t.Fatalf("Contents are not equal. 
Expected: %s, got: %s", contents, buff.String()) - } - -} - -func TestOpenNotExisting(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - tarSum := &tarSumContext{root: contextDir} - - file, err := tarSum.Open("not-existing") - - if file != nil { - t.Fatal("Opened file should be nil") - } - - if !os.IsNotExist(err) { - t.Fatalf("Error when executing Open: %s", err) - } -} - -func TestStatFile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - testFilename := createTestTempFile(t, contextDir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - relPath, fileInfo, err := tarSum.Stat(filename) - - if err != nil { - t.Fatalf("Error when executing Stat: %s", err) - } - - if relPath != filename { - t.Fatalf("Relative path should be equal to %s, got %s", filename, relPath) - } - - if fileInfo.Path() != testFilename { - t.Fatalf("Full path should be equal to %s, got %s", testFilename, fileInfo.Path()) - } -} - -func TestStatSubdir(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - testFilename := createTestTempFile(t, contextSubdir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - relativePath, err := filepath.Rel(contextDir, testFilename) - - if err != nil { - t.Fatalf("Error when getting relative path: %s", err) - } - - relPath, fileInfo, err := tarSum.Stat(relativePath) - - if err != nil { - t.Fatalf("Error when executing Stat: %s", err) - } - - if relPath != relativePath { - t.Fatalf("Relative path should be equal to %s, got %s", relativePath, relPath) - } - - if fileInfo.Path() != testFilename { - t.Fatalf("Full path should be equal to %s, got %s", testFilename, fileInfo.Path()) - } -} - -func TestStatNotExisting(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - tarSum := &tarSumContext{root: contextDir} - - relPath, fileInfo, err := tarSum.Stat("not-existing") - - if relPath != "" { - t.Fatal("Relative path should be nil") - } - - if fileInfo != nil { - t.Fatalf("File info should be nil") - } - - if !os.IsNotExist(err) { - t.Fatalf("This file should not exist: %s", err) - } -} - -func TestRemoveDirectory(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - relativePath, err := filepath.Rel(contextDir, contextSubdir) - - if err != nil { - t.Fatalf("Error when getting relative path: %s", err) - } - - tarSum := &tarSumContext{root: contextDir} - - err = tarSum.Remove(relativePath) - - if err != nil { - t.Fatalf("Error when executing Remove: %s", err) - } - - _, err = os.Stat(contextSubdir) - - if !os.IsNotExist(err) { - t.Fatalf("Directory should not exist at this point") - } -} - -func TestMakeSumTarContext(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - createTestTempFile(t, contextDir, filename, contents, 0777) - - tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("error: %s", err) - } - - defer tarStream.Close() - - tarSum, err := MakeTarSumContext(tarStream) - - if err != nil { - t.Fatalf("Error when executing MakeSumContext: %s", err) - } - - if tarSum == nil { - 
t.Fatalf("Tar sum context should not be nil") - } -} - -func TestWalkWithoutError(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - createTestTempFile(t, contextSubdir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - walkFun := func(path string, fi FileInfo, err error) error { - return nil - } - - err := tarSum.Walk(contextSubdir, walkFun) - - if err != nil { - t.Fatalf("Error when executing Walk: %s", err) - } -} - -type WalkError struct { -} - -func (we WalkError) Error() string { - return "Error when executing Walk" -} - -func TestWalkWithError(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - tarSum := &tarSumContext{root: contextDir} - - walkFun := func(path string, fi FileInfo, err error) error { - return WalkError{} - } - - err := tarSum.Walk(contextSubdir, walkFun) - - if err == nil { - t.Fatalf("Error should not be nil") - } -} diff --git a/builder/utils_test.go b/builder/utils_test.go deleted file mode 100644 index 1101ff1d1..000000000 --- a/builder/utils_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package builder - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -const ( - dockerfileContents = "FROM busybox" - dockerignoreFilename = ".dockerignore" - testfileContents = "test" -) - -// createTestTempDir creates a temporary directory for testing. -// It returns the created path and a cleanup function which is meant to be used as deferred call. -// When an error occurs, it terminates the test. -func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { - path, err := ioutil.TempDir(dir, prefix) - - if err != nil { - t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) - } - - return path, func() { - err = os.RemoveAll(path) - - if err != nil { - t.Fatalf("Error when removing directory %s: %s", path, err) - } - } -} - -// createTestTempSubdir creates a temporary directory for testing. -// It returns the created path but doesn't provide a cleanup function, -// so createTestTempSubdir should be used only for creating temporary subdirectories -// whose parent directories are properly cleaned up. -// When an error occurs, it terminates the test. -func createTestTempSubdir(t *testing.T, dir, prefix string) string { - path, err := ioutil.TempDir(dir, prefix) - - if err != nil { - t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) - } - - return path -} - -// createTestTempFile creates a temporary file within dir with specific contents and permissions. -// When an error occurs, it terminates the test -func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { - filePath := filepath.Join(dir, filename) - err := ioutil.WriteFile(filePath, []byte(contents), perm) - - if err != nil { - t.Fatalf("Error when creating %s file: %s", filename, err) - } - - return filePath -} - -// chdir changes current working directory to dir. -// It returns a function which changes working directory back to the previous one. -// This function is meant to be executed as a deferred call. -// When an error occurs, it terminates the test. 
-func chdir(t *testing.T, dir string) func() { - workingDirectory, err := os.Getwd() - - if err != nil { - t.Fatalf("Error when retrieving working directory: %s", err) - } - - err = os.Chdir(dir) - - if err != nil { - t.Fatalf("Error when changing directory to %s: %s", dir, err) - } - - return func() { - err = os.Chdir(workingDirectory) - - if err != nil { - t.Fatalf("Error when changing back to working directory (%s): %s", workingDirectory, err) - } - } -} diff --git a/cli/cli.go b/cli/cli.go deleted file mode 100644 index f6d48d6fa..000000000 --- a/cli/cli.go +++ /dev/null @@ -1,196 +0,0 @@ -package cli - -import ( - "errors" - "fmt" - "io" - "os" - "strings" - - flag "github.com/docker/docker/pkg/mflag" -) - -// Cli represents a command line interface. -type Cli struct { - Stderr io.Writer - handlers []Handler - Usage func() -} - -// Handler holds the different commands Cli will call -// It should have methods with names starting with `Cmd` like: -// func (h myHandler) CmdFoo(args ...string) error -type Handler interface { - Command(name string) func(...string) error -} - -// Initializer can be optionally implemented by a Handler to -// initialize before each call to one of its commands. -type Initializer interface { - Initialize() error -} - -// New instantiates a ready-to-use Cli. -func New(handlers ...Handler) *Cli { - // make the generic Cli object the first cli handler - // in order to handle `docker help` appropriately - cli := new(Cli) - cli.handlers = append([]Handler{cli}, handlers...) - return cli -} - -var errCommandNotFound = errors.New("command not found") - -func (cli *Cli) command(args ...string) (func(...string) error, error) { - for _, c := range cli.handlers { - if c == nil { - continue - } - if cmd := c.Command(strings.Join(args, " ")); cmd != nil { - if ci, ok := c.(Initializer); ok { - if err := ci.Initialize(); err != nil { - return nil, err - } - } - return cmd, nil - } - } - return nil, errCommandNotFound -} - -// Run executes the specified command. -func (cli *Cli) Run(args ...string) error { - if len(args) > 1 { - command, err := cli.command(args[:2]...) - if err == nil { - return command(args[2:]...) - } - if err != errCommandNotFound { - return err - } - } - if len(args) > 0 { - command, err := cli.command(args[0]) - if err != nil { - if err == errCommandNotFound { - cli.noSuchCommand(args[0]) - return nil - } - return err - } - return command(args[1:]...) - } - return cli.CmdHelp() -} - -func (cli *Cli) noSuchCommand(command string) { - if cli.Stderr == nil { - cli.Stderr = os.Stderr - } - fmt.Fprintf(cli.Stderr, "docker: '%s' is not a docker command.\nSee 'docker --help'.\n", command) - os.Exit(1) -} - -// Command returns a command handler, or nil if the command does not exist -func (cli *Cli) Command(name string) func(...string) error { - return map[string]func(...string) error{ - "help": cli.CmdHelp, - }[name] -} - -// CmdHelp displays information on a Docker command. -// -// If more than one command is specified, information is only shown for the first command. -// -// Usage: docker help COMMAND or docker COMMAND --help -func (cli *Cli) CmdHelp(args ...string) error { - if len(args) > 1 { - command, err := cli.command(args[:2]...) 
- if err == nil { - command("--help") - return nil - } - if err != errCommandNotFound { - return err - } - } - if len(args) > 0 { - command, err := cli.command(args[0]) - if err != nil { - if err == errCommandNotFound { - cli.noSuchCommand(args[0]) - return nil - } - return err - } - command("--help") - return nil - } - - if cli.Usage == nil { - flag.Usage() - } else { - cli.Usage() - } - - return nil -} - -// Subcmd is a subcommand of the main "docker" command. -// A subcommand represents an action that can be performed -// from the Docker command line client. -// -// To see all available subcommands, run "docker --help". -func Subcmd(name string, synopses []string, description string, exitOnError bool) *flag.FlagSet { - var errorHandling flag.ErrorHandling - if exitOnError { - errorHandling = flag.ExitOnError - } else { - errorHandling = flag.ContinueOnError - } - flags := flag.NewFlagSet(name, errorHandling) - flags.Usage = func() { - flags.ShortUsage() - flags.PrintDefaults() - } - - flags.ShortUsage = func() { - options := "" - if flags.FlagCountUndeprecated() > 0 { - options = " [OPTIONS]" - } - - if len(synopses) == 0 { - synopses = []string{""} - } - - // Allow for multiple command usage synopses. - for i, synopsis := range synopses { - lead := "\t" - if i == 0 { - // First line needs the word 'Usage'. - lead = "Usage:\t" - } - - if synopsis != "" { - synopsis = " " + synopsis - } - - fmt.Fprintf(flags.Out(), "\n%sdocker %s%s%s", lead, name, options, synopsis) - } - - fmt.Fprintf(flags.Out(), "\n\n%s\n", description) - } - - return flags -} - -// StatusError reports an unsuccessful exit by a command. -type StatusError struct { - Status string - StatusCode int -} - -func (e StatusError) Error() string { - return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) -} diff --git a/cli/cobraadaptor/adaptor.go b/cli/cobraadaptor/adaptor.go deleted file mode 100644 index 589e46c16..000000000 --- a/cli/cobraadaptor/adaptor.go +++ /dev/null @@ -1,154 +0,0 @@ -package cobraadaptor - -import ( - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/container" - "github.com/docker/docker/api/client/image" - "github.com/docker/docker/api/client/network" - "github.com/docker/docker/api/client/node" - "github.com/docker/docker/api/client/plugin" - "github.com/docker/docker/api/client/registry" - "github.com/docker/docker/api/client/service" - "github.com/docker/docker/api/client/stack" - "github.com/docker/docker/api/client/swarm" - "github.com/docker/docker/api/client/system" - "github.com/docker/docker/api/client/volume" - "github.com/docker/docker/cli" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/pkg/term" - "github.com/spf13/cobra" -) - -// CobraAdaptor is an adaptor for supporting spf13/cobra commands in the -// docker/cli framework -type CobraAdaptor struct { - rootCmd *cobra.Command - dockerCli *client.DockerCli -} - -// NewCobraAdaptor returns a new handler -func NewCobraAdaptor(clientFlags *cliflags.ClientFlags) CobraAdaptor { - stdin, stdout, stderr := term.StdStreams() - dockerCli := client.NewDockerCli(stdin, stdout, stderr, clientFlags) - - var rootCmd = &cobra.Command{ - Use: "docker", - SilenceUsage: true, - SilenceErrors: true, - } - rootCmd.SetUsageTemplate(usageTemplate) - rootCmd.SetHelpTemplate(helpTemplate) - rootCmd.SetFlagErrorFunc(cli.FlagErrorFunc) - rootCmd.SetOutput(stdout) - rootCmd.AddCommand( - node.NewNodeCommand(dockerCli), - service.NewServiceCommand(dockerCli), - stack.NewStackCommand(dockerCli), - 
stack.NewTopLevelDeployCommand(dockerCli), - swarm.NewSwarmCommand(dockerCli), - container.NewAttachCommand(dockerCli), - container.NewCommitCommand(dockerCli), - container.NewCopyCommand(dockerCli), - container.NewCreateCommand(dockerCli), - container.NewDiffCommand(dockerCli), - container.NewExportCommand(dockerCli), - container.NewKillCommand(dockerCli), - container.NewLogsCommand(dockerCli), - container.NewPauseCommand(dockerCli), - container.NewPortCommand(dockerCli), - container.NewPsCommand(dockerCli), - container.NewRenameCommand(dockerCli), - container.NewRestartCommand(dockerCli), - container.NewRmCommand(dockerCli), - container.NewRunCommand(dockerCli), - container.NewStartCommand(dockerCli), - container.NewStatsCommand(dockerCli), - container.NewStopCommand(dockerCli), - container.NewTopCommand(dockerCli), - container.NewUnpauseCommand(dockerCli), - container.NewUpdateCommand(dockerCli), - container.NewWaitCommand(dockerCli), - image.NewBuildCommand(dockerCli), - image.NewHistoryCommand(dockerCli), - image.NewImagesCommand(dockerCli), - image.NewLoadCommand(dockerCli), - image.NewRemoveCommand(dockerCli), - image.NewSaveCommand(dockerCli), - image.NewPullCommand(dockerCli), - image.NewPushCommand(dockerCli), - image.NewSearchCommand(dockerCli), - image.NewImportCommand(dockerCli), - image.NewTagCommand(dockerCli), - network.NewNetworkCommand(dockerCli), - system.NewEventsCommand(dockerCli), - registry.NewLoginCommand(dockerCli), - registry.NewLogoutCommand(dockerCli), - system.NewVersionCommand(dockerCli), - volume.NewVolumeCommand(dockerCli), - system.NewInfoCommand(dockerCli), - ) - plugin.NewPluginCommand(rootCmd, dockerCli) - - rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") - rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") - - return CobraAdaptor{ - rootCmd: rootCmd, - dockerCli: dockerCli, - } -} - -// Usage returns the list of commands and their short usage string for -// all top level cobra commands. -func (c CobraAdaptor) Usage() []cli.Command { - cmds := []cli.Command{} - for _, cmd := range c.rootCmd.Commands() { - if cmd.Name() != "" { - cmds = append(cmds, cli.Command{Name: cmd.Name(), Description: cmd.Short}) - } - } - return cmds -} - -func (c CobraAdaptor) run(cmd string, args []string) error { - if err := c.dockerCli.Initialize(); err != nil { - return err - } - // Prepend the command name to support normal cobra command delegation - c.rootCmd.SetArgs(append([]string{cmd}, args...)) - return c.rootCmd.Execute() -} - -// Command returns a cli command handler if one exists -func (c CobraAdaptor) Command(name string) func(...string) error { - for _, cmd := range c.rootCmd.Commands() { - if cmd.Name() == name { - return func(args ...string) error { - return c.run(name, args) - } - } - } - return nil -} - -var usageTemplate = `Usage: {{if not .HasSubCommands}}{{if .HasLocalFlags}}{{appendIfNotPresent .UseLine "[OPTIONS]"}}{{else}}{{.UseLine}}{{end}}{{end}}{{if .HasSubCommands}}{{ .CommandPath}} COMMAND{{end}} - -{{with or .Long .Short }}{{. 
| trim}}{{end}}{{if gt .Aliases 0}} - -Aliases: - {{.NameAndAliases}}{{end}}{{if .HasExample}} - -Examples: -{{ .Example }}{{end}}{{if .HasFlags}} - -Options: -{{.Flags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableSubCommands}} - -Commands:{{range .Commands}}{{if .IsAvailableCommand}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasSubCommands }} - -Run '{{.CommandPath}} COMMAND --help' for more information on a command.{{end}} -` - -var helpTemplate = ` -{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/cli/error.go b/cli/error.go deleted file mode 100644 index e421c7f7c..000000000 --- a/cli/error.go +++ /dev/null @@ -1,20 +0,0 @@ -package cli - -import "strings" - -// Errors is a list of errors. -// Useful in a loop if you don't want to return the error right away and instead -// want to display, after the loop, all the errors that happened during it. -type Errors []error - -func (errList Errors) Error() string { - if len(errList) < 1 { - return "" - } - - out := make([]string, len(errList)) - for i := range errList { - out[i] = errList[i].Error() - } - return strings.Join(out, ", ") -} diff --git a/cli/flagerrors.go b/cli/flagerrors.go deleted file mode 100644 index aab8a9884..000000000 --- a/cli/flagerrors.go +++ /dev/null @@ -1,21 +0,0 @@ -package cli - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -// FlagErrorFunc prints an error message which matches the format of the -// docker/docker/cli error messages -func FlagErrorFunc(cmd *cobra.Command, err error) error { - if err == nil { - return err - } - - usage := "" - if cmd.HasSubCommands() { - usage = "\n\n" + cmd.UsageString() - } - return fmt.Errorf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage) -} diff --git a/cli/flags/client.go b/cli/flags/client.go deleted file mode 100644 index cc7309db4..000000000 --- a/cli/flags/client.go +++ /dev/null @@ -1,12 +0,0 @@ -package flags - -import flag "github.com/docker/docker/pkg/mflag" - -// ClientFlags represents flags for the docker client. -type ClientFlags struct { - FlagSet *flag.FlagSet - Common *CommonFlags - PostParse func() - - ConfigDir string -} diff --git a/cli/flags/common.go b/cli/flags/common.go deleted file mode 100644 index 4726b04f2..000000000 --- a/cli/flags/common.go +++ /dev/null @@ -1,123 +0,0 @@ -package flags - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/opts" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/go-connections/tlsconfig" -) - -const ( - // DefaultTrustKeyFile is the default filename for the trust key - DefaultTrustKeyFile = "key.json" - // DefaultCaFile is the default filename for the CA pem file - DefaultCaFile = "ca.pem" - // DefaultKeyFile is the default filename for the key pem file - DefaultKeyFile = "key.pem" - // DefaultCertFile is the default filename for the cert pem file - DefaultCertFile = "cert.pem" - // TLSVerifyKey is the default flag name for the tls verification option - TLSVerifyKey = "tlsverify" -) - -var ( - dockerCertPath = os.Getenv("DOCKER_CERT_PATH") - dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" -) - -// CommonFlags are flags common to both the client and the daemon.
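
For reference, the cli.Errors type removed above existed to collect failures across a loop and report them as one error. A minimal sketch of the pattern it supported, using a self-contained copy of the type (the item names and the failure mode are hypothetical):

    package main

    import (
        "fmt"
        "strings"
    )

    // Errors mirrors the removed cli.Errors: its Error() joins the
    // collected messages with ", ".
    type Errors []error

    func (errList Errors) Error() string {
        if len(errList) < 1 {
            return ""
        }
        out := make([]string, len(errList))
        for i := range errList {
            out[i] = errList[i].Error()
        }
        return strings.Join(out, ", ")
    }

    func main() {
        var errs Errors
        // Hypothetical per-item work; each failure is collected, not returned.
        for _, name := range []string{"web", "db"} {
            errs = append(errs, fmt.Errorf("failed to remove %s", name))
        }
        if len(errs) > 0 {
            fmt.Println(errs.Error()) // failed to remove web, failed to remove db
        }
    }
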
-type CommonFlags struct { - FlagSet *flag.FlagSet - PostParse func() - - Debug bool - Hosts []string - LogLevel string - TLS bool - TLSVerify bool - TLSOptions *tlsconfig.Options - TrustKey string -} - -// InitCommonFlags initializes flags common to both client and daemon -func InitCommonFlags() *CommonFlags { - var commonFlags = &CommonFlags{FlagSet: new(flag.FlagSet)} - - if dockerCertPath == "" { - dockerCertPath = cliconfig.ConfigDir() - } - - commonFlags.PostParse = func() { postParseCommon(commonFlags) } - - cmd := commonFlags.FlagSet - - cmd.BoolVar(&commonFlags.Debug, []string{"D", "-debug"}, false, "Enable debug mode") - cmd.StringVar(&commonFlags.LogLevel, []string{"l", "-log-level"}, "info", "Set the logging level") - cmd.BoolVar(&commonFlags.TLS, []string{"-tls"}, false, "Use TLS; implied by --tlsverify") - cmd.BoolVar(&commonFlags.TLSVerify, []string{"-tlsverify"}, dockerTLSVerify, "Use TLS and verify the remote") - - // TODO use flag flag.String([]string{"i", "-identity"}, "", "Path to libtrust key file") - - var tlsOptions tlsconfig.Options - commonFlags.TLSOptions = &tlsOptions - cmd.StringVar(&tlsOptions.CAFile, []string{"-tlscacert"}, filepath.Join(dockerCertPath, DefaultCaFile), "Trust certs signed only by this CA") - cmd.StringVar(&tlsOptions.CertFile, []string{"-tlscert"}, filepath.Join(dockerCertPath, DefaultCertFile), "Path to TLS certificate file") - cmd.StringVar(&tlsOptions.KeyFile, []string{"-tlskey"}, filepath.Join(dockerCertPath, DefaultKeyFile), "Path to TLS key file") - - cmd.Var(opts.NewNamedListOptsRef("hosts", &commonFlags.Hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to") - return commonFlags -} - -func postParseCommon(commonFlags *CommonFlags) { - cmd := commonFlags.FlagSet - - SetDaemonLogLevel(commonFlags.LogLevel) - - // Regardless of whether the user sets it to true or false, if they - // specify --tlsverify at all then we need to turn on tls - // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need - // to check that here as well - if cmd.IsSet("-"+TLSVerifyKey) || commonFlags.TLSVerify { - commonFlags.TLS = true - } - - if !commonFlags.TLS { - commonFlags.TLSOptions = nil - } else { - tlsOptions := commonFlags.TLSOptions - tlsOptions.InsecureSkipVerify = !commonFlags.TLSVerify - - // Reset CertFile and KeyFile to empty string if the user did not specify - // the respective flags and the respective default files were not found. - if !cmd.IsSet("-tlscert") { - if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { - tlsOptions.CertFile = "" - } - } - if !cmd.IsSet("-tlskey") { - if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { - tlsOptions.KeyFile = "" - } - } - } -} - -// SetDaemonLogLevel sets the logrus logging level -// TODO: this is a bad name, it applies to the client as well. 
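
One behavior buried in the postParseCommon logic above is easy to miss: specifying --tlsverify at all, even as --tlsverify=false, turns TLS on, and DOCKER_TLS_VERIFY in the environment has the same effect. A standalone sketch of just that rule, with the flag plumbing reduced to plain booleans since pkg/mflag is removed as well:

    package main

    import "fmt"

    // resolveTLS mirrors the removed postParseCommon rule: mentioning
    // tlsverify at all (or having DOCKER_TLS_VERIFY set) implies TLS.
    func resolveTLS(tls, tlsVerifySet, tlsVerify bool) (useTLS, skipVerify bool) {
        if tlsVerifySet || tlsVerify {
            tls = true
        }
        return tls, !tlsVerify
    }

    func main() {
        useTLS, skipVerify := resolveTLS(false, true, false)
        fmt.Println(useTLS, skipVerify) // true true: --tlsverify=false still enables TLS
    }
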
-func SetDaemonLogLevel(logLevel string) { - if logLevel != "" { - lvl, err := logrus.ParseLevel(logLevel) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) - os.Exit(1) - } - logrus.SetLevel(lvl) - } else { - logrus.SetLevel(logrus.InfoLevel) - } -} diff --git a/cli/required.go b/cli/required.go deleted file mode 100644 index 8ee02c842..000000000 --- a/cli/required.go +++ /dev/null @@ -1,96 +0,0 @@ -package cli - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -// NoArgs validates args and returns an error if there are any args -func NoArgs(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return nil - } - - if cmd.HasSubCommands() { - return fmt.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) - } - - return fmt.Errorf( - "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) -} - -// RequiresMinArgs returns an error if there is not at least min args -func RequiresMinArgs(min int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) >= min { - return nil - } - return fmt.Errorf( - "\"%s\" requires at least %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - min, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} - -// RequiresMaxArgs returns an error if there is not at most max args -func RequiresMaxArgs(max int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) <= max { - return nil - } - return fmt.Errorf( - "\"%s\" requires at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - max, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} - -// RequiresRangeArgs returns an error if there is not at least min args and at most max args -func RequiresRangeArgs(min int, max int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) >= min && len(args) <= max { - return nil - } - return fmt.Errorf( - "\"%s\" requires at least %d and at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - min, - max, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} - -// ExactArgs returns an error if there is not the exact number of args -func ExactArgs(number int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) == number { - return nil - } - return fmt.Errorf( - "\"%s\" requires exactly %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - number, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} diff --git a/cli/usage.go b/cli/usage.go deleted file mode 100644 index 56e406e3a..000000000 --- a/cli/usage.go +++ /dev/null @@ -1,22 +0,0 @@ -package cli - -// Command is the struct containing the command name and description -type Command struct { - Name string - Description string -} - -// DockerCommandUsage lists the top level docker commands and their short usage -var DockerCommandUsage = []Command{ - {"exec", "Run a command in a running container"}, - {"inspect", "Return low-level information on a container, image or task"}, -} - -// DockerCommands stores all the docker command -var DockerCommands = make(map[string]Command) - -func init() { - for _, cmd := range DockerCommandUsage { - DockerCommands[cmd.Name] = cmd - } -} diff --git a/cliconfig/config.go b/cliconfig/config.go deleted file mode 100644 index 
9d5df0ac4..000000000 --- a/cliconfig/config.go +++ /dev/null @@ -1,120 +0,0 @@ -package cliconfig - -import ( - "fmt" - "io" - "os" - "path/filepath" - - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/docker/pkg/homedir" - "github.com/docker/engine-api/types" -) - -const ( - // ConfigFileName is the name of config file - ConfigFileName = "config.json" - configFileDir = ".docker" - oldConfigfile = ".dockercfg" -) - -var ( - configDir = os.Getenv("DOCKER_CONFIG") -) - -func init() { - if configDir == "" { - configDir = filepath.Join(homedir.Get(), configFileDir) - } -} - -// ConfigDir returns the directory the configuration file is stored in -func ConfigDir() string { - return configDir -} - -// SetConfigDir sets the directory the configuration file is stored in -func SetConfigDir(dir string) { - configDir = dir -} - -// NewConfigFile initializes an empty configuration file for the given filename 'fn' -func NewConfigFile(fn string) *configfile.ConfigFile { - return &configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - HTTPHeaders: make(map[string]string), - Filename: fn, - } -} - -// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from -// a non-nested reader -func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LegacyLoadFromReader(configData) - return &configFile, err -} - -// LoadFromReader is a convenience function that creates a ConfigFile object from -// a reader -func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LoadFromReader(configData) - return &configFile, err -} - -// Load reads the configuration files in the given directory, and sets up -// the auth config information and returns values. 
-// FIXME: use the internal golang config parser -func Load(configDir string) (*configfile.ConfigFile, error) { - if configDir == "" { - configDir = ConfigDir() - } - - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - Filename: filepath.Join(configDir, ConfigFileName), - } - - // Try happy path first - latest config file - if _, err := os.Stat(configFile.Filename); err == nil { - file, err := os.Open(configFile.Filename) - if err != nil { - return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) - } - defer file.Close() - err = configFile.LoadFromReader(file) - if err != nil { - err = fmt.Errorf("%s - %v", configFile.Filename, err) - } - return &configFile, err - } else if !os.IsNotExist(err) { - // if file is there but we can't stat it for any reason other - // than it doesn't exist then stop - return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) - } - - // Can't find latest config file so check for the old one - confFile := filepath.Join(homedir.Get(), oldConfigfile) - if _, err := os.Stat(confFile); err != nil { - return &configFile, nil //missing file is not an error - } - file, err := os.Open(confFile) - if err != nil { - return &configFile, fmt.Errorf("%s - %v", confFile, err) - } - defer file.Close() - err = configFile.LegacyLoadFromReader(file) - if err != nil { - return &configFile, fmt.Errorf("%s - %v", confFile, err) - } - - if configFile.HTTPHeaders == nil { - configFile.HTTPHeaders = map[string]string{} - } - return &configFile, nil -} diff --git a/cliconfig/config_test.go b/cliconfig/config_test.go deleted file mode 100644 index 78b360cc2..000000000 --- a/cliconfig/config_test.go +++ /dev/null @@ -1,545 +0,0 @@ -package cliconfig - -import ( - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/docker/pkg/homedir" -) - -func TestEmptyConfigDir(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - SetConfigDir(tmpHome) - - config, err := Load("") - if err != nil { - t.Fatalf("Failed loading on empty config dir: %q", err) - } - - expectedConfigFilename := filepath.Join(tmpHome, ConfigFileName) - if config.Filename != expectedConfigFilename { - t.Fatalf("Expected config filename %s, got %s", expectedConfigFilename, config.Filename) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestMissingFile(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on missing file: %q", err) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestSaveFileToDirs(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - tmpHome += "/.docker" - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on missing file: %q", err) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestEmptyFile(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - if err := 
ioutil.WriteFile(fn, []byte(""), 0600); err != nil { - t.Fatal(err) - } - - _, err = Load(tmpHome) - if err == nil { - t.Fatalf("Was supposed to fail") - } -} - -func TestEmptyJson(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - if err := ioutil.WriteFile(fn, []byte("{}"), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestOldInvalidsAuth(t *testing.T) { - invalids := map[string]string{ - `username = test`: "The Auth config file is empty", - `username -password`: "Invalid Auth config file", - `username = test -email`: "Invalid auth configuration file", - } - - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - for content, expectedError := range invalids { - fn := filepath.Join(tmpHome, oldConfigfile) - if err := ioutil.WriteFile(fn, []byte(content), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - // Use Contains instead of == since the file name will change each time - if err == nil || !strings.Contains(err.Error(), expectedError) { - t.Fatalf("Should have failed\nConfig: %v\nGot: %v\nExpected: %v", config, err, expectedError) - } - - } -} - -func TestOldValidAuth(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - fn := filepath.Join(tmpHome, oldConfigfile) - js := `username = am9lam9lOmhlbGxv - email = user@example.com` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatal(err) - } - - // defaultIndexserver is https://index.docker.io/v1/ - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv" - } - } -}` - - if configStr != expConfStr { - t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) - } -} - -func TestOldJsonInvalid(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - fn := filepath.Join(tmpHome, oldConfigfile) - js := `{"https://index.docker.io/v1/":{"auth":"test","email":"user@example.com"}}` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - // Use Contains instead of == since the file name will change each time - if err == nil || !strings.Contains(err.Error(), "Invalid auth configuration file") { - 
t.Fatalf("Expected an error got : %v, %v", config, err) - } -} - -func TestOldJson(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - fn := filepath.Join(tmpHome, oldConfigfile) - js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv", - "email": "user@example.com" - } - } -}` - - if configStr != expConfStr { - t.Fatalf("Should have save in new form: \n'%s'\n not \n'%s'\n", configStr, expConfStr) - } -} - -func TestNewJson(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } } }` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv" - } - } -}` - - if configStr != expConfStr { - t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) - } -} - -func TestNewJsonNoEmail(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } } }` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv" - } - } -}` - - if configStr != expConfStr { - t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) - } -} - -func TestJsonWithPsFormat(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - js := `{ - "auths": { 
"https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { - t.Fatalf("Unknown ps format: %s\n", config.PsFormat) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - if !strings.Contains(configStr, `"psFormat":`) || - !strings.Contains(configStr, "{{.ID}}") { - t.Fatalf("Should have save in new form: %s", configStr) - } -} - -// Save it and make sure it shows up in new form -func saveConfigAndValidateNewFormat(t *testing.T, config *configfile.ConfigFile, homeFolder string) string { - if err := config.Save(); err != nil { - t.Fatalf("Failed to save: %q", err) - } - - buf, err := ioutil.ReadFile(filepath.Join(homeFolder, ConfigFileName)) - if err != nil { - t.Fatal(err) - } - if !strings.Contains(string(buf), `"auths":`) { - t.Fatalf("Should have save in new form: %s", string(buf)) - } - return string(buf) -} - -func TestConfigDir(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - if ConfigDir() == tmpHome { - t.Fatalf("Expected ConfigDir to be different than %s by default, but was the same", tmpHome) - } - - // Update configDir - SetConfigDir(tmpHome) - - if ConfigDir() != tmpHome { - t.Fatalf("Expected ConfigDir to %s, but was %s", tmpHome, ConfigDir()) - } -} - -func TestConfigFile(t *testing.T) { - configFilename := "configFilename" - configFile := NewConfigFile(configFilename) - - if configFile.Filename != configFilename { - t.Fatalf("Expected %s, got %s", configFilename, configFile.Filename) - } -} - -func TestJsonReaderNoFile(t *testing.T) { - js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` - - config, err := LoadFromReader(strings.NewReader(js)) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - -} - -func TestOldJsonReaderNoFile(t *testing.T) { - js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` - - config, err := LegacyLoadFromReader(strings.NewReader(js)) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } -} - -func TestJsonWithPsFormatNoFile(t *testing.T) { - js := `{ - "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - config, err := LoadFromReader(strings.NewReader(js)) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { - t.Fatalf("Unknown ps format: %s\n", config.PsFormat) - } - -} - -func TestJsonSaveWithNoFile(t *testing.T) { - js := `{ - "auths": { "https://index.docker.io/v1/": { "auth": 
"am9lam9lOmhlbGxv" } }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - config, err := LoadFromReader(strings.NewReader(js)) - err = config.Save() - if err == nil { - t.Fatalf("Expected error. File should not have been able to save with no file name.") - } - - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatalf("Failed to create a temp dir: %q", err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - err = config.SaveToWriter(f) - if err != nil { - t.Fatalf("Failed saving to file: %q", err) - } - buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if err != nil { - t.Fatal(err) - } - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv" - } - }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - if string(buf) != expConfStr { - t.Fatalf("Should have save in new form: \n%s\nnot \n%s", string(buf), expConfStr) - } -} - -func TestLegacyJsonSaveWithNoFile(t *testing.T) { - - js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` - config, err := LegacyLoadFromReader(strings.NewReader(js)) - err = config.Save() - if err == nil { - t.Fatalf("Expected error. File should not have been able to save with no file name.") - } - - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatalf("Failed to create a temp dir: %q", err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err = config.SaveToWriter(f); err != nil { - t.Fatalf("Failed saving to file: %q", err) - } - buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if err != nil { - t.Fatal(err) - } - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv", - "email": "user@example.com" - } - } -}` - - if string(buf) != expConfStr { - t.Fatalf("Should have save in new form: \n%s\n not \n%s", string(buf), expConfStr) - } -} diff --git a/cliconfig/configfile/file.go b/cliconfig/configfile/file.go deleted file mode 100644 index 7c94e27dc..000000000 --- a/cliconfig/configfile/file.go +++ /dev/null @@ -1,177 +0,0 @@ -package configfile - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/docker/engine-api/types" -) - -const ( - // This constant is only used for really old config files when the - // URL wasn't saved as part of the config file and it was just - // assumed to be this value. 
- defaultIndexserver = "https://index.docker.io/v1/" -) - -// ConfigFile ~/.docker/config.json file info -type ConfigFile struct { - AuthConfigs map[string]types.AuthConfig `json:"auths"` - HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` - PsFormat string `json:"psFormat,omitempty"` - ImagesFormat string `json:"imagesFormat,omitempty"` - DetachKeys string `json:"detachKeys,omitempty"` - CredentialsStore string `json:"credsStore,omitempty"` - Filename string `json:"-"` // Note: for internal use only -} - -// LegacyLoadFromReader reads the non-nested configuration data given and sets up the -// auth config information with given directory and populates the receiver object -func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { - b, err := ioutil.ReadAll(configData) - if err != nil { - return err - } - - if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return fmt.Errorf("The Auth config file is empty") - } - authConfig := types.AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return fmt.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return err - } - authConfig.ServerAddress = defaultIndexserver - configFile.AuthConfigs[defaultIndexserver] = authConfig - } else { - for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return err - } - authConfig.Auth = "" - authConfig.ServerAddress = k - configFile.AuthConfigs[k] = authConfig - } - } - return nil -} - -// LoadFromReader reads the configuration data given and sets up the auth config -// information with given directory and populates the receiver object -func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { - if err := json.NewDecoder(configData).Decode(&configFile); err != nil { - return err - } - var err error - for addr, ac := range configFile.AuthConfigs { - ac.Username, ac.Password, err = decodeAuth(ac.Auth) - if err != nil { - return err - } - ac.Auth = "" - ac.ServerAddress = addr - configFile.AuthConfigs[addr] = ac - } - return nil -} - -// ContainsAuth returns whether there is authentication configured -// in this file or not. 
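
The auth entries these loaders hand to decodeAuth (below) are nothing more than base64-encoded "username:password" pairs; the am9lam9lOmhlbGxv fixture used throughout the tests above decodes accordingly. A quick round-trip sketch:

    package main

    import (
        "encoding/base64"
        "fmt"
        "strings"
    )

    func main() {
        // config.json "auth" values are base64 of "username:password".
        decoded, err := base64.StdEncoding.DecodeString("am9lam9lOmhlbGxv")
        if err != nil {
            panic(err)
        }
        parts := strings.SplitN(string(decoded), ":", 2)
        fmt.Println(parts[0], parts[1]) // joejoe hello
    }
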
-func (configFile *ConfigFile) ContainsAuth() bool { - return configFile.CredentialsStore != "" || - (configFile.AuthConfigs != nil && len(configFile.AuthConfigs) > 0) -} - -// SaveToWriter encodes and writes out all the authorization information to -// the given writer -func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { - // Encode sensitive data into a new/temp struct - tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) - for k, authConfig := range configFile.AuthConfigs { - authCopy := authConfig - // encode and save the authstring, while blanking out the original fields - authCopy.Auth = encodeAuth(&authCopy) - authCopy.Username = "" - authCopy.Password = "" - authCopy.ServerAddress = "" - tmpAuthConfigs[k] = authCopy - } - - saveAuthConfigs := configFile.AuthConfigs - configFile.AuthConfigs = tmpAuthConfigs - defer func() { configFile.AuthConfigs = saveAuthConfigs }() - - data, err := json.MarshalIndent(configFile, "", "\t") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// Save encodes and writes out all the authorization information -func (configFile *ConfigFile) Save() error { - if configFile.Filename == "" { - return fmt.Errorf("Can't save config with empty filename") - } - - if err := os.MkdirAll(filepath.Dir(configFile.Filename), 0700); err != nil { - return err - } - f, err := os.OpenFile(configFile.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - defer f.Close() - return configFile.SaveToWriter(f) -} - -// encodeAuth creates a base64 encoded string to containing authorization information -func encodeAuth(authConfig *types.AuthConfig) string { - if authConfig.Username == "" && authConfig.Password == "" { - return "" - } - - authStr := authConfig.Username + ":" + authConfig.Password - msg := []byte(authStr) - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) - base64.StdEncoding.Encode(encoded, msg) - return string(encoded) -} - -// decodeAuth decodes a base64 encoded string and returns username and password -func decodeAuth(authStr string) (string, string, error) { - if authStr == "" { - return "", "", nil - } - - decLen := base64.StdEncoding.DecodedLen(len(authStr)) - decoded := make([]byte, decLen) - authByte := []byte(authStr) - n, err := base64.StdEncoding.Decode(decoded, authByte) - if err != nil { - return "", "", err - } - if n > decLen { - return "", "", fmt.Errorf("Something went wrong decoding auth config") - } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { - return "", "", fmt.Errorf("Invalid auth configuration file") - } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil -} diff --git a/cliconfig/configfile/file_test.go b/cliconfig/configfile/file_test.go deleted file mode 100644 index 15eecb73e..000000000 --- a/cliconfig/configfile/file_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package configfile - -import ( - "testing" - - "github.com/docker/engine-api/types" -) - -func TestEncodeAuth(t *testing.T) { - newAuthConfig := &types.AuthConfig{Username: "ken", Password: "test"} - authStr := encodeAuth(newAuthConfig) - decAuthConfig := &types.AuthConfig{} - var err error - decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) - if err != nil { - t.Fatal(err) - } - if newAuthConfig.Username != decAuthConfig.Username { - t.Fatal("Encode Username doesn't match decoded Username") - } - if newAuthConfig.Password != decAuthConfig.Password { - t.Fatal("Encode Password doesn't 
match decoded Password") - } - if authStr != "a2VuOnRlc3Q=" { - t.Fatal("AuthString encoding isn't correct.") - } -} diff --git a/cliconfig/credentials/credentials.go b/cliconfig/credentials/credentials.go deleted file mode 100644 index 510cf8cf0..000000000 --- a/cliconfig/credentials/credentials.go +++ /dev/null @@ -1,17 +0,0 @@ -package credentials - -import ( - "github.com/docker/engine-api/types" -) - -// Store is the interface that any credentials store must implement. -type Store interface { - // Erase removes credentials from the store for a given server. - Erase(serverAddress string) error - // Get retrieves credentials from the store for a given server. - Get(serverAddress string) (types.AuthConfig, error) - // GetAll retrieves all the credentials from the store. - GetAll() (map[string]types.AuthConfig, error) - // Store saves credentials in the store. - Store(authConfig types.AuthConfig) error -} diff --git a/cliconfig/credentials/default_store.go b/cliconfig/credentials/default_store.go deleted file mode 100644 index b4733709b..000000000 --- a/cliconfig/credentials/default_store.go +++ /dev/null @@ -1,22 +0,0 @@ -package credentials - -import ( - "os/exec" - - "github.com/docker/docker/cliconfig/configfile" -) - -// DetectDefaultStore sets the default credentials store -// if the host includes the default store helper program. -func DetectDefaultStore(c *configfile.ConfigFile) { - if c.CredentialsStore != "" { - // user defined - return - } - - if defaultCredentialsStore != "" { - if _, err := exec.LookPath(remoteCredentialsPrefix + defaultCredentialsStore); err == nil { - c.CredentialsStore = defaultCredentialsStore - } - } -} diff --git a/cliconfig/credentials/default_store_darwin.go b/cliconfig/credentials/default_store_darwin.go deleted file mode 100644 index 63e8ed401..000000000 --- a/cliconfig/credentials/default_store_darwin.go +++ /dev/null @@ -1,3 +0,0 @@ -package credentials - -const defaultCredentialsStore = "osxkeychain" diff --git a/cliconfig/credentials/default_store_linux.go b/cliconfig/credentials/default_store_linux.go deleted file mode 100644 index 864c540f6..000000000 --- a/cliconfig/credentials/default_store_linux.go +++ /dev/null @@ -1,3 +0,0 @@ -package credentials - -const defaultCredentialsStore = "secretservice" diff --git a/cliconfig/credentials/default_store_unsupported.go b/cliconfig/credentials/default_store_unsupported.go deleted file mode 100644 index 519ef53dc..000000000 --- a/cliconfig/credentials/default_store_unsupported.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !windows,!darwin,!linux - -package credentials - -const defaultCredentialsStore = "" diff --git a/cliconfig/credentials/default_store_windows.go b/cliconfig/credentials/default_store_windows.go deleted file mode 100644 index fb6a9745c..000000000 --- a/cliconfig/credentials/default_store_windows.go +++ /dev/null @@ -1,3 +0,0 @@ -package credentials - -const defaultCredentialsStore = "wincred" diff --git a/cliconfig/credentials/file_store.go b/cliconfig/credentials/file_store.go deleted file mode 100644 index cf1c89fcc..000000000 --- a/cliconfig/credentials/file_store.go +++ /dev/null @@ -1,67 +0,0 @@ -package credentials - -import ( - "strings" - - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/engine-api/types" -) - -// fileStore implements a credentials store using -// the docker configuration file to keep the credentials in plain text. -type fileStore struct { - file *configfile.ConfigFile -} - -// NewFileStore creates a new file credentials store. 
-func NewFileStore(file *configfile.ConfigFile) Store { - return &fileStore{ - file: file, - } -} - -// Erase removes the given credentials from the file store. -func (c *fileStore) Erase(serverAddress string) error { - delete(c.file.AuthConfigs, serverAddress) - return c.file.Save() -} - -// Get retrieves credentials for a specific server from the file store. -func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { - authConfig, ok := c.file.AuthConfigs[serverAddress] - if !ok { - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - for registry, ac := range c.file.AuthConfigs { - if serverAddress == convertToHostname(registry) { - return ac, nil - } - } - - authConfig = types.AuthConfig{} - } - return authConfig, nil -} - -func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { - return c.file.AuthConfigs, nil -} - -// Store saves the given credentials in the file store. -func (c *fileStore) Store(authConfig types.AuthConfig) error { - c.file.AuthConfigs[authConfig.ServerAddress] = authConfig - return c.file.Save() -} - -func convertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.Replace(url, "http://", "", 1) - } else if strings.HasPrefix(url, "https://") { - stripped = strings.Replace(url, "https://", "", 1) - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} diff --git a/cliconfig/credentials/file_store_test.go b/cliconfig/credentials/file_store_test.go deleted file mode 100644 index f087f04e7..000000000 --- a/cliconfig/credentials/file_store_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package credentials - -import ( - "io/ioutil" - "testing" - - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/engine-api/types" -) - -func newConfigFile(auths map[string]types.AuthConfig) *configfile.ConfigFile { - tmp, _ := ioutil.TempFile("", "docker-test") - name := tmp.Name() - tmp.Close() - - c := cliconfig.NewConfigFile(name) - c.AuthConfigs = auths - return c -} - -func TestFileStoreAddCredentials(t *testing.T) { - f := newConfigFile(make(map[string]types.AuthConfig)) - - s := NewFileStore(f) - err := s.Store(types.AuthConfig{ - Auth: "super_secret_token", - Email: "foo@example.com", - ServerAddress: "https://example.com", - }) - - if err != nil { - t.Fatal(err) - } - - if len(f.AuthConfigs) != 1 { - t.Fatalf("expected 1 auth config, got %d", len(f.AuthConfigs)) - } - - a, ok := f.AuthConfigs["https://example.com"] - if !ok { - t.Fatalf("expected auth for https://example.com, got %v", f.AuthConfigs) - } - if a.Auth != "super_secret_token" { - t.Fatalf("expected auth `super_secret_token`, got %s", a.Auth) - } - if a.Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", a.Email) - } -} - -func TestFileStoreGet(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - "https://example.com": { - Auth: "super_secret_token", - Email: "foo@example.com", - ServerAddress: "https://example.com", - }, - }) - - s := NewFileStore(f) - a, err := s.Get("https://example.com") - if err != nil { - t.Fatal(err) - } - if a.Auth != "super_secret_token" { - t.Fatalf("expected auth `super_secret_token`, got %s", a.Auth) - } - if a.Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", a.Email) - } -} - -func TestFileStoreGetAll(t *testing.T) { - s1 := "https://example.com" - s2 := "https://example2.com" - f := 
newConfigFile(map[string]types.AuthConfig{ - s1: { - Auth: "super_secret_token", - Email: "foo@example.com", - ServerAddress: "https://example.com", - }, - s2: { - Auth: "super_secret_token2", - Email: "foo@example2.com", - ServerAddress: "https://example2.com", - }, - }) - - s := NewFileStore(f) - as, err := s.GetAll() - if err != nil { - t.Fatal(err) - } - if len(as) != 2 { - t.Fatalf("wanted 2, got %d", len(as)) - } - if as[s1].Auth != "super_secret_token" { - t.Fatalf("expected auth `super_secret_token`, got %s", as[s1].Auth) - } - if as[s1].Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", as[s1].Email) - } - if as[s2].Auth != "super_secret_token2" { - t.Fatalf("expected auth `super_secret_token2`, got %s", as[s2].Auth) - } - if as[s2].Email != "foo@example2.com" { - t.Fatalf("expected email `foo@example2.com`, got %s", as[s2].Email) - } -} - -func TestFileStoreErase(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - "https://example.com": { - Auth: "super_secret_token", - Email: "foo@example.com", - ServerAddress: "https://example.com", - }, - }) - - s := NewFileStore(f) - err := s.Erase("https://example.com") - if err != nil { - t.Fatal(err) - } - - // file store never returns errors, check that the auth config is empty - a, err := s.Get("https://example.com") - if err != nil { - t.Fatal(err) - } - - if a.Auth != "" { - t.Fatalf("expected empty auth token, got %s", a.Auth) - } - if a.Email != "" { - t.Fatalf("expected empty email, got %s", a.Email) - } -} diff --git a/cliconfig/credentials/native_store.go b/cliconfig/credentials/native_store.go deleted file mode 100644 index 1717ce7b3..000000000 --- a/cliconfig/credentials/native_store.go +++ /dev/null @@ -1,126 +0,0 @@ -package credentials - -import ( - "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/engine-api/types" -) - -const ( - remoteCredentialsPrefix = "docker-credential-" - tokenUsername = "" -) - -// nativeStore implements a credentials store -// using native keychain to keep credentials secure. -// It piggybacks into a file store to keep users' emails. -type nativeStore struct { - programFunc client.ProgramFunc - fileStore Store -} - -// NewNativeStore creates a new native store that -// uses a remote helper program to manage credentials. -func NewNativeStore(file *configfile.ConfigFile) Store { - name := remoteCredentialsPrefix + file.CredentialsStore - return &nativeStore{ - programFunc: client.NewShellProgramFunc(name), - fileStore: NewFileStore(file), - } -} - -// Erase removes the given credentials from the native store. -func (c *nativeStore) Erase(serverAddress string) error { - if err := client.Erase(c.programFunc, serverAddress); err != nil { - return err - } - - // Fallback to plain text store to remove email - return c.fileStore.Erase(serverAddress) -} - -// Get retrieves credentials for a specific server from the native store. -func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { - // load user email if it exist or an empty auth config. - auth, _ := c.fileStore.Get(serverAddress) - - creds, err := c.getCredentialsFromStore(serverAddress) - if err != nil { - return auth, err - } - auth.Username = creds.Username - auth.IdentityToken = creds.IdentityToken - auth.Password = creds.Password - - return auth, nil -} - -// GetAll retrieves all the credentials from the native store. 
-func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { - auths, _ := c.fileStore.GetAll() - - for s, ac := range auths { - creds, _ := c.getCredentialsFromStore(s) - ac.Username = creds.Username - ac.Password = creds.Password - ac.IdentityToken = creds.IdentityToken - auths[s] = ac - } - - return auths, nil -} - -// Store saves the given credentials in the file store. -func (c *nativeStore) Store(authConfig types.AuthConfig) error { - if err := c.storeCredentialsInStore(authConfig); err != nil { - return err - } - authConfig.Username = "" - authConfig.Password = "" - authConfig.IdentityToken = "" - - // Fallback to old credential in plain text to save only the email - return c.fileStore.Store(authConfig) -} - -// storeCredentialsInStore executes the command to store the credentials in the native store. -func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { - creds := &credentials.Credentials{ - ServerURL: config.ServerAddress, - Username: config.Username, - Secret: config.Password, - } - - if config.IdentityToken != "" { - creds.Username = tokenUsername - creds.Secret = config.IdentityToken - } - - return client.Store(c.programFunc, creds) -} - -// getCredentialsFromStore executes the command to get the credentials from the native store. -func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { - var ret types.AuthConfig - - creds, err := client.Get(c.programFunc, serverAddress) - if err != nil { - if credentials.IsErrCredentialsNotFound(err) { - // do not return an error if the credentials are not - // in the keychain. Let docker ask for new credentials. - return ret, nil - } - return ret, err - } - - if creds.Username == tokenUsername { - ret.IdentityToken = creds.Secret - } else { - ret.Password = creds.Secret - ret.Username = creds.Username - } - - ret.ServerAddress = serverAddress - return ret, nil -} diff --git a/cliconfig/credentials/native_store_test.go b/cliconfig/credentials/native_store_test.go deleted file mode 100644 index 952e447df..000000000 --- a/cliconfig/credentials/native_store_test.go +++ /dev/null @@ -1,356 +0,0 @@ -package credentials - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "strings" - "testing" - - "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" - "github.com/docker/engine-api/types" -) - -const ( - validServerAddress = "https://index.docker.io/v1" - validServerAddress2 = "https://example.com:5002" - invalidServerAddress = "https://foobar.example.com" - missingCredsAddress = "https://missing.docker.io/v1" -) - -var errCommandExited = fmt.Errorf("exited 1") - -// mockCommand simulates interactions between the docker client and a remote -// credentials helper. -// Unit tests inject this mocked command into the remote to control execution. -type mockCommand struct { - arg string - input io.Reader -} - -// Output returns responses from the remote credentials helper. -// It mocks those responses based on the input in the mock.
-func (m *mockCommand) Output() ([]byte, error) { - in, err := ioutil.ReadAll(m.input) - if err != nil { - return nil, err - } - inS := string(in) - - switch m.arg { - case "erase": - switch inS { - case validServerAddress: - return nil, nil - default: - return []byte("program failed"), errCommandExited - } - case "get": - switch inS { - case validServerAddress: - return []byte(`{"Username": "foo", "Secret": "bar"}`), nil - case validServerAddress2: - return []byte(`{"Username": "", "Secret": "abcd1234"}`), nil - case missingCredsAddress: - return []byte(credentials.NewErrCredentialsNotFound().Error()), errCommandExited - case invalidServerAddress: - return []byte("program failed"), errCommandExited - } - case "store": - var c credentials.Credentials - err := json.NewDecoder(strings.NewReader(inS)).Decode(&c) - if err != nil { - return []byte("program failed"), errCommandExited - } - switch c.ServerURL { - case validServerAddress: - return nil, nil - default: - return []byte("program failed"), errCommandExited - } - } - - return []byte(fmt.Sprintf("unknown argument %q with %q", m.arg, inS)), errCommandExited -} - -// Input sets the input to send to a remote credentials helper. -func (m *mockCommand) Input(in io.Reader) { - m.input = in -} - -func mockCommandFn(args ...string) client.Program { - return &mockCommand{ - arg: args[0], - } -} - -func TestNativeStoreAddCredentials(t *testing.T) { - f := newConfigFile(make(map[string]types.AuthConfig)) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - err := s.Store(types.AuthConfig{ - Username: "foo", - Password: "bar", - Email: "foo@example.com", - ServerAddress: validServerAddress, - }) - - if err != nil { - t.Fatal(err) - } - - if len(f.AuthConfigs) != 1 { - t.Fatalf("expected 1 auth config, got %d", len(f.AuthConfigs)) - } - - a, ok := f.AuthConfigs[validServerAddress] - if !ok { - t.Fatalf("expected auth for %s, got %v", validServerAddress, f.AuthConfigs) - } - if a.Auth != "" { - t.Fatalf("expected auth to be empty, got %s", a.Auth) - } - if a.Username != "" { - t.Fatalf("expected username to be empty, got %s", a.Username) - } - if a.Password != "" { - t.Fatalf("expected password to be empty, got %s", a.Password) - } - if a.IdentityToken != "" { - t.Fatalf("expected identity token to be empty, got %s", a.IdentityToken) - } - if a.Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", a.Email) - } -} - -func TestNativeStoreAddInvalidCredentials(t *testing.T) { - f := newConfigFile(make(map[string]types.AuthConfig)) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - err := s.Store(types.AuthConfig{ - Username: "foo", - Password: "bar", - Email: "foo@example.com", - ServerAddress: invalidServerAddress, - }) - - if err == nil { - t.Fatal("expected error, got nil") - } - - if !strings.Contains(err.Error(), "program failed") { - t.Fatalf("expected `program failed`, got %v", err) - } - - if len(f.AuthConfigs) != 0 { - t.Fatalf("expected 0 auth config, got %d", len(f.AuthConfigs)) - } -} - -func TestNativeStoreGet(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - a, err := s.Get(validServerAddress) - if err != nil { - t.Fatal(err) - } - - if a.Username != "foo" { - t.Fatalf("expected 
username `foo`, got %s", a.Username) - } - if a.Password != "bar" { - t.Fatalf("expected password `bar`, got %s", a.Password) - } - if a.IdentityToken != "" { - t.Fatalf("expected identity token to be empty, got %s", a.IdentityToken) - } - if a.Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", a.Email) - } -} - -func TestNativeStoreGetIdentityToken(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress2: { - Email: "foo@example2.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - a, err := s.Get(validServerAddress2) - if err != nil { - t.Fatal(err) - } - - if a.Username != "" { - t.Fatalf("expected username to be empty, got %s", a.Username) - } - if a.Password != "" { - t.Fatalf("expected password to be empty, got %s", a.Password) - } - if a.IdentityToken != "abcd1234" { - t.Fatalf("expected identity token `abcd1234`, got %s", a.IdentityToken) - } - if a.Email != "foo@example2.com" { - t.Fatalf("expected email `foo@example2.com`, got %s", a.Email) - } -} - -func TestNativeStoreGetAll(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - validServerAddress2: { - Email: "foo@example2.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - as, err := s.GetAll() - if err != nil { - t.Fatal(err) - } - - if len(as) != 2 { - t.Fatalf("wanted 2, got %d", len(as)) - } - - if as[validServerAddress].Username != "foo" { - t.Fatalf("expected username `foo` for %s, got %s", validServerAddress, as[validServerAddress].Username) - } - if as[validServerAddress].Password != "bar" { - t.Fatalf("expected password `bar` for %s, got %s", validServerAddress, as[validServerAddress].Password) - } - if as[validServerAddress].IdentityToken != "" { - t.Fatalf("expected identity token to be empty for %s, got %s", validServerAddress, as[validServerAddress].IdentityToken) - } - if as[validServerAddress].Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com` for %s, got %s", validServerAddress, as[validServerAddress].Email) - } - if as[validServerAddress2].Username != "" { - t.Fatalf("expected username to be empty for %s, got %s", validServerAddress2, as[validServerAddress2].Username) - } - if as[validServerAddress2].Password != "" { - t.Fatalf("expected password to be empty for %s, got %s", validServerAddress2, as[validServerAddress2].Password) - } - if as[validServerAddress2].IdentityToken != "abcd1234" { - t.Fatalf("expected identity token `abcd1234` for %s, got %s", validServerAddress2, as[validServerAddress2].IdentityToken) - } - if as[validServerAddress2].Email != "foo@example2.com" { - t.Fatalf("expected email `foo@example2.com` for %s, got %s", validServerAddress2, as[validServerAddress2].Email) - } -} - -func TestNativeStoreGetMissingCredentials(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - _, err := s.Get(missingCredsAddress) - if err != nil { - // missing credentials do not produce an error - t.Fatal(err) - } -} - -func TestNativeStoreGetInvalidAddress(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock"
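
The mockCommand above stands in for the real docker-credential-<name> helpers, which the native store execs with a single verb (store, get, or erase) and talks to over stdin/stdout in JSON. A sketch of the payload shapes, matching what storeCredentialsInStore marshals and what the mock's get branch returns:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/docker/docker-credential-helpers/credentials"
    )

    func main() {
        // Sent on stdin to `docker-credential-<name> store`:
        in, _ := json.Marshal(credentials.Credentials{
            ServerURL: "https://index.docker.io/v1",
            Username:  "foo",
            Secret:    "bar",
        })
        fmt.Println(string(in))
        // `get` receives only the server URL on stdin and answers with the
        // same JSON shape, e.g. {"Username":"foo","Secret":"bar"}.
    }
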
- - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - _, err := s.Get(invalidServerAddress) - if err == nil { - t.Fatal("expected error, got nil") - } - - if !strings.Contains(err.Error(), "program failed") { - t.Fatalf("expected `program failed`, got %v", err) - } -} - -func TestNativeStoreErase(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - err := s.Erase(validServerAddress) - if err != nil { - t.Fatal(err) - } - - if len(f.AuthConfigs) != 0 { - t.Fatalf("expected 0 auth configs, got %d", len(f.AuthConfigs)) - } -} - -func TestNativeStoreEraseInvalidAddress(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - err := s.Erase(invalidServerAddress) - if err == nil { - t.Fatal("expected error, got nil") - } - - if !strings.Contains(err.Error(), "program failed") { - t.Fatalf("expected `program failed`, got %v", err) - } -} diff --git a/cmd/docker/daemon.go b/cmd/docker/daemon.go deleted file mode 100644 index 8fe348476..000000000 --- a/cmd/docker/daemon.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -const daemonBinary = "dockerd" - -// DaemonProxy acts as a cli.Handler to proxy calls to the daemon binary -type DaemonProxy struct{} - -// NewDaemonProxy returns a new handler -func NewDaemonProxy() DaemonProxy { - return DaemonProxy{} -} - -// Command returns a cli command handler if one exists -func (p DaemonProxy) Command(name string) func(...string) error { - return map[string]func(...string) error{ - "daemon": p.CmdDaemon, - }[name] -} diff --git a/cmd/docker/daemon_none.go b/cmd/docker/daemon_none.go deleted file mode 100644 index d66bf1a54..000000000 --- a/cmd/docker/daemon_none.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !daemon - -package main - -import ( - "fmt" - "runtime" - "strings" -) - -// CmdDaemon reports on an error on windows, because there is no exec -func (p DaemonProxy) CmdDaemon(args ...string) error { - return fmt.Errorf( - "`docker daemon` is not supported on %s. Please run `dockerd` directly", - strings.Title(runtime.GOOS)) -} diff --git a/cmd/docker/daemon_none_test.go b/cmd/docker/daemon_none_test.go deleted file mode 100644 index d75453bcc..000000000 --- a/cmd/docker/daemon_none_test.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !daemon - -package main - -import ( - "strings" - "testing" -) - -func TestCmdDaemon(t *testing.T) { - proxy := NewDaemonProxy() - err := proxy.CmdDaemon("--help") - if err == nil { - t.Fatal("Expected CmdDaemon to fail on Windows.") - } - - if !strings.Contains(err.Error(), "Please run `dockerd`") { - t.Fatalf("Expected an error about running dockerd, got %s", err) - } -} diff --git a/cmd/docker/daemon_unix.go b/cmd/docker/daemon_unix.go deleted file mode 100644 index d515b8291..000000000 --- a/cmd/docker/daemon_unix.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build daemon - -package main - -import ( - "os" - "os/exec" - "path/filepath" - "syscall" -) - -// CmdDaemon execs dockerd with the same flags -func (p DaemonProxy) CmdDaemon(args ...string) error { - // Special case for handling `docker help daemon`. 
When pkg/mflag is removed - // we can support this on the daemon side, but that is not possible with - // pkg/mflag because it uses os.Exit(1) instead of returning an error on - // unexpected args. - if len(args) == 0 || args[0] != "--help" { - // Use os.Args[1:] so that "global" args are passed to dockerd - args = stripDaemonArg(os.Args[1:]) - } - - binaryPath, err := findDaemonBinary() - if err != nil { - return err - } - - return syscall.Exec( - binaryPath, - append([]string{daemonBinary}, args...), - os.Environ()) -} - -// findDaemonBinary looks for the path to the dockerd binary starting with -// the directory of the current executable (if one exists) and followed by $PATH -func findDaemonBinary() (string, error) { - execDirname := filepath.Dir(os.Args[0]) - if execDirname != "" { - binaryPath := filepath.Join(execDirname, daemonBinary) - if _, err := os.Stat(binaryPath); err == nil { - return binaryPath, nil - } - } - - return exec.LookPath(daemonBinary) -} - -// stripDaemonArg removes the `daemon` argument from the list -func stripDaemonArg(args []string) []string { - for i, arg := range args { - if arg == "daemon" { - return append(args[:i], args[i+1:]...) - } - } - return args -} diff --git a/cmd/docker/docker.go b/cmd/docker/docker.go deleted file mode 100644 index 0c727e32c..000000000 --- a/cmd/docker/docker.go +++ /dev/null @@ -1,118 +0,0 @@ -package main - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/cobraadaptor" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/dockerversion" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/utils" -) - -var ( - commonFlags = cliflags.InitCommonFlags() - clientFlags = initClientFlags(commonFlags) - flHelp = flag.Bool([]string{"h", "-help"}, false, "Print usage") - flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") -) - -func main() { - // Set terminal emulation based on platform as required. - stdin, stdout, stderr := term.StdStreams() - - logrus.SetOutput(stderr) - - flag.Merge(flag.CommandLine, clientFlags.FlagSet, commonFlags.FlagSet) - - cobraAdaptor := cobraadaptor.NewCobraAdaptor(clientFlags) - - flag.Usage = func() { - fmt.Fprint(stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n docker [ --help | -v | --version ]\n\n") - fmt.Fprint(stdout, "A self-sufficient runtime for containers.\n\nOptions:\n") - - flag.CommandLine.SetOutput(stdout) - flag.PrintDefaults() - - help := "\nCommands:\n" - - dockerCommands := append(cli.DockerCommandUsage, cobraAdaptor.Usage()...) - for _, cmd := range sortCommands(dockerCommands) { - help += fmt.Sprintf(" %-10.10s%s\n", cmd.Name, cmd.Description) - } - - help += "\nRun 'docker COMMAND --help' for more information on a command." - fmt.Fprintf(stdout, "%s\n", help) - } - - flag.Parse() - - if *flVersion { - showVersion() - return - } - - if *flHelp { - // if global flag --help is present, regardless of what other options and commands there are, - // just print the usage. 
- flag.Usage() - return - } - - clientCli := client.NewDockerCli(stdin, stdout, stderr, clientFlags) - - c := cli.New(clientCli, NewDaemonProxy(), cobraAdaptor) - if err := c.Run(flag.Args()...); err != nil { - if sterr, ok := err.(cli.StatusError); ok { - if sterr.Status != "" { - fmt.Fprintln(stderr, sterr.Status) - } - // StatusError should only be used for errors, and all errors should - // have a non-zero exit status, so never exit with 0 - if sterr.StatusCode == 0 { - os.Exit(1) - } - os.Exit(sterr.StatusCode) - } - fmt.Fprintln(stderr, err) - os.Exit(1) - } -} - -func showVersion() { - if utils.ExperimentalBuild() { - fmt.Printf("Docker version %s, build %s, experimental\n", dockerversion.Version, dockerversion.GitCommit) - } else { - fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) - } -} - -func initClientFlags(commonFlags *cliflags.CommonFlags) *cliflags.ClientFlags { - clientFlags := &cliflags.ClientFlags{FlagSet: new(flag.FlagSet), Common: commonFlags} - client := clientFlags.FlagSet - client.StringVar(&clientFlags.ConfigDir, []string{"-config"}, cliconfig.ConfigDir(), "Location of client config files") - - clientFlags.PostParse = func() { - clientFlags.Common.PostParse() - - if clientFlags.ConfigDir != "" { - cliconfig.SetConfigDir(clientFlags.ConfigDir) - } - - if clientFlags.Common.TrustKey == "" { - clientFlags.Common.TrustKey = filepath.Join(cliconfig.ConfigDir(), cliflags.DefaultTrustKeyFile) - } - - if clientFlags.Common.Debug { - utils.EnableDebug() - } - } - return clientFlags -} diff --git a/cmd/docker/docker_test.go b/cmd/docker/docker_test.go deleted file mode 100644 index 5708c96cb..000000000 --- a/cmd/docker/docker_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "os" - "testing" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/utils" -) - -func TestClientDebugEnabled(t *testing.T) { - defer utils.DisableDebug() - - clientFlags.Common.FlagSet.Parse([]string{"-D"}) - clientFlags.PostParse() - - if os.Getenv("DEBUG") != "1" { - t.Fatal("expected debug enabled, got false") - } - if logrus.GetLevel() != logrus.DebugLevel { - t.Fatalf("expected logrus debug level, got %v", logrus.GetLevel()) - } -} diff --git a/cmd/docker/docker_windows.go b/cmd/docker/docker_windows.go deleted file mode 100644 index 9bc507e20..000000000 --- a/cmd/docker/docker_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "sync/atomic" - - _ "github.com/docker/docker/autogen/winresources/docker" -) - -//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll" - -var dummy uintptr - -func init() { - // Ensure that this import is not removed by the linker. This is used to - // ensure that shell32.dll is loaded by the system loader, preventing - // go#15286 from triggering on Nano Server TP5. 
- atomic.LoadUintptr(&dummy) -} diff --git a/cmd/docker/usage.go b/cmd/docker/usage.go deleted file mode 100644 index 792d17807..000000000 --- a/cmd/docker/usage.go +++ /dev/null @@ -1,22 +0,0 @@ -package main - -import ( - "sort" - - "github.com/docker/docker/cli" -) - -type byName []cli.Command - -func (a byName) Len() int { return len(a) } -func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byName) Less(i, j int) bool { return a[i].Name < a[j].Name } - -// TODO(tiborvass): do not show 'daemon' on client-only binaries - -func sortCommands(commands []cli.Command) []cli.Command { - dockerCommands := make([]cli.Command, len(commands)) - copy(dockerCommands, commands) - sort.Sort(byName(dockerCommands)) - return dockerCommands -} diff --git a/cmd/docker/usage_test.go b/cmd/docker/usage_test.go deleted file mode 100644 index 0453265db..000000000 --- a/cmd/docker/usage_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package main - -import ( - "sort" - "testing" - - "github.com/docker/docker/cli" -) - -// Tests if the subcommands of docker are sorted -func TestDockerSubcommandsAreSorted(t *testing.T) { - if !sort.IsSorted(byName(cli.DockerCommandUsage)) { - t.Fatal("Docker subcommands are not in sorted order") - } -} diff --git a/cmd/dockerd/README.md b/cmd/dockerd/README.md deleted file mode 100644 index 015bc1333..000000000 --- a/cmd/dockerd/README.md +++ /dev/null @@ -1,3 +0,0 @@ -docker.go contains Docker's main function. - -This file provides first line CLI argument parsing and environment variable setting. diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go deleted file mode 100644 index 3713321db..000000000 --- a/cmd/dockerd/daemon.go +++ /dev/null @@ -1,449 +0,0 @@ -package main - -import ( - "crypto/tls" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/uuid" - "github.com/docker/docker/api" - apiserver "github.com/docker/docker/api/server" - "github.com/docker/docker/api/server/middleware" - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/api/server/router/build" - "github.com/docker/docker/api/server/router/container" - "github.com/docker/docker/api/server/router/image" - "github.com/docker/docker/api/server/router/network" - swarmrouter "github.com/docker/docker/api/server/router/swarm" - systemrouter "github.com/docker/docker/api/server/router/system" - "github.com/docker/docker/api/server/router/volume" - "github.com/docker/docker/builder/dockerfile" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/daemon" - "github.com/docker/docker/daemon/cluster" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/authorization" - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/listeners" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/pidfile" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/registry" - "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" - "github.com/docker/go-connections/tlsconfig" -) - -const ( - daemonConfigFileFlag = "-config-file" -) - -// DaemonCli represents the daemon CLI. 
-type DaemonCli struct { - *daemon.Config - commonFlags *cliflags.CommonFlags - configFile *string - - api *apiserver.Server - d *daemon.Daemon -} - -func presentInHelp(usage string) string { return usage } -func absentFromHelp(string) string { return "" } - -// NewDaemonCli returns a pre-configured daemon CLI -func NewDaemonCli() *DaemonCli { - // TODO(tiborvass): remove InstallFlags? - daemonConfig := new(daemon.Config) - daemonConfig.LogConfig.Config = make(map[string]string) - daemonConfig.ClusterOpts = make(map[string]string) - - if runtime.GOOS != "linux" { - daemonConfig.V2Only = true - } - - daemonConfig.InstallFlags(flag.CommandLine, presentInHelp) - configFile := flag.CommandLine.String([]string{daemonConfigFileFlag}, defaultDaemonConfigFile, "Daemon configuration file") - flag.CommandLine.Require(flag.Exact, 0) - - return &DaemonCli{ - Config: daemonConfig, - commonFlags: cliflags.InitCommonFlags(), - configFile: configFile, - } -} - -func migrateKey() (err error) { - // Migrate trust key if exists at ~/.docker/key.json and owned by current user - oldPath := filepath.Join(cliconfig.ConfigDir(), cliflags.DefaultTrustKeyFile) - newPath := filepath.Join(getDaemonConfDir(), cliflags.DefaultTrustKeyFile) - if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) { - defer func() { - // Ensure old path is removed if no error occurred - if err == nil { - err = os.Remove(oldPath) - } else { - logrus.Warnf("Key migration failed, key file not removed at %s", oldPath) - os.Remove(newPath) - } - }() - - if err := system.MkdirAll(getDaemonConfDir(), os.FileMode(0644)); err != nil { - return fmt.Errorf("Unable to create daemon configuration directory: %s", err) - } - - newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return fmt.Errorf("error creating key file %q: %s", newPath, err) - } - defer newFile.Close() - - oldFile, err := os.Open(oldPath) - if err != nil { - return fmt.Errorf("error opening key file %q: %s", oldPath, err) - } - defer oldFile.Close() - - if _, err := io.Copy(newFile, oldFile); err != nil { - return fmt.Errorf("error copying key: %s", err) - } - - logrus.Infof("Migrated key from %s to %s", oldPath, newPath) - } - - return nil -} - -func (cli *DaemonCli) start() (err error) { - stopc := make(chan bool) - defer close(stopc) - - // warn from uuid package when running the daemon - uuid.Loggerf = logrus.Warnf - - flags := flag.CommandLine - cli.commonFlags.PostParse() - - if cli.commonFlags.TrustKey == "" { - cli.commonFlags.TrustKey = filepath.Join(getDaemonConfDir(), cliflags.DefaultTrustKeyFile) - } - cliConfig, err := loadDaemonCliConfig(cli.Config, flags, cli.commonFlags, *cli.configFile) - if err != nil { - return err - } - cli.Config = cliConfig - - if cli.Config.Debug { - utils.EnableDebug() - } - - if utils.ExperimentalBuild() { - logrus.Warn("Running experimental build") - } - - logrus.SetFormatter(&logrus.TextFormatter{ - TimestampFormat: jsonlog.RFC3339NanoFixed, - DisableColors: cli.Config.RawLogs, - }) - - if err := setDefaultUmask(); err != nil { - return fmt.Errorf("Failed to set umask: %v", err) - } - - if len(cli.LogConfig.Config) > 0 { - if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { - return fmt.Errorf("Failed to set log opts: %v", err) - } - } - - if cli.Pidfile != "" { - pf, err := pidfile.New(cli.Pidfile) - if err != nil { - return fmt.Errorf("Error starting daemon: %v", err) - } - defer func() { - if err := pf.Remove(); err != nil { 
- logrus.Error(err) - } - }() - } - - serverConfig := &apiserver.Config{ - Logging: true, - SocketGroup: cli.Config.SocketGroup, - Version: dockerversion.Version, - EnableCors: cli.Config.EnableCors, - CorsHeaders: cli.Config.CorsHeaders, - } - - if cli.Config.TLS { - tlsOptions := tlsconfig.Options{ - CAFile: cli.Config.CommonTLSOptions.CAFile, - CertFile: cli.Config.CommonTLSOptions.CertFile, - KeyFile: cli.Config.CommonTLSOptions.KeyFile, - } - - if cli.Config.TLSVerify { - // server requires and verifies client's certificate - tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert - } - tlsConfig, err := tlsconfig.Server(tlsOptions) - if err != nil { - return err - } - serverConfig.TLSConfig = tlsConfig - } - - if len(cli.Config.Hosts) == 0 { - cli.Config.Hosts = make([]string, 1) - } - - api := apiserver.New(serverConfig) - cli.api = api - - for i := 0; i < len(cli.Config.Hosts); i++ { - var err error - if cli.Config.Hosts[i], err = opts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { - return fmt.Errorf("error parsing -H %s : %v", cli.Config.Hosts[i], err) - } - - protoAddr := cli.Config.Hosts[i] - protoAddrParts := strings.SplitN(protoAddr, "://", 2) - if len(protoAddrParts) != 2 { - return fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) - } - - proto := protoAddrParts[0] - addr := protoAddrParts[1] - - // It's a bad idea to bind to TCP without tlsverify. - if proto == "tcp" && (serverConfig.TLSConfig == nil || serverConfig.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert) { - logrus.Warn("[!] DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING [!]") - } - ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) - if err != nil { - return err - } - ls = wrapListeners(proto, ls) - // If we're binding to a TCP port, make sure that a container doesn't try to use it. - if proto == "tcp" { - if err := allocateDaemonPort(addr); err != nil { - return err - } - } - logrus.Debugf("Listener created for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) - api.Accept(protoAddrParts[1], ls...) - } - - if err := migrateKey(); err != nil { - return err - } - cli.TrustKeyPath = cli.commonFlags.TrustKey - - registryService := registry.NewService(cli.Config.ServiceOptions) - containerdRemote, err := libcontainerd.New(cli.getLibcontainerdRoot(), cli.getPlatformRemoteOptions()...) 
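(Aside: the listener loop above hinges on splitting each `-H` value exactly once on `://`. A standalone sketch of that split, with an illustrative address:)

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        protoAddr := "tcp://0.0.0.0:2376" // illustrative -H value
        parts := strings.SplitN(protoAddr, "://", 2)
        if len(parts) != 2 {
            fmt.Printf("bad format %s, expected PROTO://ADDR\n", protoAddr)
            return
        }
        proto, addr := parts[0], parts[1]
        fmt.Println(proto, addr) // tcp 0.0.0.0:2376
    }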
- if err != nil { - return err - } - cli.api = api - signal.Trap(func() { - cli.stop() - <-stopc // wait for daemonCli.start() to return - }) - - if err := pluginInit(cli.Config, containerdRemote, registryService); err != nil { - return err - } - - d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote) - if err != nil { - return fmt.Errorf("Error starting daemon: %v", err) - } - - name, _ := os.Hostname() - - c, err := cluster.New(cluster.Config{ - Root: cli.Config.Root, - Name: name, - Backend: d, - }) - if err != nil { - logrus.Fatalf("Error creating cluster component: %v", err) - } - - logrus.Info("Daemon has completed initialization") - - logrus.WithFields(logrus.Fields{ - "version": dockerversion.Version, - "commit": dockerversion.GitCommit, - "graphdriver": d.GraphDriverName(), - }).Info("Docker daemon") - - cli.initMiddlewares(api, serverConfig) - initRouter(api, d, c) - - cli.d = d - cli.setupConfigReloadTrap() - - // The serve API routine never exits unless an error occurs - // We need to start it as a goroutine and wait on it so - // daemon doesn't exit - serveAPIWait := make(chan error) - go api.Wait(serveAPIWait) - - // after the daemon is done setting up we can notify systemd api - notifySystem() - - // Daemon is fully initialized and handling API traffic - // Wait for serve API to complete - errAPI := <-serveAPIWait - c.Cleanup() - shutdownDaemon(d, 15) - containerdRemote.Cleanup() - if errAPI != nil { - return fmt.Errorf("Shutting down due to ServeAPI error: %v", errAPI) - } - - return nil -} - -func (cli *DaemonCli) reloadConfig() { - reload := func(config *daemon.Config) { - if err := cli.d.Reload(config); err != nil { - logrus.Errorf("Error reconfiguring the daemon: %v", err) - return - } - if config.IsValueSet("debug") { - debugEnabled := utils.IsDebugEnabled() - switch { - case debugEnabled && !config.Debug: // disable debug - utils.DisableDebug() - cli.api.DisableProfiler() - case config.Debug && !debugEnabled: // enable debug - utils.EnableDebug() - cli.api.EnableProfiler() - } - - } - } - - if err := daemon.ReloadConfiguration(*cli.configFile, flag.CommandLine, reload); err != nil { - logrus.Error(err) - } -} - -func (cli *DaemonCli) stop() { - cli.api.Close() -} - -// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case -// d.Shutdown() is waiting too long to kill container or worst it's -// blocked there -func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) { - ch := make(chan struct{}) - go func() { - d.Shutdown() - close(ch) - }() - select { - case <-ch: - logrus.Debug("Clean shutdown succeeded") - case <-time.After(timeout * time.Second): - logrus.Error("Force shutdown daemon") - } -} - -func loadDaemonCliConfig(config *daemon.Config, flags *flag.FlagSet, commonConfig *cliflags.CommonFlags, configFile string) (*daemon.Config, error) { - config.Debug = commonConfig.Debug - config.Hosts = commonConfig.Hosts - config.LogLevel = commonConfig.LogLevel - config.TLS = commonConfig.TLS - config.TLSVerify = commonConfig.TLSVerify - config.CommonTLSOptions = daemon.CommonTLSOptions{} - - if commonConfig.TLSOptions != nil { - config.CommonTLSOptions.CAFile = commonConfig.TLSOptions.CAFile - config.CommonTLSOptions.CertFile = commonConfig.TLSOptions.CertFile - config.CommonTLSOptions.KeyFile = commonConfig.TLSOptions.KeyFile - } - - if configFile != "" { - c, err := daemon.MergeDaemonConfigurations(config, flags, configFile) - if err != nil { - if flags.IsSet(daemonConfigFileFlag) || !os.IsNotExist(err) { - return nil, 
fmt.Errorf("unable to configure the Docker daemon with file %s: %v\n", configFile, err) - } - } - // the merged configuration can be nil if the config file didn't exist. - // leave the current configuration as it is if when that happens. - if c != nil { - config = c - } - } - - if err := daemon.ValidateConfiguration(config); err != nil { - return nil, err - } - - // Regardless of whether the user sets it to true or false, if they - // specify TLSVerify at all then we need to turn on TLS - if config.IsValueSet(cliflags.TLSVerifyKey) { - config.TLS = true - } - - // ensure that the log level is the one set after merging configurations - cliflags.SetDaemonLogLevel(config.LogLevel) - - return config, nil -} - -func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) { - decoder := runconfig.ContainerDecoder{} - - routers := []router.Router{ - container.NewRouter(d, decoder), - image.NewRouter(d, decoder), - systemrouter.NewRouter(d, c), - volume.NewRouter(d), - build.NewRouter(dockerfile.NewBuildManager(d)), - swarmrouter.NewRouter(c), - } - if d.NetworkControllerEnabled() { - routers = append(routers, network.NewRouter(d, c)) - } - routers = addExperimentalRouters(routers) - - s.InitRouter(utils.IsDebugEnabled(), routers...) -} - -func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config) { - v := cfg.Version - - vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) - s.UseMiddleware(vm) - - if cfg.EnableCors { - c := middleware.NewCORSMiddleware(cfg.CorsHeaders) - s.UseMiddleware(c) - } - - u := middleware.NewUserAgentMiddleware(v) - s.UseMiddleware(u) - - if len(cli.Config.AuthorizationPlugins) > 0 { - authZPlugins := authorization.NewPlugins(cli.Config.AuthorizationPlugins) - handleAuthorization := authorization.NewMiddleware(authZPlugins) - s.UseMiddleware(handleAuthorization) - } -} diff --git a/cmd/dockerd/daemon_freebsd.go b/cmd/dockerd/daemon_freebsd.go deleted file mode 100644 index 623aaf4b0..000000000 --- a/cmd/dockerd/daemon_freebsd.go +++ /dev/null @@ -1,5 +0,0 @@ -package main - -// notifySystem sends a message to the host when the server is ready to be used -func notifySystem() { -} diff --git a/cmd/dockerd/daemon_linux.go b/cmd/dockerd/daemon_linux.go deleted file mode 100644 index a556daa18..000000000 --- a/cmd/dockerd/daemon_linux.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux - -package main - -import systemdDaemon "github.com/coreos/go-systemd/daemon" - -// notifySystem sends a message to the host when the server is ready to be used -func notifySystem() { - // Tell the init daemon we are accepting requests - go systemdDaemon.SdNotify("READY=1") -} diff --git a/cmd/dockerd/daemon_no_plugin_support.go b/cmd/dockerd/daemon_no_plugin_support.go deleted file mode 100644 index 17f249190..000000000 --- a/cmd/dockerd/daemon_no_plugin_support.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !experimental !linux - -package main - -import ( - "github.com/docker/docker/daemon" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/registry" -) - -func pluginInit(config *daemon.Config, remote libcontainerd.Remote, rs registry.Service) error { - return nil -} diff --git a/cmd/dockerd/daemon_plugin_support.go b/cmd/dockerd/daemon_plugin_support.go deleted file mode 100644 index cbbfe67a4..000000000 --- a/cmd/dockerd/daemon_plugin_support.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build linux,experimental - -package main - -import ( - "github.com/docker/docker/daemon" - "github.com/docker/docker/libcontainerd" - 
"github.com/docker/docker/plugin" - "github.com/docker/docker/registry" -) - -func pluginInit(config *daemon.Config, remote libcontainerd.Remote, rs registry.Service) error { - return plugin.Init(config.Root, config.ExecRoot, remote, rs, config.LiveRestore) -} diff --git a/cmd/dockerd/daemon_solaris.go b/cmd/dockerd/daemon_solaris.go deleted file mode 100644 index a0f490860..000000000 --- a/cmd/dockerd/daemon_solaris.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build solaris - -package main - -import ( - "fmt" - "net" - "os" - "path/filepath" - "syscall" - - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/system" -) - -const defaultDaemonConfigFile = "" - -// currentUserIsOwner checks whether the current user is the owner of the given -// file. -func currentUserIsOwner(f string) bool { - if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { - if int(fileInfo.UID()) == os.Getuid() { - return true - } - } - return false -} - -// setDefaultUmask sets the umask to 0022 to avoid problems -// caused by custom umask -func setDefaultUmask() error { - desiredUmask := 0022 - syscall.Umask(desiredUmask) - if umask := syscall.Umask(desiredUmask); umask != desiredUmask { - return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) - } - - return nil -} - -func getDaemonConfDir() string { - return "/etc/docker" -} - -// setupConfigReloadTrap configures the USR2 signal to reload the configuration. -func (cli *DaemonCli) setupConfigReloadTrap() { -} - -// notifySystem sends a message to the host when the server is ready to be used -func notifySystem() { -} - -func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { - opts := []libcontainerd.RemoteOption{} - return opts -} - -// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to -// store their state. -func (cli *DaemonCli) getLibcontainerdRoot() string { - return filepath.Join(cli.Config.ExecRoot, "libcontainerd") -} - -func allocateDaemonPort(addr string) error { - return nil -} - -// notifyShutdown is called after the daemon shuts down but before the process exits. 
-func notifyShutdown(err error) { -} - -func wrapListeners(proto string, ls []net.Listener) []net.Listener { - return ls -} diff --git a/cmd/dockerd/daemon_test.go b/cmd/dockerd/daemon_test.go deleted file mode 100644 index c16e11aec..000000000 --- a/cmd/dockerd/daemon_test.go +++ /dev/null @@ -1,294 +0,0 @@ -package main - -import ( - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/Sirupsen/logrus" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/daemon" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/mflag" - "github.com/docker/go-connections/tlsconfig" -) - -func TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{ - Debug: true, - } - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - loadedConfig, err := loadDaemonCliConfig(c, flags, common, "/tmp/fooobarbaz") - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - if !loadedConfig.Debug { - t.Fatalf("expected debug to be copied from the common flags, got false") - } -} - -func TestLoadDaemonCliConfigWithTLS(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{ - TLS: true, - TLSOptions: &tlsconfig.Options{ - CAFile: "/tmp/ca.pem", - }, - } - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - loadedConfig, err := loadDaemonCliConfig(c, flags, common, "/tmp/fooobarbaz") - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - if loadedConfig.CommonTLSOptions.CAFile != "/tmp/ca.pem" { - t.Fatalf("expected /tmp/ca.pem, got %s: %q", loadedConfig.CommonTLSOptions.CAFile, loadedConfig) - } -} - -func TestLoadDaemonCliConfigWithConflicts(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{"labels": ["l3=foo"]}`)) - f.Close() - - var labels []string - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.String([]string{daemonConfigFileFlag}, "", "") - flags.Var(opts.NewNamedListOptsRef("labels", &labels, opts.ValidateLabel), []string{"-label"}, "") - - flags.Set(daemonConfigFileFlag, configFile) - if err := flags.Set("-label", "l1=bar"); err != nil { - t.Fatal(err) - } - if err := flags.Set("-label", "l2=baz"); err != nil { - t.Fatal(err) - } - - _, err = loadDaemonCliConfig(c, flags, common, configFile) - if err == nil { - t.Fatalf("expected configuration error, got nil") - } - if !strings.Contains(err.Error(), "labels") { - t.Fatalf("expected labels conflict, got %v", err) - } -} - -func TestLoadDaemonCliConfigWithTLSVerify(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{ - TLSOptions: &tlsconfig.Options{ - CAFile: "/tmp/ca.pem", - }, - } - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{"tlsverify": true}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.Bool([]string{"-tlsverify"}, false, "") - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - - if !loadedConfig.TLS { - t.Fatalf("expected TLS enabled, got %q", loadedConfig) - } -} - -func 
TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{ - TLSOptions: &tlsconfig.Options{ - CAFile: "/tmp/ca.pem", - }, - } - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{"tlsverify": false}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.Bool([]string{"-tlsverify"}, false, "") - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - - if !loadedConfig.TLS { - t.Fatalf("expected TLS enabled, got %q", loadedConfig) - } -} - -func TestLoadDaemonCliConfigWithoutTLSVerify(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{ - TLSOptions: &tlsconfig.Options{ - CAFile: "/tmp/ca.pem", - }, - } - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - - if loadedConfig.TLS { - t.Fatalf("expected TLS disabled, got %q", loadedConfig) - } -} - -func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{"log-level": "warn"}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.String([]string{"-log-level"}, "", "") - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - if loadedConfig.LogLevel != "warn" { - t.Fatalf("expected warn log level, got %v", loadedConfig.LogLevel) - } - - if logrus.GetLevel() != logrus.WarnLevel { - t.Fatalf("expected warn log level, got %v", logrus.GetLevel()) - } -} - -func TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.String([]string{"-tlscacert"}, "", "") - flags.String([]string{"-log-driver"}, "", "") - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{"tlscacert": "/etc/certs/ca.pem", "log-driver": "syslog"}`)) - f.Close() - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatal("expected configuration, got nil") - } - if loadedConfig.CommonTLSOptions.CAFile != "/etc/certs/ca.pem" { - t.Fatalf("expected CA file path /etc/certs/ca.pem, got %v", loadedConfig.CommonTLSOptions.CAFile) - } - if loadedConfig.LogConfig.Type != "syslog" { - t.Fatalf("expected LogConfig type syslog, got %v", loadedConfig.LogConfig.Type) - } -} - -func TestLoadDaemonConfigWithRegistryOptions(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - 
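(Aside: the two TLSVerify tests above pin down a subtle rule that loadDaemonCliConfig enforces earlier in this patch: specifying `tlsverify` at all, even as `false`, switches TLS on. A one-line restatement; the helper name is ours, for illustration only:)

    // effectiveTLS is illustrative, not part of this patch: TLS is on if the
    // --tls flag is set, or if tlsverify was specified with any value at all.
    func effectiveTLS(tlsFlag, tlsVerifySpecified bool) bool {
        return tlsFlag || tlsVerifySpecified
    }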
c.ServiceOptions.InstallCliFlags(flags, absentFromHelp) - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{"registry-mirrors": ["https://mirrors.docker.com"], "insecure-registries": ["https://insecure.docker.com"], "disable-legacy-registry": true}`)) - f.Close() - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatal("expected configuration, got nil") - } - - m := loadedConfig.Mirrors - if len(m) != 1 { - t.Fatalf("expected 1 mirror, got %d", len(m)) - } - - r := loadedConfig.InsecureRegistries - if len(r) != 1 { - t.Fatalf("expected 1 insecure registries, got %d", len(r)) - } - - if !loadedConfig.V2Only { - t.Fatal("expected disable-legacy-registry to be true, got false") - } -} diff --git a/cmd/dockerd/daemon_unix.go b/cmd/dockerd/daemon_unix.go deleted file mode 100644 index 114e9426f..000000000 --- a/cmd/dockerd/daemon_unix.go +++ /dev/null @@ -1,131 +0,0 @@ -// +build !windows,!solaris - -package main - -import ( - "fmt" - "net" - "os" - "os/signal" - "path/filepath" - "strconv" - "syscall" - - "github.com/docker/docker/cmd/dockerd/hack" - "github.com/docker/docker/daemon" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/system" - "github.com/docker/libnetwork/portallocator" -) - -const defaultDaemonConfigFile = "/etc/docker/daemon.json" - -// currentUserIsOwner checks whether the current user is the owner of the given -// file. -func currentUserIsOwner(f string) bool { - if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { - if int(fileInfo.UID()) == os.Getuid() { - return true - } - } - return false -} - -// setDefaultUmask sets the umask to 0022 to avoid problems -// caused by custom umask -func setDefaultUmask() error { - desiredUmask := 0022 - syscall.Umask(desiredUmask) - if umask := syscall.Umask(desiredUmask); umask != desiredUmask { - return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) - } - - return nil -} - -func getDaemonConfDir() string { - return "/etc/docker" -} - -// setupConfigReloadTrap configures the USR2 signal to reload the configuration. -func (cli *DaemonCli) setupConfigReloadTrap() { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGHUP) - go func() { - for range c { - cli.reloadConfig() - } - }() -} - -func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { - opts := []libcontainerd.RemoteOption{ - libcontainerd.WithDebugLog(cli.Config.Debug), - libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust), - } - if cli.Config.ContainerdAddr != "" { - opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) - } else { - opts = append(opts, libcontainerd.WithStartDaemon(true)) - } - if daemon.UsingSystemd(cli.Config) { - args := []string{"--systemd-cgroup=true"} - opts = append(opts, libcontainerd.WithRuntimeArgs(args)) - } - if cli.Config.LiveRestore { - opts = append(opts, libcontainerd.WithLiveRestore(true)) - } - opts = append(opts, libcontainerd.WithRuntimePath(daemon.DefaultRuntimeBinary)) - return opts -} - -// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to -// store their state. 
-func (cli *DaemonCli) getLibcontainerdRoot() string { - return filepath.Join(cli.Config.ExecRoot, "libcontainerd") -} - -// allocateDaemonPort ensures that there are no containers -// that try to use any port allocated for the docker server. -func allocateDaemonPort(addr string) error { - host, port, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - intPort, err := strconv.Atoi(port) - if err != nil { - return err - } - - var hostIPs []net.IP - if parsedIP := net.ParseIP(host); parsedIP != nil { - hostIPs = append(hostIPs, parsedIP) - } else if hostIPs, err = net.LookupIP(host); err != nil { - return fmt.Errorf("failed to lookup %s address in host specification", host) - } - - pa := portallocator.Get() - for _, hostIP := range hostIPs { - if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil { - return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) - } - } - return nil -} - -// notifyShutdown is called after the daemon shuts down but before the process exits. -func notifyShutdown(err error) { -} - -func wrapListeners(proto string, ls []net.Listener) []net.Listener { - switch proto { - case "unix": - ls[0] = &hack.MalformedHostHeaderOverride{ls[0]} - case "fd": - for i := range ls { - ls[i] = &hack.MalformedHostHeaderOverride{ls[i]} - } - } - return ls -} diff --git a/cmd/dockerd/daemon_unix_test.go b/cmd/dockerd/daemon_unix_test.go deleted file mode 100644 index a72468edd..000000000 --- a/cmd/dockerd/daemon_unix_test.go +++ /dev/null @@ -1,212 +0,0 @@ -// +build !windows - -package main - -import ( - "io/ioutil" - "testing" - - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/daemon" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/mflag" -) - -func TestLoadDaemonCliConfigWithDaemonFlags(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{ - Debug: true, - LogLevel: "info", - } - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"log-opts": {"max-size": "1k"}}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.String([]string{daemonConfigFileFlag}, "", "") - flags.BoolVar(&c.EnableSelinuxSupport, []string{"-selinux-enabled"}, true, "") - flags.StringVar(&c.LogConfig.Type, []string{"-log-driver"}, "json-file", "") - flags.Var(opts.NewNamedMapOpts("log-opts", c.LogConfig.Config, nil), []string{"-log-opt"}, "") - flags.Set(daemonConfigFileFlag, configFile) - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - if !loadedConfig.Debug { - t.Fatalf("expected debug mode, got false") - } - if loadedConfig.LogLevel != "info" { - t.Fatalf("expected info log level, got %v", loadedConfig.LogLevel) - } - if !loadedConfig.EnableSelinuxSupport { - t.Fatalf("expected enabled selinux support, got disabled") - } - if loadedConfig.LogConfig.Type != "json-file" { - t.Fatalf("expected LogConfig type json-file, got %v", loadedConfig.LogConfig.Type) - } - if maxSize := loadedConfig.LogConfig.Config["max-size"]; maxSize != "1k" { - t.Fatalf("expected log max-size `1k`, got %s", maxSize) - } -} - -func TestLoadDaemonConfigWithNetwork(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.String([]string{"-bip"}, "", "") - 
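(Aside: allocateDaemonPort above reserves the daemon's own listening port with libnetwork's portallocator so no container can later publish on it. A minimal sketch of that reservation in isolation, with illustrative values; the import path and RequestPort signature are taken from the code above:)

    package main

    import (
        "fmt"
        "net"

        "github.com/docker/libnetwork/portallocator"
    )

    func main() {
        pa := portallocator.Get()
        // Holding 2375/tcp on this IP makes a later RequestPort for the same
        // ip/proto/port (e.g. from a container port mapping) fail.
        if _, err := pa.RequestPort(net.ParseIP("127.0.0.1"), "tcp", 2375); err != nil {
            fmt.Println("port already reserved:", err)
        }
    }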
flags.String([]string{"-ip"}, "", "") - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"bip": "127.0.0.2", "ip": "127.0.0.1"}`)) - f.Close() - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - if loadedConfig.IP != "127.0.0.2" { - t.Fatalf("expected IP 127.0.0.2, got %v", loadedConfig.IP) - } - if loadedConfig.DefaultIP.String() != "127.0.0.1" { - t.Fatalf("expected DefaultIP 127.0.0.1, got %s", loadedConfig.DefaultIP) - } -} - -func TestLoadDaemonConfigWithMapOptions(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - - flags.Var(opts.NewNamedMapOpts("cluster-store-opts", c.ClusterOpts, nil), []string{"-cluster-store-opt"}, "") - flags.Var(opts.NewNamedMapOpts("log-opts", c.LogConfig.Config, nil), []string{"-log-opt"}, "") - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{ - "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"}, - "log-opts": {"tag": "test"} -}`)) - f.Close() - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatal("expected configuration, got nil") - } - if loadedConfig.ClusterOpts == nil { - t.Fatal("expected cluster options, got nil") - } - - expectedPath := "/var/lib/docker/discovery_certs/ca.pem" - if caPath := loadedConfig.ClusterOpts["kv.cacertfile"]; caPath != expectedPath { - t.Fatalf("expected %s, got %s", expectedPath, caPath) - } - - if loadedConfig.LogConfig.Config == nil { - t.Fatal("expected log config options, got nil") - } - if tag := loadedConfig.LogConfig.Config["tag"]; tag != "test" { - t.Fatalf("expected log tag `test`, got %s", tag) - } -} - -func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.BoolVar(&c.EnableUserlandProxy, []string{"-userland-proxy"}, true, "") - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - if err := flags.ParseFlags([]string{}, false); err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{ - "userland-proxy": false -}`)) - f.Close() - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatal("expected configuration, got nil") - } - - if loadedConfig.EnableUserlandProxy { - t.Fatal("expected userland proxy to be disabled, got enabled") - } - - // make sure reloading doesn't generate configuration - // conflicts after normalizing boolean values. 
- err = daemon.ReloadConfiguration(configFile, flags, func(reloadedConfig *daemon.Config) { - if reloadedConfig.EnableUserlandProxy { - t.Fatal("expected userland proxy to be disabled, got enabled") - } - }) - if err != nil { - t.Fatal(err) - } -} - -func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.BoolVar(&c.EnableUserlandProxy, []string{"-userland-proxy"}, true, "") - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - if err := flags.ParseFlags([]string{}, false); err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{}`)) - f.Close() - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatal("expected configuration, got nil") - } - - if !loadedConfig.EnableUserlandProxy { - t.Fatal("expected userland proxy to be enabled, got disabled") - } -} diff --git a/cmd/dockerd/daemon_windows.go b/cmd/dockerd/daemon_windows.go deleted file mode 100644 index 9772f2b2e..000000000 --- a/cmd/dockerd/daemon_windows.go +++ /dev/null @@ -1,82 +0,0 @@ -package main - -import ( - "fmt" - "net" - "os" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/system" -) - -var defaultDaemonConfigFile = os.Getenv("programdata") + string(os.PathSeparator) + "docker" + string(os.PathSeparator) + "config" + string(os.PathSeparator) + "daemon.json" - -// currentUserIsOwner checks whether the current user is the owner of the given -// file. -func currentUserIsOwner(f string) bool { - return false -} - -// setDefaultUmask doesn't do anything on windows -func setDefaultUmask() error { - return nil -} - -func getDaemonConfDir() string { - return os.Getenv("PROGRAMDATA") + `\docker\config` -} - -// notifySystem sends a message to the host when the server is ready to be used -func notifySystem() { - if service != nil { - err := service.started() - if err != nil { - logrus.Fatal(err) - } - } -} - -// notifyShutdown is called after the daemon shuts down but before the process exits. -func notifyShutdown(err error) { - if service != nil { - service.stopped(err) - } -} - -// setupConfigReloadTrap configures a Win32 event to reload the configuration. -func (cli *DaemonCli) setupConfigReloadTrap() { - go func() { - sa := syscall.SecurityAttributes{ - Length: 0, - } - ev := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid()) - if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 { - logrus.Debugf("Config reload - waiting signal at %s", ev) - for { - syscall.WaitForSingleObject(h, syscall.INFINITE) - cli.reloadConfig() - } - } - }() -} - -func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { - return nil -} - -// getLibcontainerdRoot gets the root directory for libcontainerd to store its -// state. The Windows libcontainerd implementation does not need to write a spec -// or state to disk, so this is a no-op. 
-func (cli *DaemonCli) getLibcontainerdRoot() string { - return "" -} - -func allocateDaemonPort(addr string) error { - return nil -} - -func wrapListeners(proto string, ls []net.Listener) []net.Listener { - return ls -} diff --git a/cmd/dockerd/docker.go b/cmd/dockerd/docker.go deleted file mode 100644 index 5eec9e047..000000000 --- a/cmd/dockerd/docker.go +++ /dev/null @@ -1,82 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/dockerversion" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/utils" -) - -var ( - daemonCli = NewDaemonCli() - flHelp = flag.Bool([]string{"h", "-help"}, false, "Print usage") - flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") -) - -func main() { - if reexec.Init() { - return - } - - // Set terminal emulation based on platform as required. - _, stdout, stderr := term.StdStreams() - - logrus.SetOutput(stderr) - - flag.Merge(flag.CommandLine, daemonCli.commonFlags.FlagSet) - - flag.Usage = func() { - fmt.Fprint(stdout, "Usage: dockerd [ --help | -v | --version ]\n\n") - fmt.Fprint(stdout, "A self-sufficient runtime for containers.\n\nOptions:\n") - - flag.CommandLine.SetOutput(stdout) - flag.PrintDefaults() - } - flag.CommandLine.ShortUsage = func() { - fmt.Fprint(stderr, "\nUsage:\tdockerd [OPTIONS]\n") - } - - if err := flag.CommandLine.ParseFlags(os.Args[1:], false); err != nil { - os.Exit(1) - } - - if *flVersion { - showVersion() - return - } - - if *flHelp { - // if global flag --help is present, regardless of what other options and commands there are, - // just print the usage. - flag.Usage() - return - } - - // On Windows, this may be launching as a service or with an option to - // register the service. - stop, err := initService() - if err != nil { - logrus.Fatal(err) - } - - if !stop { - err = daemonCli.start() - notifyShutdown(err) - if err != nil { - logrus.Fatal(err) - } - } -} - -func showVersion() { - if utils.ExperimentalBuild() { - fmt.Printf("Docker version %s, build %s, experimental\n", dockerversion.Version, dockerversion.GitCommit) - } else { - fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) - } -} diff --git a/cmd/dockerd/docker_windows.go b/cmd/dockerd/docker_windows.go deleted file mode 100644 index 19c5587cb..000000000 --- a/cmd/dockerd/docker_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "sync/atomic" - - _ "github.com/docker/docker/autogen/winresources/dockerd" -) - -//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll" - -var dummy uintptr - -func init() { - // Ensure that this import is not removed by the linker. This is used to - // ensure that shell32.dll is loaded by the system loader, preventing - // go#15286 from triggering on Nano Server TP5. - atomic.LoadUintptr(&dummy) -} diff --git a/cmd/dockerd/hack/malformed_host_override.go b/cmd/dockerd/hack/malformed_host_override.go deleted file mode 100644 index d4aa3ddd7..000000000 --- a/cmd/dockerd/hack/malformed_host_override.go +++ /dev/null @@ -1,121 +0,0 @@ -// +build !windows - -package hack - -import "net" - -// MalformedHostHeaderOverride is a wrapper to be able -// to overcome the 400 Bad request coming from old docker -// clients that send an invalid Host header. 
-type MalformedHostHeaderOverride struct {
-	net.Listener
-}
-
-// MalformedHostHeaderOverrideConn wraps the underlying unix
-// connection and keeps track of the first read from http.Server,
-// which just reads the headers.
-type MalformedHostHeaderOverrideConn struct {
-	net.Conn
-	first bool
-}
-
-var closeConnHeader = []byte("\r\nConnection: close\r")
-
-// Read reads the first *read* request from http.Server to inspect
-// the Host header. If the Host starts with / then we're talking to
-// an old docker client which sends an invalid Host header. To not
-// error out in http.Server we rewrite the first bytes of the request
-// to sanitize the Host header itself.
-// In case we're not dealing with old docker clients the data is just passed
-// to the server without modification.
-func (l *MalformedHostHeaderOverrideConn) Read(b []byte) (n int, err error) {
-	// http.Server uses a 4k buffer
-	if l.first && len(b) == 4096 {
-		// This keeps track of the first read from http.Server which just reads
-		// the headers
-		l.first = false
-		// The first read of the connection by http.Server is done limited to
-		// DefaultMaxHeaderBytes (usually 1 << 20) + 4096.
-		// Here we do the first read which gets us all the http headers to
-		// be inspected and modified below.
-		c, err := l.Conn.Read(b)
-		if err != nil {
-			return c, err
-		}
-
-		var (
-			start, end    int
-			firstLineFeed = -1
-			buf           []byte
-		)
-		for i := 0; i <= c-1-7; i++ {
-			if b[i] == '\n' && firstLineFeed == -1 {
-				firstLineFeed = i
-			}
-			if b[i] != '\n' {
-				continue
-			}
-
-			if b[i+1] == '\r' && b[i+2] == '\n' {
-				return c, nil
-			}
-
-			if b[i+1] != 'H' {
-				continue
-			}
-			if b[i+2] != 'o' {
-				continue
-			}
-			if b[i+3] != 's' {
-				continue
-			}
-			if b[i+4] != 't' {
-				continue
-			}
-			if b[i+5] != ':' {
-				continue
-			}
-			if b[i+6] != ' ' {
-				continue
-			}
-			if b[i+7] != '/' {
-				continue
-			}
-			// ensure clients other than the docker clients do not get this hack
-			if i != firstLineFeed {
-				return c, nil
-			}
-			start = i + 7
-			// now find where the value ends
-			for ii, bbb := range b[start:c] {
-				if bbb == '\n' {
-					end = start + ii
-					break
-				}
-			}
-			buf = make([]byte, 0, c+len(closeConnHeader)-(end-start))
-			// strip the value of the host header and
-			// inject `Connection: close` to ensure we don't reuse this connection
-			buf = append(buf, b[:start]...)
-			buf = append(buf, closeConnHeader...)
-			buf = append(buf, b[end:c]...)
-			copy(b, buf)
-			break
-		}
-		if len(buf) == 0 {
-			return c, nil
-		}
-		return len(buf), nil
-	}
-	return l.Conn.Read(b)
-}
-
-// Accept makes the listener accept connections and wraps the connection
-// in a MalformedHostHeaderOverrideConn, initializing first to true.
-func (l *MalformedHostHeaderOverride) Accept() (net.Conn, error) { - c, err := l.Listener.Accept() - if err != nil { - return c, err - } - return &MalformedHostHeaderOverrideConn{c, true}, nil -} diff --git a/cmd/dockerd/hack/malformed_host_override_test.go b/cmd/dockerd/hack/malformed_host_override_test.go deleted file mode 100644 index 1a0a60baf..000000000 --- a/cmd/dockerd/hack/malformed_host_override_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build !windows - -package hack - -import ( - "bytes" - "io" - "net" - "strings" - "testing" -) - -type bufConn struct { - net.Conn - buf *bytes.Buffer -} - -func (bc *bufConn) Read(b []byte) (int, error) { - return bc.buf.Read(b) -} - -func TestHeaderOverrideHack(t *testing.T) { - tests := [][2][]byte{ - { - []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), - []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n"), - }, - { - []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\nFoo: Bar\r\n"), - []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\nFoo: Bar\r\n"), - }, - { - []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something!"), - []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something!"), - }, - { - []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)), - []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)), - }, - { - []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), - []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), - }, - } - - // Test for https://github.com/docker/docker/issues/23045 - h0 := "GET /foo\nUser-Agent: Docker\r\n\r\n" - h0 = h0 + strings.Repeat("a", 4096-len(h0)-1) + "\n" - tests = append(tests, [2][]byte{[]byte(h0), []byte(h0)}) - - for _, pair := range tests { - read := make([]byte, 4096) - client := &bufConn{ - buf: bytes.NewBuffer(pair[0]), - } - l := MalformedHostHeaderOverrideConn{client, true} - - n, err := l.Read(read) - if err != nil && err != io.EOF { - t.Fatalf("read: %d - %d, err: %v\n%s", n, len(pair[0]), err, string(read[:n])) - } - if !bytes.Equal(read[:n], pair[1][:n]) { - t.Fatalf("\n%s\n%s\n", read[:n], pair[1][:n]) - } - } -} - -func BenchmarkWithHack(b *testing.B) { - client, srv := net.Pipe() - done := make(chan struct{}) - req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") - read := make([]byte, 4096) - b.SetBytes(int64(len(req) * 30)) - - l := MalformedHostHeaderOverrideConn{client, true} - go func() { - for { - if _, err := srv.Write(req); err != nil { - srv.Close() - break - } - l.first = true // make sure each subsequent run uses the hack parsing - } - close(done) - }() - - for i := 0; i < b.N; i++ { - for i := 0; i < 30; i++ { - if n, err := l.Read(read); err != nil && err != io.EOF { - b.Fatalf("read: %d - %d, err: %v\n%s", n, len(req), err, string(read[:n])) - } - } - } - l.Close() - <-done -} - -func BenchmarkNoHack(b *testing.B) { - client, srv := net.Pipe() - done := make(chan struct{}) - req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") - read := make([]byte, 4096) - b.SetBytes(int64(len(req) * 30)) - - go func() { - for { - if _, err := srv.Write(req); err != nil { - srv.Close() - break - } - } - close(done) - }() - - for i := 0; i < b.N; i++ { - for i := 0; i < 30; i++ { - if _, err := 
client.Read(read); err != nil && err != io.EOF { - b.Fatal(err) - } - } - } - client.Close() - <-done -} diff --git a/cmd/dockerd/routes.go b/cmd/dockerd/routes.go deleted file mode 100644 index 65b97bd8c..000000000 --- a/cmd/dockerd/routes.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !experimental - -package main - -import "github.com/docker/docker/api/server/router" - -func addExperimentalRouters(routers []router.Router) []router.Router { - return routers -} diff --git a/cmd/dockerd/routes_experimental.go b/cmd/dockerd/routes_experimental.go deleted file mode 100644 index 665df9499..000000000 --- a/cmd/dockerd/routes_experimental.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build experimental - -package main - -import ( - "github.com/docker/docker/api/server/router" - pluginrouter "github.com/docker/docker/api/server/router/plugin" - "github.com/docker/docker/plugin" -) - -func addExperimentalRouters(routers []router.Router) []router.Router { - return append(routers, pluginrouter.NewRouter(plugin.GetManager())) -} diff --git a/cmd/dockerd/service_unsupported.go b/cmd/dockerd/service_unsupported.go deleted file mode 100644 index dd53802a5..000000000 --- a/cmd/dockerd/service_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package main - -func initService() (bool, error) { - return false, nil -} diff --git a/cmd/dockerd/service_windows.go b/cmd/dockerd/service_windows.go deleted file mode 100644 index e78dad20c..000000000 --- a/cmd/dockerd/service_windows.go +++ /dev/null @@ -1,369 +0,0 @@ -package main - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "syscall" - - "github.com/Sirupsen/logrus" - flag "github.com/docker/docker/pkg/mflag" - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/svc" - "golang.org/x/sys/windows/svc/debug" - "golang.org/x/sys/windows/svc/eventlog" - "golang.org/x/sys/windows/svc/mgr" -) - -var ( - flServiceName = flag.String([]string{"-service-name"}, "docker", "Set the Windows service name") - flRegisterService = flag.Bool([]string{"-register-service"}, false, "Register the service and exit") - flUnregisterService = flag.Bool([]string{"-unregister-service"}, false, "Unregister the service and exit") - flRunService = flag.Bool([]string{"-run-service"}, false, "") - - setStdHandle = syscall.NewLazyDLL("kernel32.dll").NewProc("SetStdHandle") - oldStderr syscall.Handle - panicFile *os.File - - service *handler -) - -const ( - // These should match the values in event_messages.mc. 
- eventInfo = 1 - eventWarn = 1 - eventError = 1 - eventDebug = 2 - eventPanic = 3 - eventFatal = 4 - - eventExtraOffset = 10 // Add this to any event to get a string that supports extended data -) - -type handler struct { - tosvc chan bool - fromsvc chan error -} - -type etwHook struct { - log *eventlog.Log -} - -func (h *etwHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, - } -} - -func (h *etwHook) Fire(e *logrus.Entry) error { - var ( - etype uint16 - eid uint32 - ) - - switch e.Level { - case logrus.PanicLevel: - etype = windows.EVENTLOG_ERROR_TYPE - eid = eventPanic - case logrus.FatalLevel: - etype = windows.EVENTLOG_ERROR_TYPE - eid = eventFatal - case logrus.ErrorLevel: - etype = windows.EVENTLOG_ERROR_TYPE - eid = eventError - case logrus.WarnLevel: - etype = windows.EVENTLOG_WARNING_TYPE - eid = eventWarn - case logrus.InfoLevel: - etype = windows.EVENTLOG_INFORMATION_TYPE - eid = eventInfo - case logrus.DebugLevel: - etype = windows.EVENTLOG_INFORMATION_TYPE - eid = eventDebug - default: - return errors.New("unknown level") - } - - // If there is additional data, include it as a second string. - exts := "" - if len(e.Data) > 0 { - fs := bytes.Buffer{} - for k, v := range e.Data { - fs.WriteString(k) - fs.WriteByte('=') - fmt.Fprint(&fs, v) - fs.WriteByte(' ') - } - - exts = fs.String()[:fs.Len()-1] - eid += eventExtraOffset - } - - if h.log == nil { - fmt.Fprintf(os.Stderr, "%s [%s]\n", e.Message, exts) - return nil - } - - var ( - ss [2]*uint16 - err error - ) - - ss[0], err = syscall.UTF16PtrFromString(e.Message) - if err != nil { - return err - } - - count := uint16(1) - if exts != "" { - ss[1], err = syscall.UTF16PtrFromString(exts) - if err != nil { - return err - } - - count++ - } - - return windows.ReportEvent(h.log.Handle, etype, 0, eid, 0, count, 0, &ss[0], nil) -} - -func getServicePath() (string, error) { - p, err := exec.LookPath(os.Args[0]) - if err != nil { - return "", err - } - return filepath.Abs(p) -} - -func registerService() error { - p, err := getServicePath() - if err != nil { - return err - } - m, err := mgr.Connect() - if err != nil { - return err - } - defer m.Disconnect() - c := mgr.Config{ - ServiceType: windows.SERVICE_WIN32_OWN_PROCESS, - StartType: mgr.StartAutomatic, - ErrorControl: mgr.ErrorNormal, - DisplayName: "Docker Engine", - } - - // Configure the service to launch with the arguments that were just passed. - args := []string{"--run-service"} - for _, a := range os.Args[1:] { - if a != "--register-service" && a != "--unregister-service" { - args = append(args, a) - } - } - - s, err := m.CreateService(*flServiceName, p, c, args...) 
- if err != nil { - return err - } - defer s.Close() - err = eventlog.Install(*flServiceName, p, false, eventlog.Info|eventlog.Warning|eventlog.Error) - if err != nil { - return err - } - - return nil -} - -func unregisterService() error { - m, err := mgr.Connect() - if err != nil { - return err - } - defer m.Disconnect() - - s, err := m.OpenService(*flServiceName) - if err != nil { - return err - } - defer s.Close() - - eventlog.Remove(*flServiceName) - err = s.Delete() - if err != nil { - return err - } - return nil -} - -func initService() (bool, error) { - if *flUnregisterService { - if *flRegisterService { - return true, errors.New("--register-service and --unregister-service cannot be used together") - } - return true, unregisterService() - } - - if *flRegisterService { - return true, registerService() - } - - if !*flRunService { - return false, nil - } - - interactive, err := svc.IsAnInteractiveSession() - if err != nil { - return false, err - } - - h := &handler{ - tosvc: make(chan bool), - fromsvc: make(chan error), - } - - var log *eventlog.Log - if !interactive { - log, err = eventlog.Open(*flServiceName) - if err != nil { - return false, err - } - } - - logrus.AddHook(&etwHook{log}) - logrus.SetOutput(ioutil.Discard) - - service = h - go func() { - if interactive { - err = debug.Run(*flServiceName, h) - } else { - err = svc.Run(*flServiceName, h) - } - - h.fromsvc <- err - }() - - // Wait for the first signal from the service handler. - err = <-h.fromsvc - if err != nil { - return false, err - } - return false, nil -} - -func (h *handler) started() error { - // This must be delayed until daemonCli initializes Config.Root - err := initPanicFile(filepath.Join(daemonCli.Config.Root, "panic.log")) - if err != nil { - return err - } - - h.tosvc <- false - return nil -} - -func (h *handler) stopped(err error) { - logrus.Debugf("Stopping service: %v", err) - h.tosvc <- err != nil - <-h.fromsvc -} - -func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (bool, uint32) { - s <- svc.Status{State: svc.StartPending, Accepts: 0} - // Unblock initService() - h.fromsvc <- nil - - // Wait for initialization to complete. - failed := <-h.tosvc - if failed { - logrus.Debug("Aborting service start due to failure during initialization") - return true, 1 - } - - s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)} - logrus.Debug("Service running") -Loop: - for { - select { - case failed = <-h.tosvc: - break Loop - case c := <-r: - switch c.Cmd { - case svc.Cmd(windows.SERVICE_CONTROL_PARAMCHANGE): - daemonCli.reloadConfig() - case svc.Interrogate: - s <- c.CurrentStatus - case svc.Stop, svc.Shutdown: - s <- svc.Status{State: svc.StopPending, Accepts: 0} - daemonCli.stop() - } - } - } - - removePanicFile() - if failed { - return true, 1 - } - return false, 0 -} - -func initPanicFile(path string) error { - var err error - panicFile, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0) - if err != nil { - return err - } - - st, err := panicFile.Stat() - if err != nil { - return err - } - - // If there are contents in the file already, move the file out of the way - // and replace it. - if st.Size() > 0 { - panicFile.Close() - os.Rename(path, path+".old") - panicFile, err = os.Create(path) - if err != nil { - return err - } - } - - // Update STD_ERROR_HANDLE to point to the panic file so that Go writes to - // it when it panics. 
Remember the old stderr to restore it before removing - // the panic file. - sh := syscall.STD_ERROR_HANDLE - h, err := syscall.GetStdHandle(sh) - if err != nil { - return err - } - - oldStderr = h - - r, _, err := setStdHandle.Call(uintptr(sh), uintptr(panicFile.Fd())) - if r == 0 && err != nil { - return err - } - - return nil -} - -func removePanicFile() { - if st, err := panicFile.Stat(); err == nil { - if st.Size() == 0 { - sh := syscall.STD_ERROR_HANDLE - setStdHandle.Call(uintptr(sh), uintptr(oldStderr)) - panicFile.Close() - os.Remove(panicFile.Name()) - } - } -} diff --git a/container/archive.go b/container/archive.go deleted file mode 100644 index e22a001c5..000000000 --- a/container/archive.go +++ /dev/null @@ -1,76 +0,0 @@ -package container - -import ( - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/system" - "github.com/docker/engine-api/types" -) - -// ResolvePath resolves the given path in the container to a resource on the -// host. Returns a resolved path (absolute path to the resource on the host), -// the absolute path to the resource relative to the container's rootfs, and -// an error if the path points to outside the container's rootfs. -func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) { - // Check if a drive letter supplied, it must be the system drive. No-op except on Windows - path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) - if err != nil { - return "", "", err - } - - // Consider the given path as an absolute path in the container. - absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) - - // Split the absPath into its Directory and Base components. We will - // resolve the dir in the scope of the container then append the base. - dirPath, basePath := filepath.Split(absPath) - - resolvedDirPath, err := container.GetResourcePath(dirPath) - if err != nil { - return "", "", err - } - - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. - resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - - return resolvedPath, absPath, nil -} - -// StatPath is the unexported version of StatPath. Locks and mounts should -// be acquired before calling this method and the given path should be fully -// resolved to a path on the host corresponding to the given absolute path -// inside the container. -func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { - lstat, err := os.Lstat(resolvedPath) - if err != nil { - return nil, err - } - - var linkTarget string - if lstat.Mode()&os.ModeSymlink != 0 { - // Fully evaluate the symlink in the scope of the container rootfs. - hostPath, err := container.GetResourcePath(absPath) - if err != nil { - return nil, err - } - - linkTarget, err = filepath.Rel(container.BaseFS, hostPath) - if err != nil { - return nil, err - } - - // Make it an absolute path. 
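// [annotation] ResolvePath and StatPath above are designed to be called as a
// pair, with the container lock and mounts already held; a hypothetical
// caller (names invented for illustration):
//	resolved, abs, err := ctr.ResolvePath("/etc/hosts")
//	if err != nil { return nil, err }
//	stat, err := ctr.StatPath(resolved, abs)
// ResolvePath scopes the lookup to the container's rootfs; StatPath then
// converts the host-side lstat into a types.ContainerPathStat.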
- linkTarget = filepath.Join(string(filepath.Separator), linkTarget) - } - - return &types.ContainerPathStat{ - Name: filepath.Base(absPath), - Size: lstat.Size(), - Mode: lstat.Mode(), - Mtime: lstat.ModTime(), - LinkTarget: linkTarget, - }, nil -} diff --git a/container/container.go b/container/container.go deleted file mode 100644 index d34226922..000000000 --- a/container/container.go +++ /dev/null @@ -1,974 +0,0 @@ -package container - -import ( - "encoding/json" - "fmt" - "io" - "net" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/jsonfilelog" - "github.com/docker/docker/daemon/network" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/restartmanager" - "github.com/docker/docker/runconfig" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/docker/volume" - containertypes "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/go-connections/nat" - "github.com/docker/libnetwork" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" - "github.com/docker/libnetwork/types" - "github.com/opencontainers/runc/libcontainer/label" -) - -const configFileName = "config.v2.json" - -var ( - errInvalidEndpoint = fmt.Errorf("invalid endpoint while building port map info") - errInvalidNetwork = fmt.Errorf("invalid network settings while building port map info") -) - -// DetachError is special error which returned in case of container detach. -type DetachError struct{} - -func (DetachError) Error() string { - return "detached from container" -} - -// CommonContainer holds the fields for a container which are -// applicable across all platforms supported by the daemon. -type CommonContainer struct { - *runconfig.StreamConfig - // embed for Container to support states directly. - *State `json:"State"` // Needed for remote api version <= 1.11 - Root string `json:"-"` // Path to the "home" of the container, including metadata. - BaseFS string `json:"-"` // Path to the graphdriver mountpoint - RWLayer layer.RWLayer `json:"-"` - ID string - Created time.Time - Managed bool - Path string - Args []string - Config *containertypes.Config - ImageID image.ID `json:"Image"` - NetworkSettings *network.Settings - LogPath string - Name string - Driver string - // MountLabel contains the options for the 'mount' command - MountLabel string - ProcessLabel string - RestartCount int - HasBeenStartedBefore bool - HasBeenManuallyStopped bool // used for unless-stopped restart policy - MountPoints map[string]*volume.MountPoint - HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable - ExecCommands *exec.Store `json:"-"` - // logDriver for closing - LogDriver logger.Logger `json:"-"` - LogCopier *logger.Copier `json:"-"` - restartManager restartmanager.RestartManager - attachContext *attachContext -} - -// NewBaseContainer creates a new container with its -// basic configuration. 
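// [annotation] Construction sketch for the function that follows (assumed
// daemon-side flow, error handling elided; repoRoot is a stand-in name):
//	ctr := NewBaseContainer(id, filepath.Join(repoRoot, id))
//	ctr.Config, ctr.HostConfig = config, hostConfig // from the create request
//	_ = ctr.ToDisk() // persists config.v2.json under the container's Root
// FromDisk, defined just below, is the inverse used when the daemon restarts
// and reloads container state.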
-func NewBaseContainer(id, root string) *Container { - return &Container{ - CommonContainer: CommonContainer{ - ID: id, - State: NewState(), - ExecCommands: exec.NewStore(), - Root: root, - MountPoints: make(map[string]*volume.MountPoint), - StreamConfig: runconfig.NewStreamConfig(), - attachContext: &attachContext{}, - }, - } -} - -// FromDisk loads the container configuration stored in the host. -func (container *Container) FromDisk() error { - pth, err := container.ConfigPath() - if err != nil { - return err - } - - jsonSource, err := os.Open(pth) - if err != nil { - return err - } - defer jsonSource.Close() - - dec := json.NewDecoder(jsonSource) - - // Load container settings - if err := dec.Decode(container); err != nil { - return err - } - - if err := label.ReserveLabel(container.ProcessLabel); err != nil { - return err - } - return container.readHostConfig() -} - -// ToDisk saves the container configuration on disk. -func (container *Container) ToDisk() error { - pth, err := container.ConfigPath() - if err != nil { - return err - } - - jsonSource, err := ioutils.NewAtomicFileWriter(pth, 0666) - if err != nil { - return err - } - defer jsonSource.Close() - - enc := json.NewEncoder(jsonSource) - - // Save container settings - if err := enc.Encode(container); err != nil { - return err - } - - return container.WriteHostConfig() -} - -// ToDiskLocking saves the container configuration on disk in a thread safe way. -func (container *Container) ToDiskLocking() error { - container.Lock() - err := container.ToDisk() - container.Unlock() - return err -} - -// readHostConfig reads the host configuration from disk for the container. -func (container *Container) readHostConfig() error { - container.HostConfig = &containertypes.HostConfig{} - // If the hostconfig file does not exist, do not read it. - // (We still have to initialize container.HostConfig, - // but that's OK, since we just did that above.) - pth, err := container.HostConfigPath() - if err != nil { - return err - } - - f, err := os.Open(pth) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - defer f.Close() - - if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { - return err - } - - container.InitDNSHostConfig() - - return nil -} - -// WriteHostConfig saves the host configuration on disk for the container. -func (container *Container) WriteHostConfig() error { - pth, err := container.HostConfigPath() - if err != nil { - return err - } - - f, err := ioutils.NewAtomicFileWriter(pth, 0666) - if err != nil { - return err - } - defer f.Close() - - return json.NewEncoder(f).Encode(&container.HostConfig) -} - -// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir -func (container *Container) SetupWorkingDirectory(rootUID, rootGID int) error { - if container.Config.WorkingDir == "" { - return nil - } - - container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) - - // If can't mount container FS at this point (eg Hyper-V Containers on - // Windows) bail out now with no action. 
- if !container.canMountFS() { - return nil - } - - pth, err := container.GetResourcePath(container.Config.WorkingDir) - if err != nil { - return err - } - - if err := idtools.MkdirAllNewAs(pth, 0755, rootUID, rootGID); err != nil { - pthInfo, err2 := os.Stat(pth) - if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { - return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) - } - - return err - } - - return nil -} - -// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path -// sanitisation. Symlinks are all scoped to the BaseFS of the container, as -// though the container's BaseFS was `/`. -// -// The BaseFS of a container is the host-facing path which is bind-mounted as -// `/` inside the container. This method is essentially used to access a -// particular path inside the container as though you were a process in that -// container. -// -// NOTE: The returned path is *only* safely scoped inside the container's BaseFS -// if no component of the returned path changes (such as a component -// symlinking to a different path) between using this method and using the -// path. See symlink.FollowSymlinkInScope for more details. -func (container *Container) GetResourcePath(path string) (string, error) { - // IMPORTANT - These are paths on the OS where the daemon is running, hence - // any filepath operations must be done in an OS agnostic way. - - cleanPath := cleanResourcePath(path) - r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) - - // Log this here on the daemon side as there's otherwise no indication apart - // from the error being propagated all the way back to the client. This makes - // debugging significantly easier and clearly indicates the error comes from the daemon. - if e != nil { - logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e) - } - return r, e -} - -// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path -// sanitisation. Symlinks are all scoped to the root of the container, as -// though the container's root was `/`. -// -// The root of a container is the host-facing configuration metadata directory. -// Only use this method to safely access the container's `container.json` or -// other metadata files. If in doubt, use container.GetResourcePath. -// -// NOTE: The returned path is *only* safely scoped inside the container's root -// if no component of the returned path changes (such as a component -// symlinking to a different path) between using this method and using the -// path. See symlink.FollowSymlinkInScope for more details. -func (container *Container) GetRootResourcePath(path string) (string, error) { - // IMPORTANT - These are paths on the OS where the daemon is running, hence - // any filepath operations must be done in an OS agnostic way. - cleanPath := filepath.Join(string(os.PathSeparator), path) - return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) -} - -// ExitOnNext signals to the monitor that it should not restart the container -// after we send the kill signal. 
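// [annotation] The method below pairs with RestartManager and ShouldRestart
// later in this file: cancelling the restart manager is what lets the next
// exit go unrestarted, so a stop path is expected to run roughly:
//	ctr.ExitOnNext()            // cancel any pending restart
//	// ...deliver the stop/kill signal; once the process exits,
//	// ctr.ShouldRestart() reports false and the monitor stands down.
// (The Cancel semantics live in the restartmanager package, outside this
// patch, so treat this sketch as an assumption.)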
-func (container *Container) ExitOnNext() { - if container.restartManager != nil { - container.restartManager.Cancel() - } -} - -// HostConfigPath returns the path to the container's JSON hostconfig -func (container *Container) HostConfigPath() (string, error) { - return container.GetRootResourcePath("hostconfig.json") -} - -// ConfigPath returns the path to the container's JSON config -func (container *Container) ConfigPath() (string, error) { - return container.GetRootResourcePath(configFileName) -} - -// StartLogger starts a new logger driver for the container. -func (container *Container) StartLogger(cfg containertypes.LogConfig) (logger.Logger, error) { - c, err := logger.GetLogDriver(cfg.Type) - if err != nil { - return nil, fmt.Errorf("Failed to get logging factory: %v", err) - } - ctx := logger.Context{ - Config: cfg.Config, - ContainerID: container.ID, - ContainerName: container.Name, - ContainerEntrypoint: container.Path, - ContainerArgs: container.Args, - ContainerImageID: container.ImageID.String(), - ContainerImageName: container.Config.Image, - ContainerCreated: container.Created, - ContainerEnv: container.Config.Env, - ContainerLabels: container.Config.Labels, - DaemonName: "docker", - } - - // Set logging file for "json-logger" - if cfg.Type == jsonfilelog.Name { - ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) - if err != nil { - return nil, err - } - } - return c(ctx) -} - -// GetProcessLabel returns the process label for the container. -func (container *Container) GetProcessLabel() string { - // even if we have a process label return "" if we are running - // in privileged mode - if container.HostConfig.Privileged { - return "" - } - return container.ProcessLabel -} - -// GetMountLabel returns the mounting label for the container. -// This label is empty if the container is privileged. -func (container *Container) GetMountLabel() string { - return container.MountLabel -} - -// GetExecIDs returns the list of exec commands running on the container. -func (container *Container) GetExecIDs() []string { - return container.ExecCommands.List() -} - -// Attach connects to the container's TTY, delegating to standard -// streams or websockets depending on the configuration. -func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { - ctx := container.InitAttachContext() - return AttachStreams(ctx, container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr, keys) -} - -// AttachStreams connects streams to a TTY. -// Used by exec too. Should this move somewhere else? -func AttachStreams(ctx context.Context, streamConfig *runconfig.StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { - var ( - cStdout, cStderr io.ReadCloser - cStdin io.WriteCloser - wg sync.WaitGroup - errors = make(chan error, 3) - ) - - if stdin != nil && openStdin { - cStdin = streamConfig.StdinPipe() - wg.Add(1) - } - - if stdout != nil { - cStdout = streamConfig.StdoutPipe() - wg.Add(1) - } - - if stderr != nil { - cStderr = streamConfig.StderrPipe() - wg.Add(1) - } - - // Connect stdin of container to the http conn. 
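// [annotation] The usual entry point is Attach above, which returns a channel
// rather than blocking; a hypothetical caller:
//	errCh := ctr.Attach(stdin, stdout, stderr, nil) // nil = default detach keys
//	if err := <-errCh; err != nil {
//		// a DetachError here means the user detached, not a failure
//	}
// The default escape sequence, ctrl-p ctrl-q (bytes 16, 17), is set in
// copyEscapable further down.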
- go func() { - if stdin == nil || !openStdin { - return - } - logrus.Debug("attach: stdin: begin") - - var err error - if tty { - _, err = copyEscapable(cStdin, stdin, keys) - } else { - _, err = io.Copy(cStdin, stdin) - } - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - logrus.Errorf("attach: stdin: %s", err) - errors <- err - } - if stdinOnce && !tty { - cStdin.Close() - } else { - // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr - if cStdout != nil { - cStdout.Close() - } - if cStderr != nil { - cStderr.Close() - } - } - logrus.Debug("attach: stdin: end") - wg.Done() - }() - - attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { - if stream == nil { - return - } - - logrus.Debugf("attach: %s: begin", name) - _, err := io.Copy(stream, streamPipe) - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - logrus.Errorf("attach: %s: %v", name, err) - errors <- err - } - // Make sure stdin gets closed - if stdin != nil { - stdin.Close() - } - streamPipe.Close() - logrus.Debugf("attach: %s: end", name) - wg.Done() - } - - go attachStream("stdout", stdout, cStdout) - go attachStream("stderr", stderr, cStderr) - - return promise.Go(func() error { - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - select { - case <-done: - case <-ctx.Done(): - // close all pipes - if cStdin != nil { - cStdin.Close() - } - if cStdout != nil { - cStdout.Close() - } - if cStderr != nil { - cStderr.Close() - } - <-done - } - close(errors) - for err := range errors { - if err != nil { - return err - } - } - return nil - }) -} - -// Code c/c from io.Copy() modified to handle escape sequence -func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { - if len(keys) == 0 { - // Default keys : ctrl-p ctrl-q - keys = []byte{16, 17} - } - buf := make([]byte, 32*1024) - for { - nr, er := src.Read(buf) - if nr > 0 { - // ---- Docker addition - preservBuf := []byte{} - for i, key := range keys { - preservBuf = append(preservBuf, buf[0:nr]...) - if nr != 1 || buf[0] != key { - break - } - if i == len(keys)-1 { - src.Close() - return 0, DetachError{} - } - nr, er = src.Read(buf) - } - var nw int - var ew error - if len(preservBuf) > 0 { - nw, ew = dst.Write(preservBuf) - nr = len(preservBuf) - } else { - // ---- End of docker - nw, ew = dst.Write(buf[0:nr]) - } - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er == io.EOF { - break - } - if er != nil { - err = er - break - } - } - return written, err -} - -// ShouldRestart decides whether the daemon should restart the container or not. -// This is based on the container's restart policy. -func (container *Container) ShouldRestart() bool { - shouldRestart, _, _ := container.restartManager.ShouldRestart(uint32(container.ExitCode()), container.HasBeenManuallyStopped, container.FinishedAt.Sub(container.StartedAt)) - return shouldRestart -} - -// AddMountPointWithVolume adds a new mount point configured with a volume to the container. -func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { - container.MountPoints[destination] = &volume.MountPoint{ - Name: vol.Name(), - Driver: vol.DriverName(), - Destination: destination, - RW: rw, - Volume: vol, - CopyData: volume.DefaultCopyMode, - } -} - -// IsDestinationMounted checks whether a path is mounted on the container or not. 
-func (container *Container) IsDestinationMounted(destination string) bool { - return container.MountPoints[destination] != nil -} - -// StopSignal returns the signal used to stop the container. -func (container *Container) StopSignal() int { - var stopSignal syscall.Signal - if container.Config.StopSignal != "" { - stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) - } - - if int(stopSignal) == 0 { - stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) - } - return int(stopSignal) -} - -// InitDNSHostConfig ensures that the dns fields are never nil. -// New containers don't ever have those fields nil, -// but pre created containers can still have those nil values. -// The non-recommended host configuration in the start api can -// make these fields nil again, this corrects that issue until -// we remove that behavior for good. -// See https://github.com/docker/docker/pull/17779 -// for a more detailed explanation on why we don't want that. -func (container *Container) InitDNSHostConfig() { - container.Lock() - defer container.Unlock() - if container.HostConfig.DNS == nil { - container.HostConfig.DNS = make([]string, 0) - } - - if container.HostConfig.DNSSearch == nil { - container.HostConfig.DNSSearch = make([]string, 0) - } - - if container.HostConfig.DNSOptions == nil { - container.HostConfig.DNSOptions = make([]string, 0) - } -} - -// GetEndpointInNetwork returns the container's endpoint to the provided network. -func (container *Container) GetEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) { - endpointName := strings.TrimPrefix(container.Name, "/") - return n.EndpointByName(endpointName) -} - -func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error { - if ep == nil { - return errInvalidEndpoint - } - - networkSettings := container.NetworkSettings - if networkSettings == nil { - return errInvalidNetwork - } - - if len(networkSettings.Ports) == 0 { - pm, err := getEndpointPortMapInfo(ep) - if err != nil { - return err - } - networkSettings.Ports = pm - } - return nil -} - -func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) { - pm := nat.PortMap{} - driverInfo, err := ep.DriverInfo() - if err != nil { - return pm, err - } - - if driverInfo == nil { - // It is not an error for epInfo to be nil - return pm, nil - } - - if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { - if exposedPorts, ok := expData.([]types.TransportPort); ok { - for _, tp := range exposedPorts { - natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) - if err != nil { - return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err) - } - pm[natPort] = nil - } - } - } - - mapData, ok := driverInfo[netlabel.PortMap] - if !ok { - return pm, nil - } - - if portMapping, ok := mapData.([]types.PortBinding); ok { - for _, pp := range portMapping { - natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) - if err != nil { - return pm, err - } - natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} - pm[natPort] = append(pm[natPort], natBndg) - } - } - - return pm, nil -} - -// GetSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox -func GetSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap { - pm := nat.PortMap{} - if sb == nil { - return pm - } - - for _, ep := range sb.Endpoints() { - pm, _ = getEndpointPortMapInfo(ep) - if len(pm) > 0 { - break - } - } - return pm -} - -// BuildEndpointInfo sets 
endpoint-related fields on container.NetworkSettings based on the provided network and endpoint. -func (container *Container) BuildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { - if ep == nil { - return errInvalidEndpoint - } - - networkSettings := container.NetworkSettings - if networkSettings == nil { - return errInvalidNetwork - } - - epInfo := ep.Info() - if epInfo == nil { - // It is not an error to get an empty endpoint info - return nil - } - - if _, ok := networkSettings.Networks[n.Name()]; !ok { - networkSettings.Networks[n.Name()] = new(networktypes.EndpointSettings) - } - networkSettings.Networks[n.Name()].NetworkID = n.ID() - networkSettings.Networks[n.Name()].EndpointID = ep.ID() - - iface := epInfo.Iface() - if iface == nil { - return nil - } - - if iface.MacAddress() != nil { - networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() - } - - if iface.Address() != nil { - ones, _ := iface.Address().Mask.Size() - networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() - networkSettings.Networks[n.Name()].IPPrefixLen = ones - } - - if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { - onesv6, _ := iface.AddressIPv6().Mask.Size() - networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() - networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 - } - - return nil -} - -// UpdateJoinInfo updates network settings when container joins network n with endpoint ep. -func (container *Container) UpdateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { - if err := container.buildPortMapInfo(ep); err != nil { - return err - } - - epInfo := ep.Info() - if epInfo == nil { - // It is not an error to get an empty endpoint info - return nil - } - if epInfo.Gateway() != nil { - container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() - } - if epInfo.GatewayIPv6().To16() != nil { - container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() - } - - return nil -} - -// UpdateSandboxNetworkSettings updates the sandbox ID and Key. -func (container *Container) UpdateSandboxNetworkSettings(sb libnetwork.Sandbox) error { - container.NetworkSettings.SandboxID = sb.ID() - container.NetworkSettings.SandboxKey = sb.Key() - return nil -} - -// BuildJoinOptions builds endpoint Join options from a given network. -func (container *Container) BuildJoinOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) { - var joinOptions []libnetwork.EndpointOption - if epConfig, ok := container.NetworkSettings.Networks[n.Name()]; ok { - for _, str := range epConfig.Links { - name, alias, err := runconfigopts.ParseLink(str) - if err != nil { - return nil, err - } - joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias)) - } - } - return joinOptions, nil -} - -// BuildCreateEndpointOptions builds endpoint options from a given network. 
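// [annotation] Rough order in which the daemon is expected to combine the
// helpers above when connecting a container to a network n; CreateEndpoint
// and Join are libnetwork calls, and this is an illustrative sketch rather
// than the exact daemon code:
//	opts, _ := ctr.BuildCreateEndpointOptions(n, epConfig, sb)
//	ep, _ := n.CreateEndpoint(name, opts...)
//	_ = ctr.BuildEndpointInfo(n, ep)
//	joinOpts, _ := ctr.BuildJoinOptions(n)
//	_ = ep.Join(sb, joinOpts...)
//	_ = ctr.UpdateJoinInfo(n, ep)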
-func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epConfig *networktypes.EndpointSettings, sb libnetwork.Sandbox) ([]libnetwork.EndpointOption, error) { - var ( - bindings = make(nat.PortMap) - pbList []types.PortBinding - exposeList []types.TransportPort - createOptions []libnetwork.EndpointOption - ) - - defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() - - if n.Name() == defaultNetName || container.NetworkSettings.IsAnonymousEndpoint { - createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) - } - - if epConfig != nil { - ipam := epConfig.IPAMConfig - if ipam != nil && (ipam.IPv4Address != "" || ipam.IPv6Address != "" || len(ipam.LinkLocalIPs) > 0) { - var ipList []net.IP - for _, ips := range ipam.LinkLocalIPs { - if ip := net.ParseIP(ips); ip != nil { - ipList = append(ipList, ip) - } - } - createOptions = append(createOptions, - libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), ipList, nil)) - } - - for _, alias := range epConfig.Aliases { - createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias)) - } - } - - if container.NetworkSettings.Service != nil { - svcCfg := container.NetworkSettings.Service - - var vip string - if svcCfg.VirtualAddresses[n.ID()] != nil { - vip = svcCfg.VirtualAddresses[n.ID()].IPv4 - } - - var portConfigs []*libnetwork.PortConfig - for _, portConfig := range svcCfg.ExposedPorts { - portConfigs = append(portConfigs, &libnetwork.PortConfig{ - Name: portConfig.Name, - Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol), - TargetPort: portConfig.TargetPort, - PublishedPort: portConfig.PublishedPort, - }) - } - - createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()])) - } - - if !containertypes.NetworkMode(n.Name()).IsUserDefined() { - createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution()) - } - - // configs that are applicable only for the endpoint in the network - // to which container was connected to on docker run. 
- // Ideally all these network-specific endpoint configurations must be moved under - // container.NetworkSettings.Networks[n.Name()] - if n.Name() == container.HostConfig.NetworkMode.NetworkName() || - (n.Name() == defaultNetName && container.HostConfig.NetworkMode.IsDefault()) { - if container.Config.MacAddress != "" { - mac, err := net.ParseMAC(container.Config.MacAddress) - if err != nil { - return nil, err - } - - genericOption := options.Generic{ - netlabel.MacAddress: mac, - } - - createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) - } - } - - // Port-mapping rules belong to the container & applicable only to non-internal networks - portmaps := GetSandboxPortMapInfo(sb) - if n.Info().Internal() || len(portmaps) > 0 { - return createOptions, nil - } - - if container.HostConfig.PortBindings != nil { - for p, b := range container.HostConfig.PortBindings { - bindings[p] = []nat.PortBinding{} - for _, bb := range b { - bindings[p] = append(bindings[p], nat.PortBinding{ - HostIP: bb.HostIP, - HostPort: bb.HostPort, - }) - } - } - } - - portSpecs := container.Config.ExposedPorts - ports := make([]nat.Port, len(portSpecs)) - var i int - for p := range portSpecs { - ports[i] = p - i++ - } - nat.SortPortMap(ports, bindings) - for _, port := range ports { - expose := types.TransportPort{} - expose.Proto = types.ParseProtocol(port.Proto()) - expose.Port = uint16(port.Int()) - exposeList = append(exposeList, expose) - - pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} - binding := bindings[port] - for i := 0; i < len(binding); i++ { - pbCopy := pb.GetCopy() - newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) - var portStart, portEnd int - if err == nil { - portStart, portEnd, err = newP.Range() - } - if err != nil { - return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) - } - pbCopy.HostPort = uint16(portStart) - pbCopy.HostPortEnd = uint16(portEnd) - pbCopy.HostIP = net.ParseIP(binding[i].HostIP) - pbList = append(pbList, pbCopy) - } - - if container.HostConfig.PublishAllPorts && len(binding) == 0 { - pbList = append(pbList, pb) - } - } - - createOptions = append(createOptions, - libnetwork.CreateOptionPortMapping(pbList), - libnetwork.CreateOptionExposedPorts(exposeList)) - - return createOptions, nil -} - -// UpdateMonitor updates monitor configure for running container -func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) { - type policySetter interface { - SetPolicy(containertypes.RestartPolicy) - } - - if rm, ok := container.RestartManager(false).(policySetter); ok { - rm.SetPolicy(restartPolicy) - } -} - -// FullHostname returns hostname and optional domain appended to it. -func (container *Container) FullHostname() string { - fullHostname := container.Config.Hostname - if container.Config.Domainname != "" { - fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) - } - return fullHostname -} - -// RestartManager returns the current restartmanager instance connected to container. 
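// [annotation] UpdateMonitor above uses a local interface assertion so the
// restartmanager interface itself never has to expose SetPolicy; the same
// pattern in isolation:
//	if rm, ok := v.(interface{ SetPolicy(containertypes.RestartPolicy) }); ok {
//		rm.SetPolicy(policy)
//	}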
-func (container *Container) RestartManager(reset bool) restartmanager.RestartManager { - if reset { - container.RestartCount = 0 - container.restartManager = nil - } - if container.restartManager == nil { - container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy, container.RestartCount) - } - - return container.restartManager -} - -type attachContext struct { - ctx context.Context - cancel context.CancelFunc - mu sync.Mutex -} - -// InitAttachContext initializes or returns the existing context for attach calls to -// track container liveness. -func (container *Container) InitAttachContext() context.Context { - container.attachContext.mu.Lock() - defer container.attachContext.mu.Unlock() - if container.attachContext.ctx == nil { - container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) - } - return container.attachContext.ctx -} - -// CancelAttachContext cancels the attach context. All attach calls should detach -// after this call. -func (container *Container) CancelAttachContext() { - container.attachContext.mu.Lock() - if container.attachContext.ctx != nil { - container.attachContext.cancel() - container.attachContext.ctx = nil - } - container.attachContext.mu.Unlock() -} diff --git a/container/container_solaris.go b/container/container_solaris.go deleted file mode 100644 index ca02d8ea8..000000000 --- a/container/container_solaris.go +++ /dev/null @@ -1,95 +0,0 @@ -// +build solaris - -package container - -import ( - "os" - "path/filepath" - - "github.com/docker/docker/volume" - "github.com/docker/engine-api/types/container" ) - -// Container holds fields specific to the Solaris implementation. See -// CommonContainer for standard fields common to all containers. -type Container struct { - CommonContainer - - // fields below here are platform specific. - HostnamePath string - HostsPath string - ResolvConfPath string -} - -// ExitStatus provides exit reasons for a container. -type ExitStatus struct { - // The exit code with which the container exited. - ExitCode int -} - -// CreateDaemonEnvironment creates a new environment variable slice for this container. -func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string { - return nil -} - -func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) { - return volumeMounts, nil -} - -// TrySetNetworkMount attempts to set the network mounts given a provided destination and -// the path to use for it; returns true if the given destination was a network mount file -func (container *Container) TrySetNetworkMount(destination string, path string) bool { - return true -} - -// NetworkMounts returns the list of network mounts. -func (container *Container) NetworkMounts() []Mount { - var mount []Mount - return mount -} - -// CopyImagePathContent copies files in destination to the volume. -func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { - return nil -} - -// UnmountIpcMounts unmounts Ipc related mounts. -func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { -} - -// IpcMounts returns the list of Ipc related mounts. -func (container *Container) IpcMounts() []Mount { - return nil -} - -// UpdateContainer updates configuration of a container -func (container *Container) UpdateContainer(hostConfig *container.HostConfig) error { - return nil -} - -// UnmountVolumes explicitly unmounts volumes from the container. 
-func (container *Container) UnmountVolumes(forceSyscall bool, volumeEventLog func(name, action string, attributes map[string]string)) error { - return nil -} - -// TmpfsMounts returns the list of tmpfs mounts -func (container *Container) TmpfsMounts() []Mount { - var mounts []Mount - return mounts -} - -// cleanResourcePath cleans a resource path and prepares to combine with mnt path -func cleanResourcePath(path string) string { - return filepath.Join(string(os.PathSeparator), path) -} - -// BuildHostnameFile writes the container's hostname file. -func (container *Container) BuildHostnameFile() error { - return nil -} - -// canMountFS determines if the file system for the container -// can be mounted locally. A no-op on non-Windows platforms -func (container *Container) canMountFS() bool { - return true -} diff --git a/container/container_unit_test.go b/container/container_unit_test.go deleted file mode 100644 index 67b829f9f..000000000 --- a/container/container_unit_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package container - -import ( - "testing" - - "github.com/docker/docker/pkg/signal" - "github.com/docker/engine-api/types/container" -) - -func TestContainerStopSignal(t *testing.T) { - c := &Container{ - CommonContainer: CommonContainer{ - Config: &container.Config{}, - }, - } - - def, err := signal.ParseSignal(signal.DefaultStopSignal) - if err != nil { - t.Fatal(err) - } - - s := c.StopSignal() - if s != int(def) { - t.Fatalf("Expected %v, got %v", def, s) - } - - c = &Container{ - CommonContainer: CommonContainer{ - Config: &container.Config{StopSignal: "SIGKILL"}, - }, - } - s = c.StopSignal() - if s != 9 { - t.Fatalf("Expected 9, got %v", s) - } -} diff --git a/container/container_unix.go b/container/container_unix.go deleted file mode 100644 index 9539e6f68..000000000 --- a/container/container_unix.go +++ /dev/null @@ -1,419 +0,0 @@ -// +build linux freebsd - -package container - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/utils" - "github.com/docker/docker/volume" - containertypes "github.com/docker/engine-api/types/container" - "github.com/opencontainers/runc/libcontainer/label" -) - -// DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container -const DefaultSHMSize int64 = 67108864 - -// Container holds the fields specific to unixen implementations. -// See CommonContainer for standard fields common to all containers. -type Container struct { - CommonContainer - - // Fields below here are platform specific. - AppArmorProfile string - HostnamePath string - HostsPath string - ShmPath string - ResolvConfPath string - SeccompProfile string - NoNewPrivileges bool -} - -// ExitStatus provides exit reasons for a container. -type ExitStatus struct { - // The exit code with which the container exited. - ExitCode int - - // Whether the container encountered an OOM. - OOMKilled bool -} - -// CreateDaemonEnvironment returns the list of all environment variables given the list of -// environment variables related to links. -// Sets PATH, HOSTNAME and if container.Config.Tty is set: TERM. 
-// The defaults set here do not override the values in container.Config.Env -func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string { - // Setup environment - env := []string{ - "PATH=" + system.DefaultPathEnv, - "HOSTNAME=" + container.Config.Hostname, - } - if container.Config.Tty { - env = append(env, "TERM=xterm") - } - env = append(env, linkedEnv...) - // because the env on the container can override certain default values - // we need to replace the 'env' keys where they match and append anything - // else. - env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) - return env -} - -// TrySetNetworkMount attempts to set the network mounts given a provided destination and -// the path to use for it; return true if the given destination was a network mount file -func (container *Container) TrySetNetworkMount(destination string, path string) bool { - if destination == "/etc/resolv.conf" { - container.ResolvConfPath = path - return true - } - if destination == "/etc/hostname" { - container.HostnamePath = path - return true - } - if destination == "/etc/hosts" { - container.HostsPath = path - return true - } - - return false -} - -// BuildHostnameFile writes the container's hostname file. -func (container *Container) BuildHostnameFile() error { - hostnamePath, err := container.GetRootResourcePath("hostname") - if err != nil { - return err - } - container.HostnamePath = hostnamePath - return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) -} - -// appendNetworkMounts appends any network mounts to the array of mount points passed in -func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) { - for _, mnt := range container.NetworkMounts() { - dest, err := container.GetResourcePath(mnt.Destination) - if err != nil { - return nil, err - } - volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest}) - } - return volumeMounts, nil -} - -// NetworkMounts returns the list of network mounts. 
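// [annotation] The paths consulted by NetworkMounts below are exactly the ones
// TrySetNetworkMount above records (/etc/resolv.conf, /etc/hostname,
// /etc/hosts); each is stat'ed first and skipped with a warning if the
// generated file has gone missing. Recording a source path looks like:
//	ctr.TrySetNetworkMount("/etc/hosts", hostsPath) // hostsPath: daemon-generated file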
-func (container *Container) NetworkMounts() []Mount { - var mounts []Mount - shared := container.HostConfig.NetworkMode.IsContainer() - if container.ResolvConfPath != "" { - if _, err := os.Stat(container.ResolvConfPath); err != nil { - logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) - } else { - if !container.HasMountFor("/etc/resolv.conf") { - label.Relabel(container.ResolvConfPath, container.MountLabel, shared) - } - writable := !container.HostConfig.ReadonlyRootfs - if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { - writable = m.RW - } - mounts = append(mounts, Mount{ - Source: container.ResolvConfPath, - Destination: "/etc/resolv.conf", - Writable: writable, - Propagation: volume.DefaultPropagationMode, - }) - } - } - if container.HostnamePath != "" { - if _, err := os.Stat(container.HostnamePath); err != nil { - logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) - } else { - if !container.HasMountFor("/etc/hostname") { - label.Relabel(container.HostnamePath, container.MountLabel, shared) - } - writable := !container.HostConfig.ReadonlyRootfs - if m, exists := container.MountPoints["/etc/hostname"]; exists { - writable = m.RW - } - mounts = append(mounts, Mount{ - Source: container.HostnamePath, - Destination: "/etc/hostname", - Writable: writable, - Propagation: volume.DefaultPropagationMode, - }) - } - } - if container.HostsPath != "" { - if _, err := os.Stat(container.HostsPath); err != nil { - logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) - } else { - if !container.HasMountFor("/etc/hosts") { - label.Relabel(container.HostsPath, container.MountLabel, shared) - } - writable := !container.HostConfig.ReadonlyRootfs - if m, exists := container.MountPoints["/etc/hosts"]; exists { - writable = m.RW - } - mounts = append(mounts, Mount{ - Source: container.HostsPath, - Destination: "/etc/hosts", - Writable: writable, - Propagation: volume.DefaultPropagationMode, - }) - } - } - return mounts -} - -// CopyImagePathContent copies files in destination to the volume. 
-func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { - rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) - if err != nil { - return err - } - - if _, err = ioutil.ReadDir(rootfs); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - id := stringid.GenerateNonCryptoID() - path, err := v.Mount(id) - if err != nil { - return err - } - - defer func() { - if err := v.Unmount(id); err != nil { - logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err) - } - }() - return copyExistingContents(rootfs, path) -} - -// ShmResourcePath returns path to shm -func (container *Container) ShmResourcePath() (string, error) { - return container.GetRootResourcePath("shm") -} - -// HasMountFor checks if path is a mountpoint -func (container *Container) HasMountFor(path string) bool { - _, exists := container.MountPoints[path] - return exists -} - -// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted -func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { - if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() { - return - } - - var warnings []string - - if !container.HasMountFor("/dev/shm") { - shmPath, err := container.ShmResourcePath() - if err != nil { - logrus.Error(err) - warnings = append(warnings, err.Error()) - } else if shmPath != "" { - if err := unmount(shmPath); err != nil && !os.IsNotExist(err) { - warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err)) - } - - } - } - - if len(warnings) > 0 { - logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n")) - } -} - -// IpcMounts returns the list of IPC mounts -func (container *Container) IpcMounts() []Mount { - var mounts []Mount - - if !container.HasMountFor("/dev/shm") { - label.SetFileLabel(container.ShmPath, container.MountLabel) - mounts = append(mounts, Mount{ - Source: container.ShmPath, - Destination: "/dev/shm", - Writable: true, - Propagation: volume.DefaultPropagationMode, - }) - } - - return mounts -} - -// UpdateContainer updates configuration of a container. 
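// [annotation] Note the merge rule in the Unix UpdateContainer below: only
// non-zero resource fields overwrite the stored HostConfig, so a zero value
// means "leave unchanged". Updating memory alone would look like:
//	_ = ctr.UpdateContainer(&containertypes.HostConfig{
//		Resources: containertypes.Resources{Memory: 512 * 1024 * 1024},
//	})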
-func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { - container.Lock() - defer container.Unlock() - - // update resources of container - resources := hostConfig.Resources - cResources := &container.HostConfig.Resources - if resources.BlkioWeight != 0 { - cResources.BlkioWeight = resources.BlkioWeight - } - if resources.CPUShares != 0 { - cResources.CPUShares = resources.CPUShares - } - if resources.CPUPeriod != 0 { - cResources.CPUPeriod = resources.CPUPeriod - } - if resources.CPUQuota != 0 { - cResources.CPUQuota = resources.CPUQuota - } - if resources.CpusetCpus != "" { - cResources.CpusetCpus = resources.CpusetCpus - } - if resources.CpusetMems != "" { - cResources.CpusetMems = resources.CpusetMems - } - if resources.Memory != 0 { - cResources.Memory = resources.Memory - } - if resources.MemorySwap != 0 { - cResources.MemorySwap = resources.MemorySwap - } - if resources.MemoryReservation != 0 { - cResources.MemoryReservation = resources.MemoryReservation - } - if resources.KernelMemory != 0 { - cResources.KernelMemory = resources.KernelMemory - } - - // update HostConfig of container - if hostConfig.RestartPolicy.Name != "" { - container.HostConfig.RestartPolicy = hostConfig.RestartPolicy - } - - if err := container.ToDisk(); err != nil { - logrus.Errorf("Error saving updated container: %v", err) - return err - } - - return nil -} - -func detachMounted(path string) error { - return syscall.Unmount(path, syscall.MNT_DETACH) -} - -// UnmountVolumes unmounts all volumes -func (container *Container) UnmountVolumes(forceSyscall bool, volumeEventLog func(name, action string, attributes map[string]string)) error { - var ( - volumeMounts []volume.MountPoint - err error - ) - - for _, mntPoint := range container.MountPoints { - dest, err := container.GetResourcePath(mntPoint.Destination) - if err != nil { - return err - } - - volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest, Volume: mntPoint.Volume}) - } - - // Append any network mounts to the list (this is a no-op on Windows) - if volumeMounts, err = appendNetworkMounts(container, volumeMounts); err != nil { - return err - } - - for _, volumeMount := range volumeMounts { - if forceSyscall { - if err := detachMounted(volumeMount.Destination); err != nil { - logrus.Warnf("%s unmountVolumes: Failed to do lazy umount %v", container.ID, err) - } - } - - if volumeMount.Volume != nil { - if err := volumeMount.Volume.Unmount(volumeMount.ID); err != nil { - return err - } - volumeMount.ID = "" - - attributes := map[string]string{ - "driver": volumeMount.Volume.DriverName(), - "container": container.ID, - } - volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) - } - } - - return nil -} - -// copyExistingContents copies from the source to the destination and -// ensures the ownership is appropriately set. 
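// [annotation] In copyExistingContents below, data is copied only when the
// destination volume is empty (the "populate a new volume from the image"
// case), and copyOwnership always runs afterwards to sync uid/gid and mode.
// Beware the swapped-looking names: volList is read from source and srcList
// from destination.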
-func copyExistingContents(source, destination string) error { - volList, err := ioutil.ReadDir(source) - if err != nil { - return err - } - if len(volList) > 0 { - srcList, err := ioutil.ReadDir(destination) - if err != nil { - return err - } - if len(srcList) == 0 { - // If the source volume is empty, copies files from the root into the volume - if err := chrootarchive.CopyWithTar(source, destination); err != nil { - return err - } - } - } - return copyOwnership(source, destination) -} - -// copyOwnership copies the permissions and uid:gid of the source file -// to the destination file -func copyOwnership(source, destination string) error { - stat, err := system.Stat(source) - if err != nil { - return err - } - - if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil { - return err - } - - return os.Chmod(destination, os.FileMode(stat.Mode())) -} - -// TmpfsMounts returns the list of tmpfs mounts -func (container *Container) TmpfsMounts() []Mount { - var mounts []Mount - for dest, data := range container.HostConfig.Tmpfs { - mounts = append(mounts, Mount{ - Source: "tmpfs", - Destination: dest, - Data: data, - }) - } - return mounts -} - -// cleanResourcePath cleans a resource path and prepares to combine with mnt path -func cleanResourcePath(path string) string { - return filepath.Join(string(os.PathSeparator), path) -} - -// canMountFS determines if the file system for the container -// can be mounted locally. A no-op on non-Windows platforms -func (container *Container) canMountFS() bool { - return true -} diff --git a/container/container_windows.go b/container/container_windows.go deleted file mode 100644 index 38560bb59..000000000 --- a/container/container_windows.go +++ /dev/null @@ -1,109 +0,0 @@ -// +build windows - -package container - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/docker/docker/utils" - "github.com/docker/docker/volume" - containertypes "github.com/docker/engine-api/types/container" -) - -// Container holds fields specific to the Windows implementation. See -// CommonContainer for standard fields common to all containers. -type Container struct { - CommonContainer - - HostnamePath string - HostsPath string - ResolvConfPath string - // Fields below here are platform specific. -} - -// ExitStatus provides exit reasons for a container. -type ExitStatus struct { - // The exit code with which the container exited. - ExitCode int -} - -// CreateDaemonEnvironment creates a new environment variable slice for this container. -func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string { - // because the env on the container can override certain default values - // we need to replace the 'env' keys where they match and append anything - // else. - return utils.ReplaceOrAppendEnvValues(linkedEnv, container.Config.Env) -} - -// UnmountIpcMounts unmount Ipc related mounts. -// This is a NOOP on windows. -func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { -} - -// IpcMounts returns the list of Ipc related mounts. -func (container *Container) IpcMounts() []Mount { - return nil -} - -// UnmountVolumes explicitly unmounts volumes from the container. 
-func (container *Container) UnmountVolumes(forceSyscall bool, volumeEventLog func(name, action string, attributes map[string]string)) error { - return nil -} - -// TmpfsMounts returns the list of tmpfs mounts -func (container *Container) TmpfsMounts() []Mount { - var mounts []Mount - return mounts -} - -// UpdateContainer updates configuration of a container -func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { - container.Lock() - defer container.Unlock() - resources := hostConfig.Resources - if resources.BlkioWeight != 0 || resources.CPUShares != 0 || - resources.CPUPeriod != 0 || resources.CPUQuota != 0 || - resources.CpusetCpus != "" || resources.CpusetMems != "" || - resources.Memory != 0 || resources.MemorySwap != 0 || - resources.MemoryReservation != 0 || resources.KernelMemory != 0 { - return fmt.Errorf("Resource updating isn't supported on Windows") - } - // update HostConfig of container - if hostConfig.RestartPolicy.Name != "" { - container.HostConfig.RestartPolicy = hostConfig.RestartPolicy - } - return nil -} - -// appendNetworkMounts appends any network mounts to the array of mount points passed in. -// Windows does not support network mounts (not to be confused with SMB network mounts), so -// this is a no-op. -func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) { - return volumeMounts, nil -} - -// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares -// to combine with a volume path -func cleanResourcePath(path string) string { - if len(path) >= 2 { - c := path[0] - if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { - path = path[2:] - } - } - return filepath.Join(string(os.PathSeparator), path) -} - -// BuildHostnameFile writes the container's hostname file. -func (container *Container) BuildHostnameFile() error { - return nil -} - -// canMountFS determines if the file system for the container -// can be mounted locally. In the case of Windows, this is not possible -// for Hyper-V containers during WORKDIR execution for example. -func (container *Container) canMountFS() bool { - return !containertypes.Isolation.IsHyperV(container.HostConfig.Isolation) -} diff --git a/container/health.go b/container/health.go deleted file mode 100644 index 0955a6d47..000000000 --- a/container/health.go +++ /dev/null @@ -1,49 +0,0 @@ -package container - -import ( - "github.com/Sirupsen/logrus" - "github.com/docker/engine-api/types" -) - -// Health holds the current container health-check state -type Health struct { - types.Health - stop chan struct{} // Write struct{} to stop the monitor -} - -// String returns a human-readable description of the health-check state -func (s *Health) String() string { - if s.stop == nil { - return "no healthcheck" - } - switch s.Status { - case types.Starting: - return "health: starting" - default: // Healthy and Unhealthy are clear on their own - return s.Status - } -} - -// OpenMonitorChannel creates and returns a new monitor channel. If there already is one, -// it returns nil. -func (s *Health) OpenMonitorChannel() chan struct{} { - if s.stop == nil { - logrus.Debug("OpenMonitorChannel") - s.stop = make(chan struct{}) - return s.stop - } - return nil -} - -// CloseMonitorChannel closes any existing monitor channel. -func (s *Health) CloseMonitorChannel() { - if s.stop != nil { - logrus.Debug("CloseMonitorChannel: waiting for probe to stop") - // This channel does not buffer. 
Once the write succeeds, the monitor - // has read the stop request and will not make any further updates - // to c.State.Health. - s.stop <- struct{}{} - s.stop = nil - logrus.Debug("CloseMonitorChannel done") - } -} diff --git a/container/history.go b/container/history.go deleted file mode 100644 index c80c2aa0c..000000000 --- a/container/history.go +++ /dev/null @@ -1,30 +0,0 @@ -package container - -import "sort" - -// History is a convenience type for storing a list of containers, -// sorted by creation date in descending order. -type History []*Container - -// Len returns the number of containers in the history. -func (history *History) Len() int { - return len(*history) -} - -// Less compares two containers and returns true if the second one -// was created before the first one. -func (history *History) Less(i, j int) bool { - containers := *history - return containers[j].Created.Before(containers[i].Created) -} - -// Swap switches the positions of containers i and j in the history. -func (history *History) Swap(i, j int) { - containers := *history - containers[i], containers[j] = containers[j], containers[i] -} - -// sort orders the history by creation date in descending order. -func (history *History) sort() { - sort.Sort(history) -} diff --git a/container/memory_store.go b/container/memory_store.go deleted file mode 100644 index 9fa1165d9..000000000 --- a/container/memory_store.go +++ /dev/null @@ -1,92 +0,0 @@ -package container - -import "sync" - -// memoryStore implements a Store in memory. -type memoryStore struct { - s map[string]*Container - sync.RWMutex -} - -// NewMemoryStore initializes a new memory store. -func NewMemoryStore() Store { - return &memoryStore{ - s: make(map[string]*Container), - } -} - -// Add appends a new container to the memory store. -// It overwrites the entry if the id already exists. -func (c *memoryStore) Add(id string, cont *Container) { - c.Lock() - c.s[id] = cont - c.Unlock() -} - -// Get returns a container from the store by id. -func (c *memoryStore) Get(id string) *Container { - c.RLock() - res := c.s[id] - c.RUnlock() - return res -} - -// Delete removes a container from the store by id. -func (c *memoryStore) Delete(id string) { - c.Lock() - delete(c.s, id) - c.Unlock() -} - -// List returns a sorted list of containers from the store. -// The containers are ordered by creation date. -func (c *memoryStore) List() []*Container { - containers := History(c.all()) - containers.sort() - return containers -} - -// Size returns the number of containers in the store. -func (c *memoryStore) Size() int { - c.RLock() - defer c.RUnlock() - return len(c.s) -} - -// First returns the first container found in the store by a given filter. -func (c *memoryStore) First(filter StoreFilter) *Container { - for _, cont := range c.all() { - if filter(cont) { - return cont - } - } - return nil -} - -// ApplyAll calls the reducer function with every container in the store. -// This operation is asynchronous in the memory store. -// NOTE: Modifications to the store MUST NOT be done by the StoreReducer.
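As an illustration of the fan-out that the NOTE above constrains, here is a minimal standalone sketch of the pattern ApplyAll (below) follows; Item is a hypothetical stand-in for *Container:

```go
package main

import (
	"fmt"
	"sync"
)

// Item is a hypothetical stand-in for *Container in this sketch.
type Item struct{ ID string }

// applyAll mirrors memoryStore.ApplyAll below: run the reducer concurrently
// over a snapshot of the store and wait for every goroutine to finish. The
// reducer must not touch the store itself, per the NOTE above.
func applyAll(snapshot []*Item, apply func(*Item)) {
	var wg sync.WaitGroup
	for _, it := range snapshot {
		wg.Add(1)
		go func(it *Item) {
			defer wg.Done()
			apply(it)
		}(it)
	}
	wg.Wait()
}

func main() {
	applyAll([]*Item{{ID: "a"}, {ID: "b"}}, func(it *Item) {
		fmt.Println("visited", it.ID)
	})
}
```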
-func (c *memoryStore) ApplyAll(apply StoreReducer) { - wg := new(sync.WaitGroup) - for _, cont := range c.all() { - wg.Add(1) - go func(container *Container) { - apply(container) - wg.Done() - }(cont) - } - - wg.Wait() -} - -func (c *memoryStore) all() []*Container { - c.RLock() - containers := make([]*Container, 0, len(c.s)) - for _, cont := range c.s { - containers = append(containers, cont) - } - c.RUnlock() - return containers -} - -var _ Store = &memoryStore{} diff --git a/container/memory_store_test.go b/container/memory_store_test.go deleted file mode 100644 index f81738fae..000000000 --- a/container/memory_store_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package container - -import ( - "testing" - "time" -) - -func TestNewMemoryStore(t *testing.T) { - s := NewMemoryStore() - m, ok := s.(*memoryStore) - if !ok { - t.Fatalf("store is not a memory store %v", s) - } - if m.s == nil { - t.Fatal("expected store map to not be nil") - } -} - -func TestAddContainers(t *testing.T) { - s := NewMemoryStore() - s.Add("id", NewBaseContainer("id", "root")) - if s.Size() != 1 { - t.Fatalf("expected store size 1, got %v", s.Size()) - } -} - -func TestGetContainer(t *testing.T) { - s := NewMemoryStore() - s.Add("id", NewBaseContainer("id", "root")) - c := s.Get("id") - if c == nil { - t.Fatal("expected container to not be nil") - } -} - -func TestDeleteContainer(t *testing.T) { - s := NewMemoryStore() - s.Add("id", NewBaseContainer("id", "root")) - s.Delete("id") - if c := s.Get("id"); c != nil { - t.Fatalf("expected container to be nil after removal, got %v", c) - } - - if s.Size() != 0 { - t.Fatalf("expected store size to be 0, got %v", s.Size()) - } -} - -func TestListContainers(t *testing.T) { - s := NewMemoryStore() - - cont := NewBaseContainer("id", "root") - cont.Created = time.Now() - cont2 := NewBaseContainer("id2", "root") - cont2.Created = time.Now().Add(24 * time.Hour) - - s.Add("id", cont) - s.Add("id2", cont2) - - list := s.List() - if len(list) != 2 { - t.Fatalf("expected list size 2, got %v", len(list)) - } - if list[0].ID != "id2" { - t.Fatalf("expected the newer container to be first, got %v", list[0].ID) - } -} - -func TestFirstContainer(t *testing.T) { - s := NewMemoryStore() - - s.Add("id", NewBaseContainer("id", "root")) - s.Add("id2", NewBaseContainer("id2", "root")) - - first := s.First(func(cont *Container) bool { - return cont.ID == "id2" - }) - - if first == nil { - t.Fatal("expected container to not be nil") - } - if first.ID != "id2" { - t.Fatalf("expected id2, got %v", first) - } -} - -func TestApplyAllContainer(t *testing.T) { - s := NewMemoryStore() - - s.Add("id", NewBaseContainer("id", "root")) - s.Add("id2", NewBaseContainer("id2", "root")) - - s.ApplyAll(func(cont *Container) { - if cont.ID == "id2" { - cont.ID = "newID" - } - }) - - cont := s.Get("id2") - if cont == nil { - t.Fatal("expected container to not be nil") - } - if cont.ID != "newID" { - t.Fatalf("expected newID, got %v", cont) - } -} diff --git a/container/monitor.go b/container/monitor.go deleted file mode 100644 index 6a7ece654..000000000 --- a/container/monitor.go +++ /dev/null @@ -1,46 +0,0 @@ -package container - -import ( - "time" - - "github.com/Sirupsen/logrus" -) - -const ( - loggerCloseTimeout = 10 * time.Second -) - -// Reset puts a container into a state where it can be restarted.
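Before the Reset implementation below, a small standalone sketch of the select-with-timeout idiom it uses to bound how long it waits for the log copier; the names here are illustrative, not from the deleted code:

```go
package main

import (
	"fmt"
	"time"
)

// waitOrTimeout mirrors how Reset below waits on LogCopier.Wait: block on a
// completion channel, but give up after a deadline so a stuck copier cannot
// wedge the reset path.
func waitOrTimeout(done <-chan struct{}, timeout time.Duration) bool {
	select {
	case <-done:
		return true
	case <-time.After(timeout):
		return false
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(done)
	}()
	fmt.Println("copier exited in time:", waitOrTimeout(done, time.Second))
}
```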
-func (container *Container) Reset(lock bool) { - if lock { - container.Lock() - defer container.Unlock() - } - - if err := container.CloseStreams(); err != nil { - logrus.Errorf("%s: %s", container.ID, err) - } - - // Re-create a brand new stdin pipe once the container has exited - if container.Config.OpenStdin { - container.NewInputPipes() - } - - if container.LogDriver != nil { - if container.LogCopier != nil { - exit := make(chan struct{}) - go func() { - container.LogCopier.Wait() - close(exit) - }() - select { - case <-time.After(loggerCloseTimeout): - logrus.Warn("Logger didn't exit in time: logs may be truncated") - case <-exit: - } - } - container.LogDriver.Close() - container.LogCopier = nil - container.LogDriver = nil - } -} diff --git a/container/mounts_unix.go b/container/mounts_unix.go deleted file mode 100644 index c52abed2d..000000000 --- a/container/mounts_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows - -package container - -// Mount contains information for a mount operation. -type Mount struct { - Source string `json:"source"` - Destination string `json:"destination"` - Writable bool `json:"writable"` - Data string `json:"data"` - Propagation string `json:"mountpropagation"` -} diff --git a/container/mounts_windows.go b/container/mounts_windows.go deleted file mode 100644 index 01b327f78..000000000 --- a/container/mounts_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -package container - -// Mount contains information for a mount operation. -type Mount struct { - Source string `json:"source"` - Destination string `json:"destination"` - Writable bool `json:"writable"` -} diff --git a/container/state.go b/container/state.go deleted file mode 100644 index 081d91f41..000000000 --- a/container/state.go +++ /dev/null @@ -1,310 +0,0 @@ -package container - -import ( - "fmt" - "sync" - "time" - - "golang.org/x/net/context" - - "github.com/docker/go-units" -) - -// State holds the current container state, and has methods to get and -// set the state. Container embeds State, which allows all of the -// functions defined on State to be called on Container. -type State struct { - sync.Mutex - // FIXME: Why do we have both paused and running if a - // container cannot be paused and running at the same time? - Running bool - Paused bool - Restarting bool - OOMKilled bool - RemovalInProgress bool // No need for this to be persisted on disk. - Dead bool - Pid int - exitCode int - error string // contains the last known error when starting the container - StartedAt time.Time - FinishedAt time.Time - waitChan chan struct{} - Health *Health -} - -// NewState creates a default state object with a fresh channel for state changes.
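State.String below leans on github.com/docker/go-units to render durations such as "Up 3 hours"; a tiny usage sketch (the started timestamp is made up):

```go
package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	// A container started three hours ago (made-up timestamp).
	started := time.Now().UTC().Add(-3 * time.Hour)
	// HumanDuration is what State.String uses to produce "Up 3 hours".
	fmt.Printf("Up %s\n", units.HumanDuration(time.Now().UTC().Sub(started)))
}
```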
-func NewState() *State { - return &State{ - waitChan: make(chan struct{}), - } -} - -// String returns a human-readable description of the state -func (s *State) String() string { - if s.Running { - if s.Paused { - return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) - } - if s.Restarting { - return fmt.Sprintf("Restarting (%d) %s ago", s.exitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) - } - - if h := s.Health; h != nil { - return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)), h.String()) - } - return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) - } - - if s.RemovalInProgress { - return "Removal In Progress" - } - - if s.Dead { - return "Dead" - } - - if s.StartedAt.IsZero() { - return "Created" - } - - if s.FinishedAt.IsZero() { - return "" - } - - return fmt.Sprintf("Exited (%d) %s ago", s.exitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) -} - -// StateString returns a single string to describe the state -func (s *State) StateString() string { - if s.Running { - if s.Paused { - return "paused" - } - if s.Restarting { - return "restarting" - } - return "running" - } - - if s.Dead { - return "dead" - } - - if s.StartedAt.IsZero() { - return "created" - } - - return "exited" -} - -// IsValidStateString checks if the provided string is a valid container state. -func IsValidStateString(s string) bool { - if s != "paused" && - s != "restarting" && - s != "running" && - s != "dead" && - s != "created" && - s != "exited" { - return false - } - return true -} - -func wait(waitChan <-chan struct{}, timeout time.Duration) error { - if timeout < 0 { - <-waitChan - return nil - } - select { - case <-time.After(timeout): - return fmt.Errorf("Timed out: %v", timeout) - case <-waitChan: - return nil - } -} - -// WaitStop waits until the state is stopped. If the state is already stopped -// it returns immediately. To wait forever, supply a negative timeout. -// Returns the exit code that was passed to SetStoppedLocking. -func (s *State) WaitStop(timeout time.Duration) (int, error) { - s.Lock() - if !s.Running { - exitCode := s.exitCode - s.Unlock() - return exitCode, nil - } - waitChan := s.waitChan - s.Unlock() - if err := wait(waitChan, timeout); err != nil { - return -1, err - } - s.Lock() - defer s.Unlock() - return s.ExitCode(), nil -} - -// WaitWithContext waits for the container to stop. An optional context can be -// passed for canceling the request. -func (s *State) WaitWithContext(ctx context.Context) error { - // todo(tonistiigi): make other wait functions use this - s.Lock() - if !s.Running { - state := *s - defer s.Unlock() - if state.exitCode == 0 { - return nil - } - return &state - } - waitChan := s.waitChan - s.Unlock() - select { - case <-waitChan: - s.Lock() - state := *s - s.Unlock() - if state.exitCode == 0 { - return nil - } - return &state - case <-ctx.Done(): - return ctx.Err() - } -} - -// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running. -func (s *State) IsRunning() bool { - s.Lock() - res := s.Running - s.Unlock() - return res -} - -// GetPID returns the process id of the container. -func (s *State) GetPID() int { - s.Lock() - res := s.Pid - s.Unlock() - return res -} - -// ExitCode returns the current exit code for the state. Take the lock first if -// the state may be shared.
-func (s *State) ExitCode() int { - res := s.exitCode - return res -} - -// SetExitCode sets the current exit code for the state. Take the lock first if -// the state may be shared. -func (s *State) SetExitCode(ec int) { - s.exitCode = ec -} - -// SetRunning sets the state of the container to "running". -func (s *State) SetRunning(pid int, initial bool) { - s.error = "" - s.Running = true - s.Paused = false - s.Restarting = false - s.exitCode = 0 - s.Pid = pid - if initial { - s.StartedAt = time.Now().UTC() - } -} - -// SetStoppedLocking locks the container state and sets it to "stopped". -func (s *State) SetStoppedLocking(exitStatus *ExitStatus) { - s.Lock() - s.SetStopped(exitStatus) - s.Unlock() -} - -// SetStopped sets the container state to "stopped" without locking. -func (s *State) SetStopped(exitStatus *ExitStatus) { - s.Running = false - s.Paused = false - s.Restarting = false - s.Pid = 0 - s.FinishedAt = time.Now().UTC() - s.setFromExitStatus(exitStatus) - close(s.waitChan) // fire waiters for stop - s.waitChan = make(chan struct{}) -} - -// SetRestartingLocking locks the container state and sets it to "restarting". -// It is used when Docker handles the automatic restart of containers that are -// in the middle of a stop and being restarted again. -func (s *State) SetRestartingLocking(exitStatus *ExitStatus) { - s.Lock() - s.SetRestarting(exitStatus) - s.Unlock() -} - -// SetRestarting sets the container state to "restarting". -// It also sets the container PID to 0. -func (s *State) SetRestarting(exitStatus *ExitStatus) { - // we should consider the container running when it is restarting because of - // all the checks in docker around rm/stop/etc - s.Running = true - s.Restarting = true - s.Pid = 0 - s.FinishedAt = time.Now().UTC() - s.setFromExitStatus(exitStatus) - close(s.waitChan) // fire waiters for stop - s.waitChan = make(chan struct{}) -} - -// SetError sets the container's error state. This is useful for knowing the -// error that occurred when the container transitions to another state, for -// example when inspecting it. -func (s *State) SetError(err error) { - s.error = err.Error() -} - -// IsPaused returns whether the container is paused or not. -func (s *State) IsPaused() bool { - s.Lock() - res := s.Paused - s.Unlock() - return res -} - -// IsRestarting returns whether the container is restarting or not. -func (s *State) IsRestarting() bool { - s.Lock() - res := s.Restarting - s.Unlock() - return res -} - -// SetRemovalInProgress sets the container state as being removed. -// It returns true if the container was already in that state. -func (s *State) SetRemovalInProgress() bool { - s.Lock() - defer s.Unlock() - if s.RemovalInProgress { - return true - } - s.RemovalInProgress = true - return false -} - -// ResetRemovalInProgress sets the RemovalInProgress flag back to false. -func (s *State) ResetRemovalInProgress() { - s.Lock() - s.RemovalInProgress = false - s.Unlock() -} - -// SetDead sets the container state to "dead" -func (s *State) SetDead() { - s.Lock() - s.Dead = true - s.Unlock() -} - -// Error returns the current error for the state. -func (s *State) Error() string { - return s.error -} diff --git a/container/state_solaris.go b/container/state_solaris.go deleted file mode 100644 index 9aef1d518..000000000 --- a/container/state_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package container - -// setFromExitStatus is a platform specific helper function to set the state -// based on the ExitStatus structure.
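An aside on the waitChan idiom used by SetStopped and SetRestarting above: closing a channel wakes every waiter at once, and a fresh channel is then installed for the next round. A standalone sketch with hypothetical names:

```go
package main

import (
	"fmt"
	"sync"
)

// broadcast mirrors State's waitChan: close-to-wake-all, then re-arm.
type broadcast struct {
	mu sync.Mutex
	ch chan struct{}
}

func newBroadcast() *broadcast { return &broadcast{ch: make(chan struct{})} }

// wait returns the current channel; receivers block until the next fire.
func (b *broadcast) wait() <-chan struct{} {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.ch
}

// fire wakes all current waiters and installs a fresh channel.
func (b *broadcast) fire() {
	b.mu.Lock()
	close(b.ch)
	b.ch = make(chan struct{})
	b.mu.Unlock()
}

func main() {
	b := newBroadcast()
	done := b.wait()
	b.fire()
	<-done
	fmt.Println("woken")
}
```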
-func (s *State) setFromExitStatus(exitStatus *ExitStatus) { - s.exitCode = exitStatus.ExitCode -} diff --git a/container/state_test.go b/container/state_test.go deleted file mode 100644 index 83ff1efcb..000000000 --- a/container/state_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package container - -import ( - "sync/atomic" - "testing" - "time" -) - -func TestStateRunStop(t *testing.T) { - s := NewState() - for i := 1; i < 3; i++ { // full lifecycle two times - s.Lock() - s.SetRunning(i+100, false) - s.Unlock() - - if !s.IsRunning() { - t.Fatal("State not running") - } - if s.Pid != i+100 { - t.Fatalf("Pid %v, expected %v", s.Pid, i+100) - } - if s.ExitCode() != 0 { - t.Fatalf("ExitCode %v, expected 0", s.ExitCode()) - } - - stopped := make(chan struct{}) - var exit int64 - go func() { - exitCode, _ := s.WaitStop(-1 * time.Second) - atomic.StoreInt64(&exit, int64(exitCode)) - close(stopped) - }() - s.SetStoppedLocking(&ExitStatus{ExitCode: i}) - if s.IsRunning() { - t.Fatal("State is running") - } - if s.ExitCode() != i { - t.Fatalf("ExitCode %v, expected %v", s.ExitCode(), i) - } - if s.Pid != 0 { - t.Fatalf("Pid %v, expected 0", s.Pid) - } - select { - case <-time.After(100 * time.Millisecond): - t.Fatal("Stop callback doesn't fire in 100 milliseconds") - case <-stopped: - t.Log("Stop callback fired") - } - exitCode := int(atomic.LoadInt64(&exit)) - if exitCode != i { - t.Fatalf("ExitCode %v, expected %v", exitCode, i) - } - if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i { - t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil) - } - } -} - -func TestStateTimeoutWait(t *testing.T) { - s := NewState() - stopped := make(chan struct{}) - go func() { - s.WaitStop(100 * time.Millisecond) - close(stopped) - }() - select { - case <-time.After(200 * time.Millisecond): - t.Fatal("Stop callback doesn't fire in 100 milliseconds") - case <-stopped: - t.Log("Stop callback fired") - } - - s.SetStoppedLocking(&ExitStatus{ExitCode: 1}) - - stopped = make(chan struct{}) - go func() { - s.WaitStop(100 * time.Millisecond) - close(stopped) - }() - select { - case <-time.After(200 * time.Millisecond): - t.Fatal("Stop callback doesn't fire in 100 milliseconds") - case <-stopped: - t.Log("Stop callback fired") - } - -} diff --git a/container/state_unix.go b/container/state_unix.go deleted file mode 100644 index f09d015e0..000000000 --- a/container/state_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build linux freebsd - -package container - -// setFromExitStatus is a platform specific helper function to set the state -// based on the ExitStatus structure. -func (s *State) setFromExitStatus(exitStatus *ExitStatus) { - s.exitCode = exitStatus.ExitCode - s.OOMKilled = exitStatus.OOMKilled -} diff --git a/container/state_windows.go b/container/state_windows.go deleted file mode 100644 index 9aef1d518..000000000 --- a/container/state_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package container - -// setFromExitStatus is a platform specific helper function to set the state -// based on the ExitStatus structure. -func (s *State) setFromExitStatus(exitStatus *ExitStatus) { - s.exitCode = exitStatus.ExitCode -} diff --git a/container/store.go b/container/store.go deleted file mode 100644 index 042fb1a34..000000000 --- a/container/store.go +++ /dev/null @@ -1,28 +0,0 @@ -package container - -// StoreFilter defines a function to filter -// container in the store. 
-type StoreFilter func(*Container) bool - -// StoreReducer defines a function to -// manipulate containers in the store -type StoreReducer func(*Container) - -// Store defines an interface that -// any container store must implement. -type Store interface { - // Add appends a new container to the store. - Add(string, *Container) - // Get returns a container from the store by the identifier it was stored with. - Get(string) *Container - // Delete removes a container from the store by the identifier it was stored with. - Delete(string) - // List returns a list of containers from the store. - List() []*Container - // Size returns the number of containers in the store. - Size() int - // First returns the first container found in the store by a given filter. - First(StoreFilter) *Container - // ApplyAll calls the reducer function with every container in the store. - ApplyAll(StoreReducer) -} diff --git a/daemon/apparmor_default.go b/daemon/apparmor_default.go deleted file mode 100644 index e4065b4ad..000000000 --- a/daemon/apparmor_default.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build linux - -package daemon - -import ( - "github.com/Sirupsen/logrus" - aaprofile "github.com/docker/docker/profiles/apparmor" - "github.com/opencontainers/runc/libcontainer/apparmor" -) - -// Define constants for native driver -const ( - defaultApparmorProfile = "docker-default" -) - -func installDefaultAppArmorProfile() { - if apparmor.IsEnabled() { - if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil { - apparmorProfiles := []string{defaultApparmorProfile} - - // Allow the daemon to run even though profile loading failed, as the - // profiles may already be active (possibly through another run, - // manually, or via system startup) - for _, policy := range apparmorProfiles { - if err := aaprofile.IsLoaded(policy); err != nil { - logrus.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", policy) - } - } - } - } -} diff --git a/daemon/apparmor_default_unsupported.go b/daemon/apparmor_default_unsupported.go deleted file mode 100644 index f186a68af..000000000 --- a/daemon/apparmor_default_unsupported.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !linux - -package daemon - -func installDefaultAppArmorProfile() { -} diff --git a/daemon/archive.go b/daemon/archive.go deleted file mode 100644 index b2221560a..000000000 --- a/daemon/archive.go +++ /dev/null @@ -1,436 +0,0 @@ -package daemon - -import ( - "errors" - "io" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/builder" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/system" - "github.com/docker/engine-api/types" -) - -// ErrExtractPointNotDirectory is used to convey that the operation to extract -// a tar archive to a directory in a container has failed because the specified -// path does not refer to a directory. -var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory") - -// ContainerCopy performs a deprecated operation of archiving the resource at -// the specified path in the container identified by the given name.
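ContainerCopy ultimately tars either a single file or a whole directory; the containerCopy helper later in this file picks the tar root and an include filter accordingly. A standalone sketch of that split, with hypothetical names:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// splitCopySource mirrors the logic in containerCopy later in this file:
// for a file, tar its parent directory with the file name as the filter;
// for a directory, tar its parent with the directory's base name so the
// archived entry keeps its name.
func splitCopySource(basePath string, info os.FileInfo) (root string, filter []string) {
	if !info.IsDir() {
		d, f := filepath.Split(basePath)
		return d, []string{f}
	}
	return filepath.Dir(basePath), []string{filepath.Base(basePath)}
}

func main() {
	info, err := os.Stat(".")
	if err != nil {
		panic(err)
	}
	root, filter := splitCopySource(".", info)
	fmt.Println("tar root:", root, "filter:", filter)
}
```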
-func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - if res[0] == '/' || res[0] == '\\' { - res = res[1:] - } - - return daemon.containerCopy(container, res) -} - -// ContainerStatPath stats the filesystem resource at the specified path in the -// container identified by the given name. -func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - return daemon.containerStatPath(container, path) -} - -// ContainerArchivePath creates an archive of the filesystem resource at the -// specified path in the container identified by the given name. Returns a -// tar archive of the resource and whether it was a directory or a single file. -func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, nil, err - } - - return daemon.containerArchivePath(container, path) -} - -// ContainerExtractToDir extracts the given archive to the specified location -// in the filesystem of the container identified by the given name. The given -// path must be of a directory in the container. If it is not, the error will -// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will -// be an error if unpacking the given content would cause an existing directory -// to be replaced with a non-directory and vice versa. -func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content) -} - -// containerStatPath stats the filesystem resource at the specified path in this -// container. Returns stat info about the resource. -func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) { - container.Lock() - defer container.Unlock() - - if err = daemon.Mount(container); err != nil { - return nil, err - } - defer daemon.Unmount(container) - - err = daemon.mountVolumes(container) - defer container.UnmountVolumes(true, daemon.LogVolumeEvent) - if err != nil { - return nil, err - } - - resolvedPath, absPath, err := container.ResolvePath(path) - if err != nil { - return nil, err - } - - return container.StatPath(resolvedPath, absPath) -} - -// containerArchivePath creates an archive of the filesystem resource at the specified -// path in this container. Returns a tar archive of the resource and stat info -// about the resource. -func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { - container.Lock() - - defer func() { - if err != nil { - // Wait to unlock the container until the archive is fully read - // (see the ReadCloseWrapper func below) or if there is an error - // before that occurs. 
- container.Unlock() - } - }() - - if err = daemon.Mount(container); err != nil { - return nil, nil, err - } - - defer func() { - if err != nil { - // unmount any volumes - container.UnmountVolumes(true, daemon.LogVolumeEvent) - // unmount the container's rootfs - daemon.Unmount(container) - } - }() - - if err = daemon.mountVolumes(container); err != nil { - return nil, nil, err - } - - resolvedPath, absPath, err := container.ResolvePath(path) - if err != nil { - return nil, nil, err - } - - stat, err = container.StatPath(resolvedPath, absPath) - if err != nil { - return nil, nil, err - } - - // We need to rebase the archive entries if the last element of the - // resolved path was a symlink that was evaluated and is now different - // than the requested path. For example, if the given path was "/foo/bar/", - // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want - // to ensure that the archive entries start with "bar" and not "baz". This - // also catches the case when the root directory of the container is - // requested: we want the archive entries to start with "/" and not the - // container ID. - data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) - if err != nil { - return nil, nil, err - } - - content = ioutils.NewReadCloserWrapper(data, func() error { - err := data.Close() - container.UnmountVolumes(true, daemon.LogVolumeEvent) - daemon.Unmount(container) - container.Unlock() - return err - }) - - daemon.LogContainerEvent(container, "archive-path") - - return content, stat, nil -} - -// containerExtractToDir extracts the given tar archive to the specified location in the -// filesystem of this container. The given path must be of a directory in the -// container. If it is not, the error will be ErrExtractPointNotDirectory. If -// noOverwriteDirNonDir is true then it will be an error if unpacking the -// given content would cause an existing directory to be replaced with a non- -// directory and vice versa. -func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { - container.Lock() - defer container.Unlock() - - if err = daemon.Mount(container); err != nil { - return err - } - defer daemon.Unmount(container) - - err = daemon.mountVolumes(container) - defer container.UnmountVolumes(true, daemon.LogVolumeEvent) - if err != nil { - return err - } - - // Check if a drive letter supplied, it must be the system drive. No-op except on Windows - path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) - if err != nil { - return err - } - - // The destination path needs to be resolved to a host path, with all - // symbolic links followed in the scope of the container's rootfs. Note - // that we do not use `container.ResolvePath(path)` here because we need - // to also evaluate the last path element if it is a symlink. This is so - // that you can extract an archive to a symlink that points to a directory. - - // Consider the given path as an absolute path in the container. - absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) - - // This will evaluate the last path element if it is a symlink. - resolvedPath, err := container.GetResourcePath(absPath) - if err != nil { - return err - } - - stat, err := os.Lstat(resolvedPath) - if err != nil { - return err - } - - if !stat.IsDir() { - return ErrExtractPointNotDirectory - } - - // Need to check if the path is in a volume. 
If it is, it cannot be in a - // read-only volume. If it is not in a volume, the container cannot be - // configured with a read-only rootfs. - - // Use the resolved path relative to the container rootfs as the new - // absPath. This way we fully follow any symlinks in a volume that may - // lead back outside the volume. - // - // The Windows implementation of filepath.Rel in golang 1.4 does not - // support volume style file path semantics. On Windows when using the - // filter driver, we are guaranteed that the path will always be - // a volume file path. - var baseRel string - if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { - if strings.HasPrefix(resolvedPath, container.BaseFS) { - baseRel = resolvedPath[len(container.BaseFS):] - if baseRel[:1] == `\` { - baseRel = baseRel[1:] - } - } - } else { - baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) - } - if err != nil { - return err - } - // Make it an absolute path. - absPath = filepath.Join(string(filepath.Separator), baseRel) - - toVolume, err := checkIfPathIsInAVolume(container, absPath) - if err != nil { - return err - } - - if !toVolume && container.HostConfig.ReadonlyRootfs { - return ErrRootFSReadOnly - } - - uid, gid := daemon.GetRemappedUIDGID() - options := &archive.TarOptions{ - NoOverwriteDirNonDir: noOverwriteDirNonDir, - ChownOpts: &archive.TarChownOptions{ - UID: uid, GID: gid, // TODO: should all ownership be set to root (either real or remapped)? - }, - } - if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { - return err - } - - daemon.LogContainerEvent(container, "extract-to-dir") - - return nil -} - -func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { - container.Lock() - - defer func() { - if err != nil { - // Wait to unlock the container until the archive is fully read - // (see the ReadCloseWrapper func below) or if there is an error - // before that occurs. - container.Unlock() - } - }() - - if err := daemon.Mount(container); err != nil { - return nil, err - } - - defer func() { - if err != nil { - // unmount any volumes - container.UnmountVolumes(true, daemon.LogVolumeEvent) - // unmount the container's rootfs - daemon.Unmount(container) - } - }() - - if err := daemon.mountVolumes(container); err != nil { - return nil, err - } - - basePath, err := container.GetResourcePath(resource) - if err != nil { - return nil, err - } - stat, err := os.Stat(basePath) - if err != nil { - return nil, err - } - var filter []string - if !stat.IsDir() { - d, f := filepath.Split(basePath) - basePath = d - filter = []string{f} - } else { - filter = []string{filepath.Base(basePath)} - basePath = filepath.Dir(basePath) - } - archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ - Compression: archive.Uncompressed, - IncludeFiles: filter, - }) - if err != nil { - return nil, err - } - - reader := ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - container.UnmountVolumes(true, daemon.LogVolumeEvent) - daemon.Unmount(container) - container.Unlock() - return err - }) - daemon.LogContainerEvent(container, "copy") - return reader, nil -} - -// CopyOnBuild copies/extracts a source FileInfo to a destination path inside a container -// specified by a container object. -// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already). 
-// CopyOnBuild should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths. -func (daemon *Daemon) CopyOnBuild(cID string, destPath string, src builder.FileInfo, decompress bool) error { - srcPath := src.Path() - destExists := true - destDir := false - rootUID, rootGID := daemon.GetRemappedUIDGID() - - // Work in daemon-local OS specific file paths - destPath = filepath.FromSlash(destPath) - - c, err := daemon.GetContainer(cID) - if err != nil { - return err - } - err = daemon.Mount(c) - if err != nil { - return err - } - defer daemon.Unmount(c) - - dest, err := c.GetResourcePath(destPath) - if err != nil { - return err - } - - // Preserve the trailing slash - // TODO: why are we appending another path separator if there was already one? - if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." { - destDir = true - dest += string(os.PathSeparator) - } - - destPath = dest - - destStat, err := os.Stat(destPath) - if err != nil { - if !os.IsNotExist(err) { - //logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err) - return err - } - destExists = false - } - - uidMaps, gidMaps := daemon.GetUIDGIDMaps() - archiver := &archive.Archiver{ - Untar: chrootarchive.Untar, - UIDMaps: uidMaps, - GIDMaps: gidMaps, - } - - if src.IsDir() { - // copy as directory - if err := archiver.CopyWithTar(srcPath, destPath); err != nil { - return err - } - return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) - } - if decompress && archive.IsArchivePath(srcPath) { - // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file) - - // First try to unpack the source as an archive - // to support the untar feature we need to clean up the path a little bit - // because tar is very forgiving. First we need to strip off the archive's - // filename from the path but this is only added if it does not end in slash - tarDest := destPath - if strings.HasSuffix(tarDest, string(os.PathSeparator)) { - tarDest = filepath.Dir(destPath) - } - - // try to successfully untar the orig - err := archiver.UntarPath(srcPath, tarDest) - /* - if err != nil { - logrus.Errorf("Couldn't untar to %s: %v", tarDest, err) - } - */ - return err - } - - // only needed for fixPermissions, but might as well put it before CopyFileWithTar - if destDir || (destExists && destStat.IsDir()) { - destPath = filepath.Join(destPath, src.Name()) - } - - if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil { - return err - } - if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil { - return err - } - - return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) -} diff --git a/daemon/archive_unix.go b/daemon/archive_unix.go deleted file mode 100644 index 47666fe5e..000000000 --- a/daemon/archive_unix.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build !windows - -package daemon - -import ( - "os" - "path/filepath" - - "github.com/docker/docker/container" -) - -// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it -// cannot be in a read-only volume. If it is not in a volume, the container -// cannot be configured with a read-only rootfs. 
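The fixPermissions helper just below walks the source tree and re-roots each discovered path under the destination before chowning it; a minimal sketch of that rebase step (paths are made up):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// rebase re-roots a path found under source onto destination, the same
// Rel-then-Join step fixPermissions below performs before calling Lchown.
func rebase(source, destination, fullpath string) (string, error) {
	rel, err := filepath.Rel(source, fullpath)
	if err != nil {
		return "", err
	}
	return filepath.Join(destination, rel), nil
}

func main() {
	p, err := rebase("/src", "/dst", "/src/a/b.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // /dst/a/b.txt
}
```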
-func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { - var toVolume bool - for _, mnt := range container.MountPoints { - if toVolume = mnt.HasResource(absPath); toVolume { - if mnt.RW { - break - } - return false, ErrVolumeReadonly - } - } - return toVolume, nil -} - -func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { - // If the destination didn't already exist, or the destination isn't a - // directory, then we should Lchown the destination. Otherwise, we shouldn't - // Lchown the destination. - destStat, err := os.Stat(destination) - if err != nil { - // This should *never* be reached, because the destination must've already - // been created while untar-ing the context. - return err - } - doChownDestination := !destExisted || !destStat.IsDir() - - // We Walk on the source rather than on the destination because we don't - // want to change permissions on things we haven't created or modified. - return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { - // Do not alter the walk root if it existed before, as it doesn't fall under - // the domain of "things we should chown". - if !doChownDestination && (source == fullpath) { - return nil - } - - // Path is prefixed by source: substitute with destination instead. - cleaned, err := filepath.Rel(source, fullpath) - if err != nil { - return err - } - - fullpath = filepath.Join(destination, cleaned) - return os.Lchown(fullpath, uid, gid) - }) -} diff --git a/daemon/archive_windows.go b/daemon/archive_windows.go deleted file mode 100644 index b3a104534..000000000 --- a/daemon/archive_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package daemon - -import "github.com/docker/docker/container" - -// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it -// cannot be in a read-only volume. If it is not in a volume, the container -// cannot be configured with a read-only rootfs. -// -// This is a no-op on Windows which does not support read-only volumes, or -// extracting to a mount point inside a volume. TODO Windows: FIXME Post-TP5 -func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { - return false, nil -} - -func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { - // chown is not supported on Windows - return nil -} diff --git a/daemon/attach.go b/daemon/attach.go deleted file mode 100644 index 3d4a51eea..000000000 --- a/daemon/attach.go +++ /dev/null @@ -1,147 +0,0 @@ -package daemon - -import ( - "fmt" - "io" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/errors" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/docker/pkg/term" -) - -// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. -func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { - keys := []byte{} - var err error - if c.DetachKeys != "" { - keys, err = term.ToBytes(c.DetachKeys) - if err != nil { - return fmt.Errorf("Invalid escape keys (%s) provided", c.DetachKeys) - } - } - - container, err := daemon.GetContainer(prefixOrName) - if err != nil { - return err - } - if container.IsPaused() { - err := fmt.Errorf("Container %s is paused.
Unpause the container before attach", prefixOrName) - return errors.NewRequestConflictError(err) - } - - inStream, outStream, errStream, err := c.GetStreams() - if err != nil { - return err - } - defer inStream.Close() - - if !container.Config.Tty && c.MuxStreams { - errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) - outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } - - var stdin io.ReadCloser - var stdout, stderr io.Writer - - if c.UseStdin { - stdin = inStream - } - if c.UseStdout { - stdout = outStream - } - if c.UseStderr { - stderr = errStream - } - - if err := daemon.containerAttach(container, stdin, stdout, stderr, c.Logs, c.Stream, keys); err != nil { - fmt.Fprintf(outStream, "Error attaching: %s\n", err) - } - return nil -} - -// ContainerAttachRaw attaches the provided streams to the container's stdio -func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error { - container, err := daemon.GetContainer(prefixOrName) - if err != nil { - return err - } - return daemon.containerAttach(container, stdin, stdout, stderr, false, stream, nil) -} - -func (daemon *Daemon) containerAttach(c *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool, keys []byte) error { - if logs { - logDriver, err := daemon.getLogger(c) - if err != nil { - return err - } - cLog, ok := logDriver.(logger.LogReader) - if !ok { - return logger.ErrReadLogsNotSupported - } - logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) - - LogLoop: - for { - select { - case msg, ok := <-logs.Msg: - if !ok { - break LogLoop - } - if msg.Source == "stdout" && stdout != nil { - stdout.Write(msg.Line) - } - if msg.Source == "stderr" && stderr != nil { - stderr.Write(msg.Line) - } - case err := <-logs.Err: - logrus.Errorf("Error streaming logs: %v", err) - break LogLoop - } - } - } - - daemon.LogContainerEvent(c, "attach") - - //stream - if stream { - var stdinPipe io.ReadCloser - if stdin != nil { - r, w := io.Pipe() - go func() { - defer w.Close() - defer logrus.Debug("Closing buffered stdin pipe") - io.Copy(w, stdin) - }() - stdinPipe = r - } - - waitChan := make(chan struct{}) - if c.Config.StdinOnce && !c.Config.Tty { - go func() { - c.WaitStop(-1 * time.Second) - close(waitChan) - }() - } - - err := <-c.Attach(stdinPipe, stdout, stderr, keys) - if err != nil { - if _, ok := err.(container.DetachError); ok { - daemon.LogContainerEvent(c, "detach") - } else { - logrus.Errorf("attach failed with error: %v", err) - } - } - - // If we are in stdinonce mode, wait for the process to end - // otherwise, simply return - if c.Config.StdinOnce && !c.Config.Tty { - <-waitChan - } - } - return nil -} diff --git a/daemon/auth.go b/daemon/auth.go deleted file mode 100644 index a1400d88a..000000000 --- a/daemon/auth.go +++ /dev/null @@ -1,13 +0,0 @@ -package daemon - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/dockerversion" - "github.com/docker/engine-api/types" -) - -// AuthenticateToRegistry checks the validity of credentials in authConfig -func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { - return daemon.RegistryService.Auth(ctx, authConfig, dockerversion.DockerUserAgent(ctx)) -} diff --git a/daemon/caps/utils_unix.go b/daemon/caps/utils_unix.go deleted file mode 100644 index c99485f51..000000000 --- a/daemon/caps/utils_unix.go +++ /dev/null @@ -1,131 +0,0 @@ -// +build !windows - -package caps - -import ( - "fmt" - 
"strings" - - "github.com/docker/docker/pkg/stringutils" - "github.com/syndtr/gocapability/capability" -) - -var capabilityList Capabilities - -func init() { - last := capability.CAP_LAST_CAP - // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap - if last == capability.Cap(63) { - last = capability.CAP_BLOCK_SUSPEND - } - for _, cap := range capability.List() { - if cap > last { - continue - } - capabilityList = append(capabilityList, - &CapabilityMapping{ - Key: "CAP_" + strings.ToUpper(cap.String()), - Value: cap, - }, - ) - } -} - -type ( - // CapabilityMapping maps linux capability name to its value of capability.Cap type - // Capabilities is one of the security systems in Linux Security Module (LSM) - // framework provided by the kernel. - // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html - CapabilityMapping struct { - Key string `json:"key,omitempty"` - Value capability.Cap `json:"value,omitempty"` - } - // Capabilities contains all CapabilityMapping - Capabilities []*CapabilityMapping -) - -// String returns of CapabilityMapping -func (c *CapabilityMapping) String() string { - return c.Key -} - -// GetCapability returns CapabilityMapping which contains specific key -func GetCapability(key string) *CapabilityMapping { - for _, capp := range capabilityList { - if capp.Key == key { - cpy := *capp - return &cpy - } - } - return nil -} - -// GetAllCapabilities returns all of the capabilities -func GetAllCapabilities() []string { - output := make([]string, len(capabilityList)) - for i, capability := range capabilityList { - output[i] = capability.String() - } - return output -} - -// TweakCapabilities can tweak capabilities by adding or dropping capabilities -// based on the basics capabilities. -func TweakCapabilities(basics, adds, drops []string) ([]string, error) { - var ( - newCaps []string - allCaps = GetAllCapabilities() - ) - - // FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix - // Currently they are mixed in here. We should do conversion in one place. 
- - // look for invalid cap in the drop list - for _, cap := range drops { - if strings.ToLower(cap) == "all" { - continue - } - - if !stringutils.InSlice(allCaps, "CAP_"+cap) { - return nil, fmt.Errorf("Unknown capability drop: %q", cap) - } - } - - // handle --cap-add=all - if stringutils.InSlice(adds, "all") { - basics = allCaps - } - - if !stringutils.InSlice(drops, "all") { - for _, cap := range basics { - // skip `all` already handled above - if strings.ToLower(cap) == "all" { - continue - } - - // if we don't drop `all`, add back all the non-dropped caps - if !stringutils.InSlice(drops, cap[4:]) { - newCaps = append(newCaps, strings.ToUpper(cap)) - } - } - } - - for _, cap := range adds { - // skip `all` already handled above - if strings.ToLower(cap) == "all" { - continue - } - - cap = "CAP_" + cap - - if !stringutils.InSlice(allCaps, cap) { - return nil, fmt.Errorf("Unknown capability to add: %q", cap) - } - - // add cap if not already in the list - if !stringutils.InSlice(newCaps, cap) { - newCaps = append(newCaps, strings.ToUpper(cap)) - } - } - return newCaps, nil -} diff --git a/daemon/changes.go b/daemon/changes.go deleted file mode 100644 index 7a58763cd..000000000 --- a/daemon/changes.go +++ /dev/null @@ -1,15 +0,0 @@ -package daemon - -import "github.com/docker/docker/pkg/archive" - -// ContainerChanges returns a list of container fs changes -func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - container.Lock() - defer container.Unlock() - return container.RWLayer.Changes() -} diff --git a/daemon/cluster/cluster.go b/daemon/cluster/cluster.go deleted file mode 100644 index fa3ece8fa..000000000 --- a/daemon/cluster/cluster.go +++ /dev/null @@ -1,1236 +0,0 @@ -package cluster - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "google.golang.org/grpc" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/daemon/cluster/convert" - executorpkg "github.com/docker/docker/daemon/cluster/executor" - "github.com/docker/docker/daemon/cluster/executor/container" - "github.com/docker/docker/errors" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/runconfig" - apitypes "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - types "github.com/docker/engine-api/types/swarm" - swarmagent "github.com/docker/swarmkit/agent" - swarmapi "github.com/docker/swarmkit/api" - "golang.org/x/net/context" -) - -const swarmDirName = "swarm" -const controlSocket = "control.sock" -const swarmConnectTimeout = 20 * time.Second -const stateFile = "docker-state.json" -const defaultAddr = "0.0.0.0:2377" - -const ( - initialReconnectDelay = 100 * time.Millisecond - maxReconnectDelay = 30 * time.Second -) - -// ErrNoSwarm is returned on leaving a cluster that was never initialized -var ErrNoSwarm = fmt.Errorf("This node is not part of Swarm") - -// ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated -var ErrSwarmExists = fmt.Errorf("This node is already part of a Swarm cluster. Use \"docker swarm leave\" to leave this cluster and join another one.") - -// ErrPendingSwarmExists is returned on initialize or join request for a cluster that is already processing a similar request but has not succeeded yet. 
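The reconnect loop later in this file doubles its delay after every failure and caps it at maxReconnectDelay; a standalone sketch of that backoff calculation:

```go
package main

import (
	"fmt"
	"time"
)

// nextDelay mirrors the backoff in reconnectOnFailure later in this file:
// double the delay on each failure, but never exceed the limit.
func nextDelay(d, limit time.Duration) time.Duration {
	d *= 2
	if d > limit {
		d = limit
	}
	return d
}

func main() {
	d := 100 * time.Millisecond // initialReconnectDelay in the deleted code
	for i := 0; i < 10; i++ {
		d = nextDelay(d, 30*time.Second) // maxReconnectDelay
	}
	fmt.Println(d) // capped at 30s
}
```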
-var ErrPendingSwarmExists = fmt.Errorf("This node is processing an existing join request that has not succeeded yet. Use \"docker swarm leave\" to cancel the current request.") - -// ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached. -var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. Attempt to join the cluster will continue in the background. Use \"docker info\" command to see the current Swarm status of your node.") - -// defaultSpec contains some sane defaults if cluster options are missing on init -var defaultSpec = types.Spec{ - Raft: types.RaftConfig{ - SnapshotInterval: 10000, - KeepOldSnapshots: 0, - LogEntriesForSlowFollowers: 500, - HeartbeatTick: 1, - ElectionTick: 3, - }, - CAConfig: types.CAConfig{ - NodeCertExpiry: 90 * 24 * time.Hour, - }, - Dispatcher: types.DispatcherConfig{ - HeartbeatPeriod: uint64((5 * time.Second).Nanoseconds()), - }, - Orchestration: types.OrchestrationConfig{ - TaskHistoryRetentionLimit: 10, - }, -} - -type state struct { - ListenAddr string -} - -// Config provides values for Cluster. -type Config struct { - Root string - Name string - Backend executorpkg.Backend -} - -// Cluster provides capabilities to participate in a cluster as a worker or a -// manager. -type Cluster struct { - sync.RWMutex - *node - root string - config Config - configEvent chan struct{} // todo: make this array and goroutine safe - listenAddr string - stop bool - err error - cancelDelay func() -} - -type node struct { - *swarmagent.Node - done chan struct{} - ready bool - conn *grpc.ClientConn - client swarmapi.ControlClient - reconnectDelay time.Duration -} - -// New creates a new Cluster instance using provided config. -func New(config Config) (*Cluster, error) { - root := filepath.Join(config.Root, swarmDirName) - if err := os.MkdirAll(root, 0700); err != nil { - return nil, err - } - c := &Cluster{ - root: root, - config: config, - configEvent: make(chan struct{}, 10), - } - - st, err := c.loadState() - if err != nil { - if os.IsNotExist(err) { - return c, nil - } - return nil, err - } - - n, err := c.startNewNode(false, st.ListenAddr, "", "", "", false) - if err != nil { - return nil, err - } - - select { - case <-time.After(swarmConnectTimeout): - logrus.Errorf("swarm component could not be started before timeout was reached") - case <-n.Ready(): - case <-n.done: - return nil, fmt.Errorf("swarm component could not be started: %v", c.err) - } - go c.reconnectOnFailure(n) - return c, nil -} - -func (c *Cluster) loadState() (*state, error) { - dt, err := ioutil.ReadFile(filepath.Join(c.root, stateFile)) - if err != nil { - return nil, err - } - // missing certificate means no actual state to restore from - if _, err := os.Stat(filepath.Join(c.root, "certificates/swarm-node.crt")); err != nil { - if os.IsNotExist(err) { - c.clearState() - } - return nil, err - } - var st state - if err := json.Unmarshal(dt, &st); err != nil { - return nil, err - } - return &st, nil -} - -func (c *Cluster) saveState() error { - dt, err := json.Marshal(state{ListenAddr: c.listenAddr}) - if err != nil { - return err - } - return ioutils.AtomicWriteFile(filepath.Join(c.root, stateFile), dt, 0600) -} - -func (c *Cluster) reconnectOnFailure(n *node) { - for { - <-n.done - c.Lock() - if c.stop || c.node != nil { - c.Unlock() - return - } - n.reconnectDelay *= 2 - if n.reconnectDelay > maxReconnectDelay { - n.reconnectDelay = maxReconnectDelay - } - logrus.Warnf("Restarting swarm in %.2f seconds", 
n.reconnectDelay.Seconds()) - delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay) - c.cancelDelay = cancel - c.Unlock() - <-delayCtx.Done() - if delayCtx.Err() != context.DeadlineExceeded { - return - } - c.Lock() - if c.node != nil { - c.Unlock() - return - } - var err error - n, err = c.startNewNode(false, c.listenAddr, c.getRemoteAddress(), "", "", false) - if err != nil { - c.err = err - close(n.done) - } - c.Unlock() - } -} - -func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, secret, cahash string, ismanager bool) (*node, error) { - if err := c.config.Backend.IsSwarmCompatible(); err != nil { - return nil, err - } - c.node = nil - c.cancelDelay = nil - c.stop = false - n, err := swarmagent.NewNode(&swarmagent.NodeConfig{ - Hostname: c.config.Name, - ForceNewCluster: forceNewCluster, - ListenControlAPI: filepath.Join(c.root, controlSocket), - ListenRemoteAPI: listenAddr, - JoinAddr: joinAddr, - StateDir: c.root, - CAHash: cahash, - Secret: secret, - Executor: container.NewExecutor(c.config.Backend), - HeartbeatTick: 1, - ElectionTick: 3, - IsManager: ismanager, - }) - if err != nil { - return nil, err - } - ctx := context.Background() - if err := n.Start(ctx); err != nil { - return nil, err - } - node := &node{ - Node: n, - done: make(chan struct{}), - reconnectDelay: initialReconnectDelay, - } - c.node = node - c.listenAddr = listenAddr - c.saveState() - c.config.Backend.SetClusterProvider(c) - go func() { - err := n.Err(ctx) - if err != nil { - logrus.Errorf("cluster exited with error: %v", err) - } - c.Lock() - c.node = nil - c.err = err - c.Unlock() - close(node.done) - }() - - go func() { - select { - case <-n.Ready(): - c.Lock() - node.ready = true - c.err = nil - c.Unlock() - case <-ctx.Done(): - } - c.configEvent <- struct{}{} - }() - - go func() { - for conn := range n.ListenControlSocket(ctx) { - c.Lock() - if node.conn != conn { - if conn == nil { - node.client = nil - } else { - node.client = swarmapi.NewControlClient(conn) - } - } - node.conn = conn - c.Unlock() - c.configEvent <- struct{}{} - } - }() - - return node, nil -} - -// Init initializes new cluster from user provided request. -func (c *Cluster) Init(req types.InitRequest) (string, error) { - c.Lock() - if node := c.node; node != nil { - if !req.ForceNewCluster { - c.Unlock() - return "", errSwarmExists(node) - } - if err := c.stopNode(); err != nil { - c.Unlock() - return "", err - } - } - - if err := validateAndSanitizeInitRequest(&req); err != nil { - c.Unlock() - return "", err - } - - // todo: check current state existing - n, err := c.startNewNode(req.ForceNewCluster, req.ListenAddr, "", "", "", false) - if err != nil { - c.Unlock() - return "", err - } - c.Unlock() - - select { - case <-n.Ready(): - if err := initClusterSpec(n, req.Spec); err != nil { - return "", err - } - go c.reconnectOnFailure(n) - return n.NodeID(), nil - case <-n.done: - c.RLock() - defer c.RUnlock() - if !req.ForceNewCluster { // if failure on first attempt don't keep state - if err := c.clearState(); err != nil { - return "", err - } - } - return "", c.err - } -} - -// Join makes current Cluster part of an existing swarm cluster. 
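stopNode below bounds node shutdown with a context deadline; a minimal sketch of that pattern, with a fake stop function standing in for node.Stop:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// stopWithTimeout mirrors stopNode below: give the shutdown call a deadline
// instead of waiting on it forever.
func stopWithTimeout(stop func(context.Context) error, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return stop(ctx)
}

func main() {
	// fakeStop stands in for node.Stop; it honors cancellation.
	fakeStop := func(ctx context.Context) error {
		select {
		case <-time.After(50 * time.Millisecond):
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	fmt.Println("stopped:", stopWithTimeout(fakeStop, time.Second))
}
```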
-// Join makes current Cluster part of an existing swarm cluster.
-func (c *Cluster) Join(req types.JoinRequest) error {
-	c.Lock()
-	if node := c.node; node != nil {
-		c.Unlock()
-		return errSwarmExists(node)
-	}
-	if err := validateAndSanitizeJoinRequest(&req); err != nil {
-		c.Unlock()
-		return err
-	}
-	// todo: check current state existing
-	n, err := c.startNewNode(false, req.ListenAddr, req.RemoteAddrs[0], req.Secret, req.CACertHash, req.Manager)
-	if err != nil {
-		c.Unlock()
-		return err
-	}
-	c.Unlock()
-
-	certificateRequested := n.CertificateRequested()
-	for {
-		select {
-		case <-certificateRequested:
-			if n.NodeMembership() == swarmapi.NodeMembershipPending {
-				return fmt.Errorf("Your node is in the process of joining the cluster but needs to be accepted by an existing cluster member.\nTo accept this node into the cluster, run \"docker node accept %v\" on an existing cluster manager. Use \"docker info\" command to see the current Swarm status of your node.", n.NodeID())
-			}
-			certificateRequested = nil
-		case <-time.After(swarmConnectTimeout):
-			// attempt to connect will continue in background, also reconnecting
-			go c.reconnectOnFailure(n)
-			return ErrSwarmJoinTimeoutReached
-		case <-n.Ready():
-			go c.reconnectOnFailure(n)
-			return nil
-		case <-n.done:
-			c.RLock()
-			defer c.RUnlock()
-			return c.err
-		}
-	}
-}
-
-// stopNode is a helper that stops the active c.node and waits until it has
-// shut down. Call while keeping the cluster lock.
-func (c *Cluster) stopNode() error {
-	if c.node == nil {
-		return nil
-	}
-	c.stop = true
-	if c.cancelDelay != nil {
-		c.cancelDelay()
-		c.cancelDelay = nil
-	}
-	node := c.node
-	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
-	defer cancel()
-	// TODO: can't hold lock on stop because it calls back to network
-	c.Unlock()
-	defer c.Lock()
-	if err := node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
-		return err
-	}
-	<-node.done
-	return nil
-}
-
-// Leave shuts down Cluster and removes current state.
-func (c *Cluster) Leave(force bool) error {
-	c.Lock()
-	node := c.node
-	if node == nil {
-		c.Unlock()
-		return ErrNoSwarm
-	}
-
-	if node.Manager() != nil && !force {
-		msg := "You are attempting to leave the cluster on a node that is participating as a manager. "
-		if c.isActiveManager() {
-			active, reachable, unreachable, err := c.managerStats()
-			if err == nil {
-				// after this manager leaves, reachable-1 managers remain; warn
-				// when that would lose Raft quorum or leave no failure tolerance
-				if active && reachable-2 <= unreachable {
-					if reachable == 1 && unreachable == 0 {
-						msg += "Removing the last manager will erase all current state of the cluster. Use `--force` to ignore this message. "
-						c.Unlock()
-						return fmt.Errorf(msg)
-					}
-					msg += fmt.Sprintf("Leaving the cluster will leave you with %v managers out of %v. This means Raft quorum will be lost and your cluster will become inaccessible. ", reachable-1, reachable+unreachable)
-				}
-			}
-		} else {
-			msg += "Doing so may cause your cluster to lose consensus. "
-		}
-
-		msg += "The only way to restore a cluster that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to ignore this message."
-		c.Unlock()
-		return fmt.Errorf(msg)
-	}
-	if err := c.stopNode(); err != nil {
-		c.Unlock()
-		return err
-	}
-	c.Unlock()
-	if nodeID := node.NodeID(); nodeID != "" {
-		for _, id := range c.config.Backend.ListContainersForNode(nodeID) {
-			if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil {
-				logrus.Errorf("error removing %v: %v", id, err)
-			}
-		}
-	}
-	c.configEvent <- struct{}{}
-	// todo: cleanup optional?
-	if err := c.clearState(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (c *Cluster) clearState() error {
-	// todo: backup this data instead of removing?
-	if err := os.RemoveAll(c.root); err != nil {
-		return err
-	}
-	if err := os.MkdirAll(c.root, 0700); err != nil {
-		return err
-	}
-	c.config.Backend.SetClusterProvider(nil)
-	return nil
-}
-
-func (c *Cluster) getRequestContext() context.Context { // TODO: not needed when requests don't block on quorum lost
-	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
-	return ctx
-}
-
-// Inspect retrieves the configuration properties of a managed swarm cluster.
-func (c *Cluster) Inspect() (types.Swarm, error) {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return types.Swarm{}, c.errNoManager()
-	}
-
-	swarm, err := getSwarm(c.getRequestContext(), c.client)
-	if err != nil {
-		return types.Swarm{}, err
-	}
-
-	return convert.SwarmFromGRPC(*swarm), nil
-}
-
-// Update updates the configuration of a managed swarm cluster.
-func (c *Cluster) Update(version uint64, spec types.Spec) error {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return c.errNoManager()
-	}
-
-	swarm, err := getSwarm(c.getRequestContext(), c.client)
-	if err != nil {
-		return err
-	}
-
-	swarmSpec, err := convert.SwarmSpecToGRPCandMerge(spec, &swarm.Spec)
-	if err != nil {
-		return err
-	}
-
-	_, err = c.client.UpdateCluster(
-		c.getRequestContext(),
-		&swarmapi.UpdateClusterRequest{
-			ClusterID: swarm.ID,
-			Spec:      &swarmSpec,
-			ClusterVersion: &swarmapi.Version{
-				Index: version,
-			},
-		},
-	)
-	return err
-}
-
-// IsManager returns true if Cluster is participating as a manager.
-func (c *Cluster) IsManager() bool {
-	c.RLock()
-	defer c.RUnlock()
-	return c.isActiveManager()
-}
-
-// IsAgent returns true if Cluster is participating as a worker/agent.
-func (c *Cluster) IsAgent() bool {
-	c.RLock()
-	defer c.RUnlock()
-	return c.node != nil && c.ready
-}
-
-// GetListenAddress returns the listening address for current manager's
-// consensus and dispatcher APIs.
-func (c *Cluster) GetListenAddress() string {
-	c.RLock()
-	defer c.RUnlock()
-	if c.isActiveManager() {
-		return c.listenAddr
-	}
-	return ""
-}
-
-// GetRemoteAddress returns a known advertise address of a remote manager if
-// available.
-// todo: change to array/connect with info
-func (c *Cluster) GetRemoteAddress() string {
-	c.RLock()
-	defer c.RUnlock()
-	return c.getRemoteAddress()
-}
-
-func (c *Cluster) getRemoteAddress() string {
-	if c.node == nil {
-		return ""
-	}
-	nodeID := c.node.NodeID()
-	for _, r := range c.node.Remotes() {
-		if r.NodeID != nodeID {
-			return r.Addr
-		}
-	}
-	return ""
-}
-
-// ListenClusterEvents returns a channel that receives messages on cluster
-// participation changes.
-// todo: make cancelable and accessible to multiple callers
-func (c *Cluster) ListenClusterEvents() <-chan struct{} {
-	return c.configEvent
-}
-
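Update sends the caller's version Index back with the new spec, so a concurrent modification makes the write fail instead of silently overwriting it. A minimal sketch of that optimistic-concurrency pattern, with a hypothetical in-memory store standing in for the cluster object behind UpdateCluster:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// store is an illustrative stand-in for the versioned cluster object.
type store struct {
	mu      sync.Mutex
	version uint64
	spec    string
}

var errConflict = errors.New("update out of sequence")

// update succeeds only if the caller read, and passes back, the current version.
func (s *store) update(version uint64, spec string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if version != s.version {
		return errConflict
	}
	s.spec = spec
	s.version++
	return nil
}

func main() {
	s := &store{version: 4, spec: "old"}
	fmt.Println(s.update(4, "new"))  // <nil>
	fmt.Println(s.update(4, "lost")) // update out of sequence: version is now 5
}
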
-// Info returns information about the current cluster state.
-func (c *Cluster) Info() types.Info {
-	var info types.Info
-	c.RLock()
-	defer c.RUnlock()
-
-	if c.node == nil {
-		info.LocalNodeState = types.LocalNodeStateInactive
-		if c.cancelDelay != nil {
-			info.LocalNodeState = types.LocalNodeStateError
-		}
-	} else {
-		info.LocalNodeState = types.LocalNodeStatePending
-		if c.ready {
-			info.LocalNodeState = types.LocalNodeStateActive
-		}
-	}
-	if c.err != nil {
-		info.Error = c.err.Error()
-	}
-	if c.isActiveManager() {
-		info.ControlAvailable = true
-		if r, err := c.client.ListNodes(c.getRequestContext(), &swarmapi.ListNodesRequest{}); err == nil {
-			info.Nodes = len(r.Nodes)
-			for _, n := range r.Nodes {
-				if n.ManagerStatus != nil {
-					info.Managers++
-				}
-			}
-		}
-
-		if swarm, err := getSwarm(c.getRequestContext(), c.client); err == nil && swarm != nil {
-			info.CACertHash = swarm.RootCA.CACertHash
-		}
-	}
-
-	if c.node != nil {
-		for _, r := range c.node.Remotes() {
-			info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr})
-		}
-		info.NodeID = c.node.NodeID()
-	}
-
-	return info
-}
-
-// isActiveManager should not be called without a read lock
-func (c *Cluster) isActiveManager() bool {
-	return c.node != nil && c.conn != nil
-}
-
-// errNoManager returns an error describing why manager commands can't be used.
-// Call with read lock.
-func (c *Cluster) errNoManager() error {
-	if c.node == nil {
-		return fmt.Errorf("This node is not a Swarm manager. Use \"docker swarm init\" or \"docker swarm join --manager\" to connect this node to Swarm and try again.")
-	}
-	if c.node.Manager() != nil {
-		return fmt.Errorf("This node is not a Swarm manager. Manager is being prepared or has trouble connecting to the cluster.")
-	}
-	return fmt.Errorf("This node is not a Swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.")
-}
-
-// GetServices returns all services of a managed swarm cluster.
-func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return nil, c.errNoManager()
-	}
-
-	filters, err := newListServicesFilters(options.Filter)
-	if err != nil {
-		return nil, err
-	}
-	r, err := c.client.ListServices(
-		c.getRequestContext(),
-		&swarmapi.ListServicesRequest{Filters: filters})
-	if err != nil {
-		return nil, err
-	}
-
-	services := []types.Service{}
-
-	for _, service := range r.Services {
-		services = append(services, convert.ServiceFromGRPC(*service))
-	}
-
-	return services, nil
-}
-
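Every manager-only call above runs under the short-lived context from getRequestContext, so a wedged or partitioned manager bounds the caller's wait at five seconds rather than hanging. A small standalone sketch of the effect, with the stalled RPC simulated by a timer:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Give the call its own deadline, as getRequestContext does above.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel() // release the timer as soon as we are done

	select {
	case <-time.After(time.Second): // simulate an RPC that never completes
		fmt.Println("rpc finished")
	case <-ctx.Done():
		fmt.Println("request aborted:", ctx.Err()) // context deadline exceeded
	}
}
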
-// CreateService creates a new service in a managed swarm cluster.
-func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (string, error) {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return "", c.errNoManager()
-	}
-
-	ctx := c.getRequestContext()
-
-	err := populateNetworkID(ctx, c.client, &s)
-	if err != nil {
-		return "", err
-	}
-
-	serviceSpec, err := convert.ServiceSpecToGRPC(s)
-	if err != nil {
-		return "", err
-	}
-
-	if encodedAuth != "" {
-		ctnr := serviceSpec.Task.GetContainer()
-		if ctnr == nil {
-			return "", fmt.Errorf("service does not use container tasks")
-		}
-		ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
-	}
-
-	r, err := c.client.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
-	if err != nil {
-		return "", err
-	}
-
-	return r.Service.ID, nil
-}
-
-// GetService returns a service based on an ID or name.
-func (c *Cluster) GetService(input string) (types.Service, error) {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return types.Service{}, c.errNoManager()
-	}
-
-	service, err := getService(c.getRequestContext(), c.client, input)
-	if err != nil {
-		return types.Service{}, err
-	}
-	return convert.ServiceFromGRPC(*service), nil
-}
-
-// UpdateService updates an existing service to match new properties.
-func (c *Cluster) UpdateService(serviceID string, version uint64, spec types.ServiceSpec, encodedAuth string) error {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return c.errNoManager()
-	}
-
-	ctx := c.getRequestContext()
-
-	err := populateNetworkID(ctx, c.client, &spec)
-	if err != nil {
-		return err
-	}
-
-	serviceSpec, err := convert.ServiceSpecToGRPC(spec)
-	if err != nil {
-		return err
-	}
-
-	if encodedAuth != "" {
-		ctnr := serviceSpec.Task.GetContainer()
-		if ctnr == nil {
-			return fmt.Errorf("service does not use container tasks")
-		}
-		ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
-	} else {
-		// if the encodedAuth isn't being updated, it shouldn't be lost;
-		// keep using the one that is already present
-		currentService, err := getService(c.getRequestContext(), c.client, serviceID)
-		if err != nil {
-			return err
-		}
-		ctnr := currentService.Spec.Task.GetContainer()
-		if ctnr == nil {
-			return fmt.Errorf("service does not use container tasks")
-		}
-		serviceSpec.Task.GetContainer().PullOptions = ctnr.PullOptions
-	}
-
-	_, err = c.client.UpdateService(
-		c.getRequestContext(),
-		&swarmapi.UpdateServiceRequest{
-			ServiceID: serviceID,
-			Spec:      &serviceSpec,
-			ServiceVersion: &swarmapi.Version{
-				Index: version,
-			},
-		},
-	)
-	return err
-}
-
-// RemoveService removes a service from a managed swarm cluster.
-func (c *Cluster) RemoveService(input string) error {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return c.errNoManager()
-	}
-
-	service, err := getService(c.getRequestContext(), c.client, input)
-	if err != nil {
-		return err
-	}
-
-	if _, err := c.client.RemoveService(c.getRequestContext(), &swarmapi.RemoveServiceRequest{ServiceID: service.ID}); err != nil {
-		return err
-	}
-	return nil
-}
-
-// GetNodes returns a list of all nodes known to a cluster.
-func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return nil, c.errNoManager()
-	}
-
-	filters, err := newListNodesFilters(options.Filter)
-	if err != nil {
-		return nil, err
-	}
-	r, err := c.client.ListNodes(
-		c.getRequestContext(),
-		&swarmapi.ListNodesRequest{Filters: filters})
-	if err != nil {
-		return nil, err
-	}
-
-	nodes := []types.Node{}
-
-	for _, node := range r.Nodes {
-		nodes = append(nodes, convert.NodeFromGRPC(*node))
-	}
-	return nodes, nil
-}
-
-// GetNode returns a node based on an ID or name.
-func (c *Cluster) GetNode(input string) (types.Node, error) {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return types.Node{}, c.errNoManager()
-	}
-
-	node, err := getNode(c.getRequestContext(), c.client, input)
-	if err != nil {
-		return types.Node{}, err
-	}
-	return convert.NodeFromGRPC(*node), nil
-}
-
-// UpdateNode updates an existing node's properties.
-func (c *Cluster) UpdateNode(nodeID string, version uint64, spec types.NodeSpec) error {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return c.errNoManager()
-	}
-
-	nodeSpec, err := convert.NodeSpecToGRPC(spec)
-	if err != nil {
-		return err
-	}
-
-	_, err = c.client.UpdateNode(
-		c.getRequestContext(),
-		&swarmapi.UpdateNodeRequest{
-			NodeID: nodeID,
-			Spec:   &nodeSpec,
-			NodeVersion: &swarmapi.Version{
-				Index: version,
-			},
-		},
-	)
-	return err
-}
-
-// RemoveNode removes a node from a cluster.
-func (c *Cluster) RemoveNode(input string) error {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return c.errNoManager()
-	}
-
-	ctx := c.getRequestContext()
-
-	node, err := getNode(ctx, c.client, input)
-	if err != nil {
-		return err
-	}
-
-	if _, err := c.client.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID}); err != nil {
-		return err
-	}
-	return nil
-}
-
-// GetTasks returns a list of tasks matching the filter options.
-func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return nil, c.errNoManager()
-	}
-
-	byName := func(filter filters.Args) error {
-		if filter.Include("service") {
-			serviceFilters := filter.Get("service")
-			for _, serviceFilter := range serviceFilters {
-				service, err := c.GetService(serviceFilter)
-				if err != nil {
-					return err
-				}
-				filter.Del("service", serviceFilter)
-				filter.Add("service", service.ID)
-			}
-		}
-		if filter.Include("node") {
-			nodeFilters := filter.Get("node")
-			for _, nodeFilter := range nodeFilters {
-				node, err := c.GetNode(nodeFilter)
-				if err != nil {
-					return err
-				}
-				filter.Del("node", nodeFilter)
-				filter.Add("node", node.ID)
-			}
-		}
-		return nil
-	}
-
-	filters, err := newListTasksFilters(options.Filter, byName)
-	if err != nil {
-		return nil, err
-	}
-	r, err := c.client.ListTasks(
-		c.getRequestContext(),
-		&swarmapi.ListTasksRequest{Filters: filters})
-	if err != nil {
-		return nil, err
-	}
-
-	tasks := []types.Task{}
-
-	for _, task := range r.Tasks {
-		tasks = append(tasks, convert.TaskFromGRPC(*task))
-	}
-	return tasks, nil
-}
-
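GetTasks rewrites user-supplied service and node names into IDs before handing the filters to swarmkit, deleting each name entry and re-adding the resolved ID in its place. The same rewrite in miniature, with a plain map standing in for the GetService/GetNode lookups (illustrative names only):

package main

import "fmt"

// resolve swaps each name for its ID, failing on unknown references,
// mirroring the byName closure above.
func resolve(byName map[string]string, values []string) ([]string, error) {
	out := make([]string, 0, len(values))
	for _, v := range values {
		id, ok := byName[v]
		if !ok {
			return nil, fmt.Errorf("no such reference: %s", v)
		}
		out = append(out, id) // replace the name with the resolved ID
	}
	return out, nil
}

func main() {
	services := map[string]string{"web": "o4wl5vflj"}
	fmt.Println(resolve(services, []string{"web"})) // [o4wl5vflj] <nil>
}
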
-// GetTask returns a task by an ID.
-func (c *Cluster) GetTask(input string) (types.Task, error) {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return types.Task{}, c.errNoManager()
-	}
-
-	task, err := getTask(c.getRequestContext(), c.client, input)
-	if err != nil {
-		return types.Task{}, err
-	}
-	return convert.TaskFromGRPC(*task), nil
-}
-
-// GetNetwork returns a cluster network by an ID.
-func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return apitypes.NetworkResource{}, c.errNoManager()
-	}
-
-	network, err := getNetwork(c.getRequestContext(), c.client, input)
-	if err != nil {
-		return apitypes.NetworkResource{}, err
-	}
-	return convert.BasicNetworkFromGRPC(*network), nil
-}
-
-// GetNetworks returns all current cluster managed networks.
-func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return nil, c.errNoManager()
-	}
-
-	r, err := c.client.ListNetworks(c.getRequestContext(), &swarmapi.ListNetworksRequest{})
-	if err != nil {
-		return nil, err
-	}
-
-	var networks []apitypes.NetworkResource
-
-	for _, network := range r.Networks {
-		networks = append(networks, convert.BasicNetworkFromGRPC(*network))
-	}
-
-	return networks, nil
-}
-
-// CreateNetwork creates a new cluster managed network.
-func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return "", c.errNoManager()
-	}
-
-	if runconfig.IsPreDefinedNetwork(s.Name) {
-		err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name)
-		return "", errors.NewRequestForbiddenError(err)
-	}
-
-	networkSpec := convert.BasicNetworkCreateToGRPC(s)
-	r, err := c.client.CreateNetwork(c.getRequestContext(), &swarmapi.CreateNetworkRequest{Spec: &networkSpec})
-	if err != nil {
-		return "", err
-	}
-
-	return r.Network.ID, nil
-}
-
-// RemoveNetwork removes a cluster network.
-func (c *Cluster) RemoveNetwork(input string) error {
-	c.RLock()
-	defer c.RUnlock()
-
-	if !c.isActiveManager() {
-		return c.errNoManager()
-	}
-
-	network, err := getNetwork(c.getRequestContext(), c.client, input)
-	if err != nil {
-		return err
-	}
-
-	if _, err := c.client.RemoveNetwork(c.getRequestContext(), &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}); err != nil {
-		return err
-	}
-	return nil
-}
-
-func populateNetworkID(ctx context.Context, c swarmapi.ControlClient, s *types.ServiceSpec) error {
-	for i, n := range s.Networks {
-		apiNetwork, err := getNetwork(ctx, c, n.Target)
-		if err != nil {
-			return err
-		}
-		s.Networks[i].Target = apiNetwork.ID
-	}
-	return nil
-}
-
-func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) {
-	// GetNetwork to match via full ID.
-	rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input})
-	if err != nil {
-		// If any error (including NotFound), ListNetworks to match via ID prefix and full name.
-		rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{Names: []string{input}}})
-		if err != nil || len(rl.Networks) == 0 {
-			rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{IDPrefixes: []string{input}}})
-		}
-
-		if err != nil {
-			return nil, err
-		}
-
-		if len(rl.Networks) == 0 {
-			return nil, fmt.Errorf("network %s not found", input)
-		}
-
-		if l := len(rl.Networks); l > 1 {
-			return nil, fmt.Errorf("network %s is ambiguous (%d matches found)", input, l)
-		}
-
-		return rl.Networks[0], nil
-	}
-	return rg.Network, nil
-}
-
-// Cleanup stops the active swarm node. This is run before daemon shutdown.
-func (c *Cluster) Cleanup() {
-	c.Lock()
-	node := c.node
-	if node == nil {
-		c.Unlock()
-		return
-	}
-	defer c.Unlock()
-	if c.isActiveManager() {
-		active, reachable, unreachable, err := c.managerStats()
-		if err == nil {
-			singlenode := active && reachable == 1 && unreachable == 0
-			if active && !singlenode && reachable-2 <= unreachable {
-				logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable)
-			}
-		}
-	}
-	c.stopNode()
-}
-
-func (c *Cluster) managerStats() (current bool, reachable int, unreachable int, err error) {
-	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
-	defer cancel()
-	nodes, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{})
-	if err != nil {
-		return false, 0, 0, err
-	}
-	for _, n := range nodes.Nodes {
-		if n.ManagerStatus != nil {
-			if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_REACHABLE {
-				reachable++
-				if n.ID == c.node.NodeID() {
-					current = true
-				}
-			}
-			if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE {
-				unreachable++
-			}
-		}
-	}
-	return
-}
-
-func validateAndSanitizeInitRequest(req *types.InitRequest) error {
-	var err error
-	req.ListenAddr, err = validateAddr(req.ListenAddr)
-	if err != nil {
-		return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err)
-	}
-
-	spec := &req.Spec
-	// provide sane defaults instead of erroring
-	if spec.Name == "" {
-		spec.Name = "default"
-	}
-	if spec.Raft.SnapshotInterval == 0 {
-		spec.Raft.SnapshotInterval = defaultSpec.Raft.SnapshotInterval
-	}
-	if spec.Raft.LogEntriesForSlowFollowers == 0 {
-		spec.Raft.LogEntriesForSlowFollowers = defaultSpec.Raft.LogEntriesForSlowFollowers
-	}
-	if spec.Raft.ElectionTick == 0 {
-		spec.Raft.ElectionTick = defaultSpec.Raft.ElectionTick
-	}
-	if spec.Raft.HeartbeatTick == 0 {
-		spec.Raft.HeartbeatTick = defaultSpec.Raft.HeartbeatTick
-	}
-	if spec.Dispatcher.HeartbeatPeriod == 0 {
-		spec.Dispatcher.HeartbeatPeriod = defaultSpec.Dispatcher.HeartbeatPeriod
-	}
-	if spec.CAConfig.NodeCertExpiry == 0 {
-		spec.CAConfig.NodeCertExpiry = defaultSpec.CAConfig.NodeCertExpiry
-	}
-	if spec.Orchestration.TaskHistoryRetentionLimit == 0 {
-		spec.Orchestration.TaskHistoryRetentionLimit = defaultSpec.Orchestration.TaskHistoryRetentionLimit
-	}
-	return nil
-}
-
-func validateAndSanitizeJoinRequest(req *types.JoinRequest) error {
-	var err error
-	req.ListenAddr, err = validateAddr(req.ListenAddr)
-	if err != nil {
-		return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err)
-	}
-	if len(req.RemoteAddrs) == 0 {
-		return fmt.Errorf("at least 1 RemoteAddr is required to join")
-	}
-	for i := range req.RemoteAddrs {
-		req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i])
-		if err != nil {
-			return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err)
-		}
-	}
-	if req.CACertHash != "" {
-		if _, err := digest.ParseDigest(req.CACertHash); err != nil {
-			return fmt.Errorf("invalid CACertHash %q, %v", req.CACertHash, err)
-		}
-	}
-	return nil
-}
-
-func validateAddr(addr string) (string, error) {
-	if addr == "" {
-		return addr, fmt.Errorf("invalid empty address")
-	}
-	newaddr, err := opts.ParseTCPAddr(addr, defaultAddr)
-	if err != nil {
-		// not fatal: an address that does not parse as TCP is passed through unchanged
-		return addr, nil
-	}
-	return strings.TrimPrefix(newaddr, "tcp://"), nil
-}
-
-func errSwarmExists(node *node) error {
-	if node.NodeMembership() != swarmapi.NodeMembershipAccepted {
-		return ErrPendingSwarmExists
-	}
-	return ErrSwarmExists
-}
-
-func initClusterSpec(node *node, spec types.Spec) error {
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-	defer cancel()
-	for conn := range node.ListenControlSocket(ctx) {
-		if ctx.Err() != nil {
-			return ctx.Err()
-		}
-		if conn != nil {
-			client := swarmapi.NewControlClient(conn)
-			var cluster *swarmapi.Cluster
-			for i := 0; ; i++ {
-				lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{})
-				if err != nil {
-					return fmt.Errorf("error on listing clusters: %v", err)
-				}
-				if len(lcr.Clusters) == 0 {
-					if i < 10 {
-						time.Sleep(200 * time.Millisecond)
-						continue
-					}
-					return fmt.Errorf("empty list of clusters was returned")
-				}
-				cluster = lcr.Clusters[0]
-				break
-			}
-			newspec, err := convert.SwarmSpecToGRPCandMerge(spec, &cluster.Spec)
-			if err != nil {
-				return fmt.Errorf("error updating cluster settings: %v", err)
-			}
-			_, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{
-				ClusterID:      cluster.ID,
-				ClusterVersion: &cluster.Meta.Version,
-				Spec:           &newspec,
-			})
-			if err != nil {
-				return fmt.Errorf("error updating cluster settings: %v", err)
-			}
-			return nil
-		}
-	}
-	return ctx.Err()
-}
diff --git a/daemon/cluster/convert/container.go b/daemon/cluster/convert/container.go
deleted file mode 100644
index 83cc5342b..000000000
--- a/daemon/cluster/convert/container.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package convert
-
-import (
-	"fmt"
-	"strings"
-
-	types "github.com/docker/engine-api/types/swarm"
-	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
-)
-
-func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec {
-	containerSpec := types.ContainerSpec{
-		Image:   c.Image,
-		Labels:  c.Labels,
-		Command: c.Command,
-		Args:    c.Args,
-		Env:     c.Env,
-		Dir:     c.Dir,
-		User:    c.User,
-	}
-
-	// Mounts
-	for _, m := range c.Mounts {
-		mount := types.Mount{
-			Target:   m.Target,
-			Source:   m.Source,
-			Type:     types.MountType(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])),
-			ReadOnly: m.ReadOnly,
-		}
-
-		if m.BindOptions != nil {
-			mount.BindOptions = &types.BindOptions{
-				Propagation: types.MountPropagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])),
-			}
-		}
-
-		if m.VolumeOptions != nil {
-			mount.VolumeOptions = &types.VolumeOptions{
-				NoCopy: m.VolumeOptions.NoCopy,
-				Labels: m.VolumeOptions.Labels,
-			}
-			if m.VolumeOptions.DriverConfig != nil {
-				mount.VolumeOptions.DriverConfig = &types.Driver{
-					Name:    m.VolumeOptions.DriverConfig.Name,
-					Options: m.VolumeOptions.DriverConfig.Options,
-				}
-			}
-		}
-		containerSpec.Mounts = append(containerSpec.Mounts, mount)
-	}
-
-	if c.StopGracePeriod != nil {
-		grace, _ := ptypes.Duration(c.StopGracePeriod)
-		containerSpec.StopGracePeriod = &grace
-	}
-	return containerSpec
-}
-
-func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
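-	// The string-typed Mount.Type and BindOptions.Propagation fields are
-	// mapped onto swarmkit's generated protobuf enums via the *_value
-	// lookup maps below; a non-empty name with no enum counterpart is
-	// rejected with an error.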
-	containerSpec := &swarmapi.ContainerSpec{
-		Image:   c.Image,
-		Labels:  c.Labels,
-		Command: c.Command,
-		Args:    c.Args,
-		Env:     c.Env,
-		Dir:     c.Dir,
-		User:    c.User,
-	}
-
-	if c.StopGracePeriod != nil {
-		containerSpec.StopGracePeriod = ptypes.DurationProto(*c.StopGracePeriod)
-	}
-
-	// Mounts
-	for _, m := range c.Mounts {
-		mount := swarmapi.Mount{
-			Target:   m.Target,
-			Source:   m.Source,
-			ReadOnly: m.ReadOnly,
-		}
-
-		if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok {
-			mount.Type = swarmapi.Mount_MountType(mountType)
-		} else if string(m.Type) != "" {
-			return nil, fmt.Errorf("invalid MountType: %q", m.Type)
-		}
-
-		if m.BindOptions != nil {
-			if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok {
-				mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)}
-			} else if string(m.BindOptions.Propagation) != "" {
-				return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation)
-			}
-		}
-
-		if m.VolumeOptions != nil {
-			mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{
-				NoCopy: m.VolumeOptions.NoCopy,
-				Labels: m.VolumeOptions.Labels,
-			}
-			if m.VolumeOptions.DriverConfig != nil {
-				mount.VolumeOptions.DriverConfig = &swarmapi.Driver{
-					Name:    m.VolumeOptions.DriverConfig.Name,
-					Options: m.VolumeOptions.DriverConfig.Options,
-				}
-			}
-		}
-
-		containerSpec.Mounts = append(containerSpec.Mounts, mount)
-	}
-
-	return containerSpec, nil
-}
diff --git a/daemon/cluster/convert/network.go b/daemon/cluster/convert/network.go
deleted file mode 100644
index 6bb9a8185..000000000
--- a/daemon/cluster/convert/network.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package convert
-
-import (
-	"strings"
-
-	basictypes "github.com/docker/engine-api/types"
-	networktypes "github.com/docker/engine-api/types/network"
-	types "github.com/docker/engine-api/types/swarm"
-	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
-)
-
-func networkAttachmentFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment {
-	if na != nil {
-		return types.NetworkAttachment{
-			Network:   networkFromGRPC(na.Network),
-			Addresses: na.Addresses,
-		}
-	}
-	return types.NetworkAttachment{}
-}
-
-func networkFromGRPC(n *swarmapi.Network) types.Network {
-	if n != nil {
-		network := types.Network{
-			ID: n.ID,
-			Spec: types.NetworkSpec{
-				IPv6Enabled: n.Spec.Ipv6Enabled,
-				Internal:    n.Spec.Internal,
-				IPAMOptions: ipamFromGRPC(n.Spec.IPAM),
-			},
-			IPAMOptions: ipamFromGRPC(n.IPAM),
-		}
-
-		// Meta
-		network.Version.Index = n.Meta.Version.Index
-		network.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt)
-		network.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt)
-
-		// Annotations
-		network.Spec.Name = n.Spec.Annotations.Name
-		network.Spec.Labels = n.Spec.Annotations.Labels
-
-		// DriverConfiguration
-		if n.Spec.DriverConfig != nil {
-			network.Spec.DriverConfiguration = &types.Driver{
-				Name:    n.Spec.DriverConfig.Name,
-				Options: n.Spec.DriverConfig.Options,
-			}
-		}
-
-		// DriverState
-		if n.DriverState != nil {
-			network.DriverState = types.Driver{
-				Name:    n.DriverState.Name,
-				Options: n.DriverState.Options,
-			}
-		}
-
-		return network
-	}
-	return types.Network{}
-}
-
-func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions {
-	var ipam *types.IPAMOptions
-	if i != nil {
-		ipam = &types.IPAMOptions{}
-		if i.Driver != nil {
-			ipam.Driver.Name = i.Driver.Name
-			ipam.Driver.Options = i.Driver.Options
-		}
-
-		for _, config := range i.Configs {
-			ipam.Configs = append(ipam.Configs, types.IPAMConfig{
-				Subnet:  config.Subnet,
-				Range:   config.Range,
-				Gateway: config.Gateway,
-			})
-		}
-	}
-	return ipam
-}
-
-func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec {
-	var endpointSpec *types.EndpointSpec
-	if es != nil {
-		endpointSpec = &types.EndpointSpec{}
-		endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String()))
-
-		for _, portState := range es.Ports {
-			endpointSpec.Ports = append(endpointSpec.Ports, types.PortConfig{
-				Name:          portState.Name,
-				Protocol:      types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])),
-				TargetPort:    portState.TargetPort,
-				PublishedPort: portState.PublishedPort,
-			})
-		}
-	}
-	return endpointSpec
-}
-
-func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint {
-	endpoint := types.Endpoint{}
-	if e != nil {
-		if espec := endpointSpecFromGRPC(e.Spec); espec != nil {
-			endpoint.Spec = *espec
-		}
-
-		for _, portState := range e.Ports {
-			endpoint.Ports = append(endpoint.Ports, types.PortConfig{
-				Name:          portState.Name,
-				Protocol:      types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])),
-				TargetPort:    portState.TargetPort,
-				PublishedPort: portState.PublishedPort,
-			})
-		}
-
-		for _, v := range e.VirtualIPs {
-			endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{
-				NetworkID: v.NetworkID,
-				Addr:      v.Addr})
-		}
-	}
-
-	return endpoint
-}
-
-// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource.
-func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource {
-	spec := n.Spec
-	var ipam networktypes.IPAM
-	if spec.IPAM != nil {
-		if spec.IPAM.Driver != nil {
-			ipam.Driver = spec.IPAM.Driver.Name
-			ipam.Options = spec.IPAM.Driver.Options
-		}
-		ipam.Config = make([]networktypes.IPAMConfig, 0, len(spec.IPAM.Configs))
-		for _, ic := range spec.IPAM.Configs {
-			ipamConfig := networktypes.IPAMConfig{
-				Subnet:     ic.Subnet,
-				IPRange:    ic.Range,
-				Gateway:    ic.Gateway,
-				AuxAddress: ic.Reserved,
-			}
-			ipam.Config = append(ipam.Config, ipamConfig)
-		}
-	}
-
-	nr := basictypes.NetworkResource{
-		ID:         n.ID,
-		Name:       n.Spec.Annotations.Name,
-		Scope:      "swarm",
-		EnableIPv6: spec.Ipv6Enabled,
-		IPAM:       ipam,
-		Internal:   spec.Internal,
-		Labels:     n.Spec.Annotations.Labels,
-	}
-
-	if n.DriverState != nil {
-		nr.Driver = n.DriverState.Name
-		nr.Options = n.DriverState.Options
-	}
-
-	return nr
-}
-
-// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec.
-func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec {
-	ns := swarmapi.NetworkSpec{
-		Annotations: swarmapi.Annotations{
-			Name:   create.Name,
-			Labels: create.Labels,
-		},
-		DriverConfig: &swarmapi.Driver{
-			Name:    create.Driver,
-			Options: create.Options,
-		},
-		Ipv6Enabled: create.EnableIPv6,
-		Internal:    create.Internal,
-		IPAM: &swarmapi.IPAMOptions{
-			Driver: &swarmapi.Driver{
-				Name:    create.IPAM.Driver,
-				Options: create.IPAM.Options,
-			},
-		},
-	}
-	ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config))
-	for _, ipamConfig := range create.IPAM.Config {
-		ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{
-			Subnet:  ipamConfig.Subnet,
-			Range:   ipamConfig.IPRange,
-			Gateway: ipamConfig.Gateway,
-		})
-	}
-	ns.IPAM.Configs = ipamSpec
-	return ns
-}
diff --git a/daemon/cluster/convert/node.go b/daemon/cluster/convert/node.go
deleted file mode 100644
index 04c40ff11..000000000
--- a/daemon/cluster/convert/node.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package convert
-
-import (
-	"fmt"
-	"strings"
-
-	types "github.com/docker/engine-api/types/swarm"
-	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
-)
-
-// NodeFromGRPC converts a grpc Node to a Node.
-func NodeFromGRPC(n swarmapi.Node) types.Node {
-	node := types.Node{
-		ID: n.ID,
-		Spec: types.NodeSpec{
-			Role:         types.NodeRole(strings.ToLower(n.Spec.Role.String())),
-			Membership:   types.NodeMembership(strings.ToLower(n.Spec.Membership.String())),
-			Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())),
-		},
-		Status: types.NodeStatus{
-			State:   types.NodeState(strings.ToLower(n.Status.State.String())),
-			Message: n.Status.Message,
-		},
-	}
-
-	// Meta
-	node.Version.Index = n.Meta.Version.Index
-	node.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt)
-	node.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt)
-
-	// Annotations
-	node.Spec.Name = n.Spec.Annotations.Name
-	node.Spec.Labels = n.Spec.Annotations.Labels
-
-	// Description
-	if n.Description != nil {
-		node.Description.Hostname = n.Description.Hostname
-		if n.Description.Platform != nil {
-			node.Description.Platform.Architecture = n.Description.Platform.Architecture
-			node.Description.Platform.OS = n.Description.Platform.OS
-		}
-		if n.Description.Resources != nil {
-			node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs
-			node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes
-		}
-		if n.Description.Engine != nil {
-			node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion
-			node.Description.Engine.Labels = n.Description.Engine.Labels
-			for _, plugin := range n.Description.Engine.Plugins {
-				node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name})
-			}
-		}
-	}
-
-	// Manager
-	if n.ManagerStatus != nil {
-		node.ManagerStatus = &types.ManagerStatus{
-			Leader:       n.ManagerStatus.Leader,
-			Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Reachability.String())),
-			Addr:         n.ManagerStatus.Addr,
-		}
-	}
-
-	return node
-}
-
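The converters in this file family lean on the protobuf-generated name and value maps: grpc-to-engine lowercases the *_name entry, engine-to-grpc uppercases the string and looks it up in *_value, returning an error for any name with no enum counterpart. A toy round-trip with hand-rolled maps in place of the generated ones (hypothetical names):

package main

import (
	"fmt"
	"strings"
)

// Illustrative stand-ins for protobuf-generated enum name/value maps.
var protocolName = map[int32]string{0: "TCP", 1: "UDP"}
var protocolValue = map[string]int32{"TCP": 0, "UDP": 1}

// protocolToAPI mirrors the grpc-to-engine direction: name map, lowercased.
func protocolToAPI(p int32) string { return strings.ToLower(protocolName[p]) }

// protocolFromAPI mirrors the engine-to-grpc direction: uppercase, value map,
// and reject names the enum does not know.
func protocolFromAPI(s string) (int32, error) {
	v, ok := protocolValue[strings.ToUpper(s)]
	if !ok {
		return 0, fmt.Errorf("invalid protocol: %q", s)
	}
	return v, nil
}

func main() {
	fmt.Println(protocolToAPI(1))       // "udp"
	fmt.Println(protocolFromAPI("tcp")) // 0 <nil>
}
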
-// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec.
-func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) {
-	spec := swarmapi.NodeSpec{
-		Annotations: swarmapi.Annotations{
-			Name:   s.Name,
-			Labels: s.Labels,
-		},
-	}
-	if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok {
-		spec.Role = swarmapi.NodeRole(role)
-	} else {
-		return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role)
-	}
-
-	if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(string(s.Membership))]; ok {
-		spec.Membership = swarmapi.NodeSpec_Membership(membership)
-	} else {
-		return swarmapi.NodeSpec{}, fmt.Errorf("invalid Membership: %q", s.Membership)
-	}
-
-	if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok {
-		spec.Availability = swarmapi.NodeSpec_Availability(availability)
-	} else {
-		return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability)
-	}
-
-	return spec, nil
-}
diff --git a/daemon/cluster/convert/service.go b/daemon/cluster/convert/service.go
deleted file mode 100644
index 912d60135..000000000
--- a/daemon/cluster/convert/service.go
+++ /dev/null
@@ -1,253 +0,0 @@
-package convert
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/docker/docker/pkg/namesgenerator"
-	types "github.com/docker/engine-api/types/swarm"
-	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
-)
-
-// ServiceFromGRPC converts a grpc Service to a Service.
-func ServiceFromGRPC(s swarmapi.Service) types.Service {
-	spec := s.Spec
-	containerConfig := spec.Task.Runtime.(*swarmapi.TaskSpec_Container).Container
-
-	networks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks))
-	for _, n := range spec.Networks {
-		networks = append(networks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
-	}
-	service := types.Service{
-		ID: s.ID,
-
-		Spec: types.ServiceSpec{
-			TaskTemplate: types.TaskSpec{
-				ContainerSpec: containerSpecFromGRPC(containerConfig),
-				Resources:     resourcesFromGRPC(s.Spec.Task.Resources),
-				RestartPolicy: restartPolicyFromGRPC(s.Spec.Task.Restart),
-				Placement:     placementFromGRPC(s.Spec.Task.Placement),
-			},
-
-			Networks:     networks,
-			EndpointSpec: endpointSpecFromGRPC(s.Spec.Endpoint),
-		},
-		Endpoint: endpointFromGRPC(s.Endpoint),
-	}
-
-	// Meta
-	service.Version.Index = s.Meta.Version.Index
-	service.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt)
-	service.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt)
-
-	// Annotations
-	service.Spec.Name = s.Spec.Annotations.Name
-	service.Spec.Labels = s.Spec.Annotations.Labels
-
-	// UpdateConfig
-	if s.Spec.Update != nil {
-		service.Spec.UpdateConfig = &types.UpdateConfig{
-			Parallelism: s.Spec.Update.Parallelism,
-		}
-
-		service.Spec.UpdateConfig.Delay, _ = ptypes.Duration(&s.Spec.Update.Delay)
-	}
-
-	// Mode
-	switch t := s.Spec.GetMode().(type) {
-	case *swarmapi.ServiceSpec_Global:
-		service.Spec.Mode.Global = &types.GlobalService{}
-	case *swarmapi.ServiceSpec_Replicated:
-		service.Spec.Mode.Replicated = &types.ReplicatedService{
-			Replicas: &t.Replicated.Replicas,
-		}
-	}
-
-	return service
-}
-
-// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec.
-func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) {
-	name := s.Name
-	if name == "" {
-		name = namesgenerator.GetRandomName(0)
-	}
-
-	networks := make([]*swarmapi.ServiceSpec_NetworkAttachmentConfig, 0, len(s.Networks))
-	for _, n := range s.Networks {
-		networks = append(networks, &swarmapi.ServiceSpec_NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
-	}
-
-	spec := swarmapi.ServiceSpec{
-		Annotations: swarmapi.Annotations{
-			Name:   name,
-			Labels: s.Labels,
-		},
-		Task: swarmapi.TaskSpec{
-			Resources: resourcesToGRPC(s.TaskTemplate.Resources),
-		},
-		Networks: networks,
-	}
-
-	containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec)
-	if err != nil {
-		return swarmapi.ServiceSpec{}, err
-	}
-	spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec}
-
-	restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy)
-	if err != nil {
-		return swarmapi.ServiceSpec{}, err
-	}
-	spec.Task.Restart = restartPolicy
-
-	if s.TaskTemplate.Placement != nil {
-		spec.Task.Placement = &swarmapi.Placement{
-			Constraints: s.TaskTemplate.Placement.Constraints,
-		}
-	}
-
-	if s.UpdateConfig != nil {
-		spec.Update = &swarmapi.UpdateConfig{
-			Parallelism: s.UpdateConfig.Parallelism,
-			Delay:       *ptypes.DurationProto(s.UpdateConfig.Delay),
-		}
-	}
-
-	if s.EndpointSpec != nil {
-		if s.EndpointSpec.Mode != "" &&
-			s.EndpointSpec.Mode != types.ResolutionModeVIP &&
-			s.EndpointSpec.Mode != types.ResolutionModeDNSRR {
-			return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode)
-		}
-
-		spec.Endpoint = &swarmapi.EndpointSpec{}
-
-		spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))])
-
-		for _, portConfig := range s.EndpointSpec.Ports {
-			spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{
-				Name:          portConfig.Name,
-				Protocol:      swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]),
-				TargetPort:    portConfig.TargetPort,
-				PublishedPort: portConfig.PublishedPort,
-			})
-		}
-	}
-
-	// Mode
-	if s.Mode.Global != nil {
-		spec.Mode = &swarmapi.ServiceSpec_Global{
-			Global: &swarmapi.GlobalService{},
-		}
-	} else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil {
-		spec.Mode = &swarmapi.ServiceSpec_Replicated{
-			Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas},
-		}
-	} else {
-		spec.Mode = &swarmapi.ServiceSpec_Replicated{
-			Replicated: &swarmapi.ReplicatedService{Replicas: 1},
-		}
-	}
-
-	return spec, nil
-}
-
-func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements {
-	var resources *types.ResourceRequirements
-	if res != nil {
-		resources = &types.ResourceRequirements{}
-		if res.Limits != nil {
-			resources.Limits = &types.Resources{
-				NanoCPUs:    res.Limits.NanoCPUs,
-				MemoryBytes: res.Limits.MemoryBytes,
-			}
-		}
-		if res.Reservations != nil {
-			resources.Reservations = &types.Resources{
-				NanoCPUs:    res.Reservations.NanoCPUs,
-				MemoryBytes: res.Reservations.MemoryBytes,
-			}
-		}
-	}
-
-	return resources
-}
-
-func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements {
-	var reqs *swarmapi.ResourceRequirements
-	if res != nil {
-		reqs = &swarmapi.ResourceRequirements{}
-		if res.Limits != nil {
-			reqs.Limits = &swarmapi.Resources{
-				NanoCPUs:    res.Limits.NanoCPUs,
-				MemoryBytes: res.Limits.MemoryBytes,
-			}
-		}
-		if res.Reservations != nil {
-			reqs.Reservations = &swarmapi.Resources{
-				NanoCPUs:    res.Reservations.NanoCPUs,
-				MemoryBytes: res.Reservations.MemoryBytes,
-			}
-		}
-	}
-	return reqs
-}
-
-func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy {
-	var rp *types.RestartPolicy
-	if p != nil {
-		rp = &types.RestartPolicy{}
-		rp.Condition = types.RestartPolicyCondition(strings.ToLower(p.Condition.String()))
-		if p.Delay != nil {
-			delay, _ := ptypes.Duration(p.Delay)
-			rp.Delay = &delay
-		}
-		if p.Window != nil {
-			window, _ := ptypes.Duration(p.Window)
-			rp.Window = &window
-		}
-
-		rp.MaxAttempts = &p.MaxAttempts
-	}
-	return rp
-}
-
-func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) {
-	var rp *swarmapi.RestartPolicy
-	if p != nil {
-		rp = &swarmapi.RestartPolicy{}
-		sanitizedCondition := strings.ToUpper(strings.Replace(string(p.Condition), "-", "_", -1))
-		if condition, ok := swarmapi.RestartPolicy_RestartCondition_value[sanitizedCondition]; ok {
-			rp.Condition = swarmapi.RestartPolicy_RestartCondition(condition)
-		} else if string(p.Condition) == "" {
-			rp.Condition = swarmapi.RestartOnAny
-		} else {
-			return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition)
-		}
-
-		if p.Delay != nil {
-			rp.Delay = ptypes.DurationProto(*p.Delay)
-		}
-		if p.Window != nil {
-			rp.Window = ptypes.DurationProto(*p.Window)
-		}
-		if p.MaxAttempts != nil {
-			rp.MaxAttempts = *p.MaxAttempts
-		}
-	}
-	return rp, nil
-}
-
-func placementFromGRPC(p *swarmapi.Placement) *types.Placement {
-	var r *types.Placement
-	if p != nil {
-		r = &types.Placement{}
-		r.Constraints = p.Constraints
-	}
-
-	return r
-}
diff --git a/daemon/cluster/convert/swarm.go b/daemon/cluster/convert/swarm.go
deleted file mode 100644
index 331c199c8..000000000
--- a/daemon/cluster/convert/swarm.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package convert
-
-import (
-	"fmt"
-	"strings"
-	"time"
-
-	"golang.org/x/crypto/bcrypt"
-
-	types "github.com/docker/engine-api/types/swarm"
-	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
-)
-
-// SwarmFromGRPC converts a grpc Cluster to a Swarm.
-func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
-	swarm := types.Swarm{
-		ID: c.ID,
-		Spec: types.Spec{
-			Orchestration: types.OrchestrationConfig{
-				TaskHistoryRetentionLimit: c.Spec.Orchestration.TaskHistoryRetentionLimit,
-			},
-			Raft: types.RaftConfig{
-				SnapshotInterval:           c.Spec.Raft.SnapshotInterval,
-				KeepOldSnapshots:           c.Spec.Raft.KeepOldSnapshots,
-				LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers,
-				HeartbeatTick:              c.Spec.Raft.HeartbeatTick,
-				ElectionTick:               c.Spec.Raft.ElectionTick,
-			},
-		},
-	}
-
-	heartbeatPeriod, _ := ptypes.Duration(c.Spec.Dispatcher.HeartbeatPeriod)
-	swarm.Spec.Dispatcher.HeartbeatPeriod = uint64(heartbeatPeriod)
-
-	swarm.Spec.CAConfig.NodeCertExpiry, _ = ptypes.Duration(c.Spec.CAConfig.NodeCertExpiry)
-
-	for _, ca := range c.Spec.CAConfig.ExternalCAs {
-		swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{
-			Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())),
-			URL:      ca.URL,
-			Options:  ca.Options,
-		})
-	}
-
-	// Meta
-	swarm.Version.Index = c.Meta.Version.Index
-	swarm.CreatedAt, _ = ptypes.Timestamp(c.Meta.CreatedAt)
-	swarm.UpdatedAt, _ = ptypes.Timestamp(c.Meta.UpdatedAt)
-
-	// Annotations
-	swarm.Spec.Name = c.Spec.Annotations.Name
-	swarm.Spec.Labels = c.Spec.Annotations.Labels
-
-	for _, policy := range c.Spec.AcceptancePolicy.Policies {
-		p := types.Policy{
-			Role:       types.NodeRole(strings.ToLower(policy.Role.String())),
-			Autoaccept: policy.Autoaccept,
-		}
-		if policy.Secret != nil {
-			secret := string(policy.Secret.Data)
-			p.Secret = &secret
-		}
-		swarm.Spec.AcceptancePolicy.Policies = append(swarm.Spec.AcceptancePolicy.Policies, p)
-	}
-
-	return swarm
-}
-
-// SwarmSpecToGRPCandMerge converts a Spec to a grpc ClusterSpec and merges the
-// AcceptancePolicy from an existing grpc ClusterSpec if provided.
-func SwarmSpecToGRPCandMerge(s types.Spec, existingSpec *swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) {
-	spec := swarmapi.ClusterSpec{
-		Annotations: swarmapi.Annotations{
-			Name:   s.Name,
-			Labels: s.Labels,
-		},
-		Orchestration: swarmapi.OrchestrationConfig{
-			TaskHistoryRetentionLimit: s.Orchestration.TaskHistoryRetentionLimit,
-		},
-		Raft: swarmapi.RaftConfig{
-			SnapshotInterval:           s.Raft.SnapshotInterval,
-			KeepOldSnapshots:           s.Raft.KeepOldSnapshots,
-			LogEntriesForSlowFollowers: s.Raft.LogEntriesForSlowFollowers,
-			HeartbeatTick:              s.Raft.HeartbeatTick,
-			ElectionTick:               s.Raft.ElectionTick,
-		},
-		Dispatcher: swarmapi.DispatcherConfig{
-			HeartbeatPeriod: ptypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod)),
-		},
-		CAConfig: swarmapi.CAConfig{
-			NodeCertExpiry: ptypes.DurationProto(s.CAConfig.NodeCertExpiry),
-		},
-	}
-
-	for _, ca := range s.CAConfig.ExternalCAs {
-		protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))]
-		if !ok {
-			return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol)
-		}
-		spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{
-			Protocol: swarmapi.ExternalCA_CAProtocol(protocol),
-			URL:      ca.URL,
-			Options:  ca.Options,
-		})
-	}
-
-	if err := SwarmSpecUpdateAcceptancePolicy(&spec, s.AcceptancePolicy, existingSpec); err != nil {
-		return swarmapi.ClusterSpec{}, err
-	}
-
-	return spec, nil
-}
-
-// SwarmSpecUpdateAcceptancePolicy updates a grpc ClusterSpec using AcceptancePolicy.
-func SwarmSpecUpdateAcceptancePolicy(spec *swarmapi.ClusterSpec, acceptancePolicy types.AcceptancePolicy, oldSpec *swarmapi.ClusterSpec) error {
-	spec.AcceptancePolicy.Policies = nil
-	hashes := make(map[string][]byte)
-
-	for _, p := range acceptancePolicy.Policies {
-		role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(p.Role))]
-		if !ok {
-			return fmt.Errorf("invalid Role: %q", p.Role)
-		}
-
-		policy := &swarmapi.AcceptancePolicy_RoleAdmissionPolicy{
-			Role:       swarmapi.NodeRole(role),
-			Autoaccept: p.Autoaccept,
-		}
-
-		if p.Secret != nil {
-			if *p.Secret == "" { // if provided secret is empty, it means erase previous secret.
-				policy.Secret = nil
-			} else { // if provided secret is not empty, we generate a new one.
-				hashPwd, ok := hashes[*p.Secret]
-				if !ok {
-					hashPwd, _ = bcrypt.GenerateFromPassword([]byte(*p.Secret), 0)
-					hashes[*p.Secret] = hashPwd
-				}
-				policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_Secret{
-					Data: hashPwd,
-					Alg:  "bcrypt",
-				}
-			}
-		} else if oldSecret := getOldSecret(oldSpec, policy.Role); oldSecret != nil { // else use the old one.
-			policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_Secret{
-				Data: oldSecret.Data,
-				Alg:  oldSecret.Alg,
-			}
-		}
-
-		spec.AcceptancePolicy.Policies = append(spec.AcceptancePolicy.Policies, policy)
-	}
-	return nil
-}
-
-func getOldSecret(oldSpec *swarmapi.ClusterSpec, role swarmapi.NodeRole) *swarmapi.AcceptancePolicy_RoleAdmissionPolicy_Secret {
-	if oldSpec == nil {
-		return nil
-	}
-	for _, p := range oldSpec.AcceptancePolicy.Policies {
-		if p.Role == role {
-			return p.Secret
-		}
-	}
-	return nil
-}
diff --git a/daemon/cluster/convert/task.go b/daemon/cluster/convert/task.go
deleted file mode 100644
index b701ae36c..000000000
--- a/daemon/cluster/convert/task.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package convert
-
-import (
-	"strings"
-
-	types "github.com/docker/engine-api/types/swarm"
-	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
-)
-
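SwarmSpecUpdateAcceptancePolicy above hashes each distinct secret once, caching by plaintext, and passes cost 0 to bcrypt, which falls back to the library default. Because bcrypt hashes are salted, a later check goes through CompareHashAndPassword rather than byte comparison. A small standalone sketch of that round trip:

package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	secret := []byte("join-secret")

	// Cost 0 falls back to bcrypt.DefaultCost, as in the conversion above.
	hash, err := bcrypt.GenerateFromPassword(secret, 0)
	if err != nil {
		panic(err)
	}

	// Hashes are salted, so equality is checked with CompareHashAndPassword.
	fmt.Println(bcrypt.CompareHashAndPassword(hash, secret))          // <nil>
	fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("wrong"))) // mismatch error
}
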
-// TaskFromGRPC converts a grpc Task to a Task.
-func TaskFromGRPC(t swarmapi.Task) types.Task {
-	containerConfig := t.Spec.Runtime.(*swarmapi.TaskSpec_Container).Container
-	containerStatus := t.Status.GetContainer()
-	task := types.Task{
-		ID:        t.ID,
-		ServiceID: t.ServiceID,
-		Slot:      int(t.Slot),
-		NodeID:    t.NodeID,
-		Spec: types.TaskSpec{
-			ContainerSpec: containerSpecFromGRPC(containerConfig),
-			Resources:     resourcesFromGRPC(t.Spec.Resources),
-			RestartPolicy: restartPolicyFromGRPC(t.Spec.Restart),
-			Placement:     placementFromGRPC(t.Spec.Placement),
-		},
-		Status: types.TaskStatus{
-			State:   types.TaskState(strings.ToLower(t.Status.State.String())),
-			Message: t.Status.Message,
-			Err:     t.Status.Err,
-		},
-		DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())),
-	}
-
-	// Meta
-	task.Version.Index = t.Meta.Version.Index
-	task.CreatedAt, _ = ptypes.Timestamp(t.Meta.CreatedAt)
-	task.UpdatedAt, _ = ptypes.Timestamp(t.Meta.UpdatedAt)
-
-	task.Status.Timestamp, _ = ptypes.Timestamp(t.Status.Timestamp)
-
-	if containerStatus != nil {
-		task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID
-		task.Status.ContainerStatus.PID = int(containerStatus.PID)
-		task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode)
-	}
-
-	// NetworksAttachments
-	for _, na := range t.Networks {
-		task.NetworksAttachments = append(task.NetworksAttachments, networkAttachmentFromGRPC(na))
-	}
-
-	return task
-}
diff --git a/daemon/cluster/executor/backend.go b/daemon/cluster/executor/backend.go
deleted file mode 100644
index 3a4ff0fc1..000000000
--- a/daemon/cluster/executor/backend.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package executor
-
-import (
-	"io"
-	"time"
-
-	clustertypes "github.com/docker/docker/daemon/cluster/provider"
-	"github.com/docker/engine-api/types"
-	"github.com/docker/engine-api/types/container"
-	"github.com/docker/engine-api/types/events"
-	"github.com/docker/engine-api/types/filters"
-	"github.com/docker/engine-api/types/network"
-	"github.com/docker/libnetwork/cluster"
-	networktypes "github.com/docker/libnetwork/types"
-	"golang.org/x/net/context"
-)
-
-// Backend defines the executor component for a swarm agent.
-type Backend interface {
-	CreateManagedNetwork(clustertypes.NetworkCreateRequest) error
-	DeleteManagedNetwork(name string) error
-	SetupIngress(req clustertypes.NetworkCreateRequest, nodeIP string) error
-	PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
-	CreateManagedContainer(config types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error)
-	ContainerStart(name string, hostConfig *container.HostConfig, validateHostname bool) error
-	ContainerStop(name string, seconds int) error
-	ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
-	UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error
-	ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error)
-	ContainerWaitWithContext(ctx context.Context, name string) error
-	ContainerRm(name string, config *types.ContainerRmConfig) error
-	ContainerKill(name string, sig uint64) error
-	SystemInfo() (*types.Info, error)
-	VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error)
-	ListContainersForNode(nodeID string) []string
-	SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error
-	SetClusterProvider(provider cluster.Provider)
-	IsSwarmCompatible() error
-	SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{})
-	UnsubscribeFromEvents(listener chan interface{})
-}
diff --git a/daemon/cluster/executor/container/adapter.go b/daemon/cluster/executor/container/adapter.go
deleted file mode 100644
index 38ff63afc..000000000
--- a/daemon/cluster/executor/container/adapter.go
+++ /dev/null
@@ -1,273 +0,0 @@
-package container
-
-import (
-	"encoding/base64"
-	"encoding/json"
-	"fmt"
-	"io"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/server/httputils"
-	executorpkg "github.com/docker/docker/daemon/cluster/executor"
-	"github.com/docker/engine-api/types"
-	"github.com/docker/engine-api/types/events"
-	"github.com/docker/engine-api/types/versions"
-	"github.com/docker/libnetwork"
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/log"
-	"golang.org/x/net/context"
-)
-
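The pullImage method below expects RegistryAuth as URL-safe base64 over a JSON document, and deliberately tolerates bad input (it just logs a warning and pulls anonymously). The decode half, runnable on its own, with a pared-down AuthConfig standing in for engine-api's type:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// AuthConfig is an illustrative subset of engine-api's types.AuthConfig.
type AuthConfig struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

func main() {
	// Encode the way clients do: JSON, then URL-safe base64.
	enc := base64.URLEncoding.EncodeToString([]byte(`{"username":"u","password":"p"}`))

	// Decode the way pullImage below does, tolerating bad input.
	var auth AuthConfig
	if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(enc))).Decode(&auth); err != nil {
		fmt.Println("invalid authconfig:", err) // would only be logged, not fatal
	}
	fmt.Println(auth.Username) // "u"
}
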
-// containerAdapter conducts remote operations for a container. All calls
-// are mostly naked calls to the client API, seeded with information from
-// containerConfig.
-type containerAdapter struct {
-	backend   executorpkg.Backend
-	container *containerConfig
-}
-
-func newContainerAdapter(b executorpkg.Backend, task *api.Task) (*containerAdapter, error) {
-	ctnr, err := newContainerConfig(task)
-	if err != nil {
-		return nil, err
-	}
-
-	return &containerAdapter{
-		container: ctnr,
-		backend:   b,
-	}, nil
-}
-
-func (c *containerAdapter) pullImage(ctx context.Context) error {
-	spec := c.container.spec()
-
-	// if the image needs to be pulled, the auth config will be retrieved and updated
-	var encodedAuthConfig string
-	if spec.PullOptions != nil {
-		encodedAuthConfig = spec.PullOptions.RegistryAuth
-	}
-
-	authConfig := &types.AuthConfig{}
-	if encodedAuthConfig != "" {
-		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
-			logrus.Warnf("invalid authconfig: %v", err)
-		}
-	}
-
-	pr, pw := io.Pipe()
-	metaHeaders := map[string][]string{}
-	go func() {
-		err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw)
-		pw.CloseWithError(err)
-	}()
-
-	dec := json.NewDecoder(pr)
-	m := map[string]interface{}{}
-	for {
-		if err := dec.Decode(&m); err != nil {
-			if err == io.EOF {
-				break
-			}
-			return err
-		}
-		// TODO(stevvooe): Report this status somewhere.
-		logrus.Debugln("pull progress", m)
-	}
-	// if the final stream object contained an error, return it
-	if errMsg, ok := m["error"]; ok {
-		return fmt.Errorf("%v", errMsg)
-	}
-	return nil
-}
-
-func (c *containerAdapter) createNetworks(ctx context.Context) error {
-	for _, network := range c.container.networks() {
-		ncr, err := c.container.networkCreateRequest(network)
-		if err != nil {
-			return err
-		}
-
-		if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing
-			if _, ok := err.(libnetwork.NetworkNameError); ok {
-				continue
-			}
-
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *containerAdapter) removeNetworks(ctx context.Context) error {
-	for _, nid := range c.container.networks() {
-		if err := c.backend.DeleteManagedNetwork(nid); err != nil {
-			if _, ok := err.(*libnetwork.ActiveEndpointsError); ok {
-				continue
-			}
-			log.G(ctx).Errorf("network %s remove failed: %v", nid, err)
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *containerAdapter) create(ctx context.Context, backend executorpkg.Backend) error {
-	var cr types.ContainerCreateResponse
-	var err error
-	version := httputils.VersionFromContext(ctx)
-	validateHostname := versions.GreaterThanOrEqualTo(version, "1.24")
-
-	if cr, err = backend.CreateManagedContainer(types.ContainerCreateConfig{
-		Name:       c.container.name(),
-		Config:     c.container.config(),
-		HostConfig: c.container.hostConfig(),
-		// Use the first network in container create
-		NetworkingConfig: c.container.createNetworkingConfig(),
-	}, validateHostname); err != nil {
-		return err
-	}
-
-	// Docker daemon currently doesn't support multiple networks in container create
-	// Connect to all other networks
-	nc := c.container.connectNetworkingConfig()
-
-	if nc != nil {
-		for n, ep := range nc.EndpointsConfig {
-			if err := backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
-				return err
-			}
-		}
-	}
-
-	if err := backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (c *containerAdapter) start(ctx context.Context) error {
-	version := httputils.VersionFromContext(ctx)
-	validateHostname := versions.GreaterThanOrEqualTo(version, "1.24")
-	return c.backend.ContainerStart(c.container.name(), nil, validateHostname)
c.backend.ContainerStart(c.container.name(), nil, validateHostname) -} - -func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) { - cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false) - if ctx.Err() != nil { - return types.ContainerJSON{}, ctx.Err() - } - if err != nil { - return types.ContainerJSON{}, err - } - return *cs, nil -} - -// events issues a call to the events API and returns a channel with all -// events. The stream of events can be shutdown by cancelling the context. -func (c *containerAdapter) events(ctx context.Context) <-chan events.Message { - log.G(ctx).Debugf("waiting on events") - buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter()) - eventsq := make(chan events.Message, len(buffer)) - - for _, event := range buffer { - eventsq <- event - } - - go func() { - defer c.backend.UnsubscribeFromEvents(l) - - for { - select { - case ev := <-l: - jev, ok := ev.(events.Message) - if !ok { - log.G(ctx).Warnf("unexpected event message: %q", ev) - continue - } - select { - case eventsq <- jev: - case <-ctx.Done(): - return - } - case <-ctx.Done(): - return - } - } - }() - - return eventsq -} - -func (c *containerAdapter) wait(ctx context.Context) error { - return c.backend.ContainerWaitWithContext(ctx, c.container.name()) -} - -func (c *containerAdapter) shutdown(ctx context.Context) error { - // Default stop grace period to 10s. - stopgrace := 10 - spec := c.container.spec() - if spec.StopGracePeriod != nil { - stopgrace = int(spec.StopGracePeriod.Seconds) - } - return c.backend.ContainerStop(c.container.name(), stopgrace) -} - -func (c *containerAdapter) terminate(ctx context.Context) error { - return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL)) -} - -func (c *containerAdapter) remove(ctx context.Context) error { - return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{ - RemoveVolume: true, - ForceRemove: true, - }) -} - -func (c *containerAdapter) createVolumes(ctx context.Context, backend executorpkg.Backend) error { - // Create plugin volumes that are embedded inside a Mount - for _, mount := range c.container.task.Spec.GetContainer().Mounts { - if mount.Type != api.MountTypeVolume { - continue - } - - if mount.VolumeOptions == nil { - continue - } - - if mount.VolumeOptions.DriverConfig == nil { - continue - } - - req := c.container.volumeCreateRequest(&mount) - - // Check if this volume exists on the engine - if _, err := backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil { - // TODO(amitshukla): Today, volume create through the engine api does not return an error - // when the named volume with the same parameters already exists. - // It returns an error if the driver name is different - that is a valid error - return err - } - - } - - return nil -} - -// todo: typed/wrapped errors -func isContainerCreateNameConflict(err error) bool { - return strings.Contains(err.Error(), "Conflict. 
The name") -} - -func isUnknownContainer(err error) bool { - return strings.Contains(err.Error(), "No such container:") -} - -func isStoppedContainer(err error) bool { - return strings.Contains(err.Error(), "is already stopped") -} diff --git a/daemon/cluster/executor/container/container.go b/daemon/cluster/executor/container/container.go deleted file mode 100644 index 31782be6d..000000000 --- a/daemon/cluster/executor/container/container.go +++ /dev/null @@ -1,504 +0,0 @@ -package container - -import ( - "errors" - "fmt" - "net" - "strings" - "time" - - "github.com/Sirupsen/logrus" - - clustertypes "github.com/docker/docker/daemon/cluster/provider" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - enginecontainer "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/network" - "github.com/docker/swarmkit/agent/exec" - "github.com/docker/swarmkit/api" -) - -const ( - // Explicitly use the kernel's default setting for CPU quota of 100ms. - // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt - cpuQuotaPeriod = 100 * time.Millisecond - - // systemLabelPrefix represents the reserved namespace for system labels. - systemLabelPrefix = "com.docker.swarm" -) - -// containerConfig converts task properties into docker container compatible -// components. -type containerConfig struct { - task *api.Task - networksAttachments map[string]*api.NetworkAttachment -} - -// newContainerConfig returns a validated container config. No methods should -// return an error if this function returns without error. -func newContainerConfig(t *api.Task) (*containerConfig, error) { - var c containerConfig - return &c, c.setTask(t) -} - -func (c *containerConfig) setTask(t *api.Task) error { - container := t.Spec.GetContainer() - if container == nil { - return exec.ErrRuntimeUnsupported - } - - if container.Image == "" { - return ErrImageRequired - } - - // index the networks by name - c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks)) - for _, attachment := range t.Networks { - c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment - } - - c.task = t - return nil -} - -func (c *containerConfig) endpoint() *api.Endpoint { - return c.task.Endpoint -} - -func (c *containerConfig) spec() *api.ContainerSpec { - return c.task.Spec.GetContainer() -} - -func (c *containerConfig) name() string { - if c.task.Annotations.Name != "" { - // if set, use the container Annotations.Name field, set in the orchestrator. - return c.task.Annotations.Name - } - - // fallback to service.slot.id. - return strings.Join([]string{c.task.ServiceAnnotations.Name, fmt.Sprint(c.task.Slot), c.task.ID}, ".") -} - -func (c *containerConfig) image() string { - raw := c.spec().Image - ref, err := reference.ParseNamed(raw) - if err != nil { - return raw - } - return reference.WithDefaultTag(ref).String() -} - -func (c *containerConfig) config() *enginecontainer.Config { - config := &enginecontainer.Config{ - Labels: c.labels(), - User: c.spec().User, - Env: c.spec().Env, - WorkingDir: c.spec().Dir, - Image: c.image(), - Volumes: c.volumes(), - } - - if len(c.spec().Command) > 0 { - // If Command is provided, we replace the whole invocation with Command - // by replacing Entrypoint and specifying Cmd. Args is ignored in this - // case. - config.Entrypoint = append(config.Entrypoint, c.spec().Command...) 
- config.Cmd = append(config.Cmd, c.spec().Args...) - } else if len(c.spec().Args) > 0 { - // In this case, we assume the image has an Entrypoint and Args - // specifies the arguments for that entrypoint. - config.Cmd = c.spec().Args - } - - return config -} - -func (c *containerConfig) labels() map[string]string { - var ( - system = map[string]string{ - "task": "", // mark as cluster task - "task.id": c.task.ID, - "task.name": fmt.Sprintf("%v.%v", c.task.ServiceAnnotations.Name, c.task.Slot), - "node.id": c.task.NodeID, - "service.id": c.task.ServiceID, - "service.name": c.task.ServiceAnnotations.Name, - } - labels = make(map[string]string) - ) - - // base labels are those defined in the spec. - for k, v := range c.spec().Labels { - labels[k] = v - } - - // we then apply the overrides from the task, which may be set via the - // orchestrator. - for k, v := range c.task.Annotations.Labels { - labels[k] = v - } - - // finally, we apply the system labels, which override all labels. - for k, v := range system { - labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v - } - - return labels -} - -// volumes gets placed into the Volumes field on the containerConfig. -func (c *containerConfig) volumes() map[string]struct{} { - r := make(map[string]struct{}) - // Volumes *only* creates anonymous volumes. The rest is mixed in with - // binds, which aren't actually binds. Basically, any volume that - // results in a single component must be added here. - // - // This is reversed engineered from the behavior of the engine API. - for _, mount := range c.spec().Mounts { - if mount.Type == api.MountTypeVolume && mount.Source == "" { - r[mount.Target] = struct{}{} - } - } - return r -} - -func (c *containerConfig) tmpfs() map[string]string { - r := make(map[string]string) - - for _, spec := range c.spec().Mounts { - if spec.Type != api.MountTypeTmpfs { - continue - } - - r[spec.Target] = getMountMask(&spec) - } - - return r -} - -func (c *containerConfig) binds() []string { - var r []string - for _, mount := range c.spec().Mounts { - if mount.Type == api.MountTypeBind || (mount.Type == api.MountTypeVolume && mount.Source != "") { - spec := fmt.Sprintf("%s:%s", mount.Source, mount.Target) - mask := getMountMask(&mount) - if mask != "" { - spec = fmt.Sprintf("%s:%s", spec, mask) - } - r = append(r, spec) - } - } - return r -} - -func getMountMask(m *api.Mount) string { - var maskOpts []string - if m.ReadOnly { - maskOpts = append(maskOpts, "ro") - } - - switch m.Type { - case api.MountTypeVolume: - if m.VolumeOptions != nil && m.VolumeOptions.NoCopy { - maskOpts = append(maskOpts, "nocopy") - } - case api.MountTypeBind: - if m.BindOptions == nil { - break - } - - switch m.BindOptions.Propagation { - case api.MountPropagationPrivate: - maskOpts = append(maskOpts, "private") - case api.MountPropagationRPrivate: - maskOpts = append(maskOpts, "rprivate") - case api.MountPropagationShared: - maskOpts = append(maskOpts, "shared") - case api.MountPropagationRShared: - maskOpts = append(maskOpts, "rshared") - case api.MountPropagationSlave: - maskOpts = append(maskOpts, "slave") - case api.MountPropagationRSlave: - maskOpts = append(maskOpts, "rslave") - } - case api.MountTypeTmpfs: - if m.TmpfsOptions == nil { - break - } - - if m.TmpfsOptions.Mode != 0 { - maskOpts = append(maskOpts, fmt.Sprintf("mode=%o", m.TmpfsOptions.Mode)) - } - - if m.TmpfsOptions.SizeBytes != 0 { - // calculate suffix here, making this linux specific, but that is - // okay, since API is that way anyways. 
- - // we do this by finding the suffix that divides evenly into the - // value, returing the value itself, with no suffix, if it fails. - // - // For the most part, we don't enforce any semantic to this values. - // The operating system will usually align this and enforce minimum - // and maximums. - var ( - size = m.TmpfsOptions.SizeBytes - suffix string - ) - for _, r := range []struct { - suffix string - divisor int64 - }{ - {"g", 1 << 30}, - {"m", 1 << 20}, - {"k", 1 << 10}, - } { - if size%r.divisor == 0 { - size = size / r.divisor - suffix = r.suffix - break - } - } - - maskOpts = append(maskOpts, fmt.Sprintf("size=%d%s", size, suffix)) - } - } - - return strings.Join(maskOpts, ",") -} - -func (c *containerConfig) hostConfig() *enginecontainer.HostConfig { - hc := &enginecontainer.HostConfig{ - Resources: c.resources(), - Binds: c.binds(), - Tmpfs: c.tmpfs(), - } - - if c.task.LogDriver != nil { - hc.LogConfig = enginecontainer.LogConfig{ - Type: c.task.LogDriver.Name, - Config: c.task.LogDriver.Options, - } - } - - return hc -} - -// This handles the case of volumes that are defined inside a service Mount -func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *types.VolumeCreateRequest { - var ( - driverName string - driverOpts map[string]string - labels map[string]string - ) - - if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil { - driverName = mount.VolumeOptions.DriverConfig.Name - driverOpts = mount.VolumeOptions.DriverConfig.Options - labels = mount.VolumeOptions.Labels - } - - if mount.VolumeOptions != nil { - return &types.VolumeCreateRequest{ - Name: mount.Source, - Driver: driverName, - DriverOpts: driverOpts, - Labels: labels, - } - } - return nil -} - -func (c *containerConfig) resources() enginecontainer.Resources { - resources := enginecontainer.Resources{} - - // If no limits are specified let the engine use its defaults. - // - // TODO(aluzzardi): We might want to set some limits anyway otherwise - // "unlimited" tasks will step over the reservation of other tasks. - r := c.task.Spec.Resources - if r == nil || r.Limits == nil { - return resources - } - - if r.Limits.MemoryBytes > 0 { - resources.Memory = r.Limits.MemoryBytes - } - - if r.Limits.NanoCPUs > 0 { - // CPU Period must be set in microseconds. - resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond) - resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9 - } - - return resources -} - -// Docker daemon supports just 1 network during container create. -func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig { - var networks []*api.NetworkAttachment - if c.task.Spec.GetContainer() != nil { - networks = c.task.Networks - } - - epConfig := make(map[string]*network.EndpointSettings) - if len(networks) > 0 { - epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0]) - } - - return &network.NetworkingConfig{EndpointsConfig: epConfig} -} - -// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create -func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig { - var networks []*api.NetworkAttachment - if c.task.Spec.GetContainer() != nil { - networks = c.task.Networks - } - - // First network is used during container create. 
Other networks are used in "docker network connect" - if len(networks) < 2 { - return nil - } - - epConfig := make(map[string]*network.EndpointSettings) - for _, na := range networks[1:] { - epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na) - } - return &network.NetworkingConfig{EndpointsConfig: epConfig} -} - -func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings { - var ipv4, ipv6 string - for _, addr := range na.Addresses { - ip, _, err := net.ParseCIDR(addr) - if err != nil { - continue - } - - if ip.To4() != nil { - ipv4 = ip.String() - continue - } - - if ip.To16() != nil { - ipv6 = ip.String() - } - } - - return &network.EndpointSettings{ - IPAMConfig: &network.EndpointIPAMConfig{ - IPv4Address: ipv4, - IPv6Address: ipv6, - }, - } -} - -func (c *containerConfig) virtualIP(networkID string) string { - if c.task.Endpoint == nil { - return "" - } - - for _, eVip := range c.task.Endpoint.VirtualIPs { - // We only support IPv4 VIPs for now. - if eVip.NetworkID == networkID { - vip, _, err := net.ParseCIDR(eVip.Addr) - if err != nil { - return "" - } - - return vip.String() - } - } - - return "" -} - -func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig { - if len(c.task.Networks) == 0 { - return nil - } - - logrus.Debugf("Creating service config in agent for t = %+v", c.task) - svcCfg := &clustertypes.ServiceConfig{ - Name: c.task.ServiceAnnotations.Name, - Aliases: make(map[string][]string), - ID: c.task.ServiceID, - VirtualAddresses: make(map[string]*clustertypes.VirtualAddress), - } - - for _, na := range c.task.Networks { - svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{ - // We support only IPv4 virtual IP for now. - IPv4: c.virtualIP(na.Network.ID), - } - if len(na.Aliases) > 0 { - svcCfg.Aliases[na.Network.ID] = na.Aliases - } - } - - if c.task.Endpoint != nil { - for _, ePort := range c.task.Endpoint.Ports { - svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{ - Name: ePort.Name, - Protocol: int32(ePort.Protocol), - TargetPort: ePort.TargetPort, - PublishedPort: ePort.PublishedPort, - }) - } - } - - return svcCfg -} - -// networks returns a list of network names attached to the container. The -// returned name can be used to lookup the corresponding network create -// options. 
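Two conversions buried in the hunk above reward a closer look. First, the CPU limit: swarmkit expresses limits in NanoCPUs (1e9 equals one full CPU), and the deleted resources() method turned that into a CFS period/quota pair. A runnable distillation of that arithmetic; nanoCPUsToCFS is an illustrative name:

    package main

    import (
        "fmt"
        "time"
    )

    // cpuQuotaPeriod matches the constant in the deleted container.go:
    // the kernel's default CFS bandwidth period of 100ms.
    const cpuQuotaPeriod = 100 * time.Millisecond

    // nanoCPUsToCFS reproduces the arithmetic of the deleted resources()
    // method: NanoCPUs (1e9 == one full CPU) become a CFS quota expressed
    // in microseconds per 100000µs period.
    func nanoCPUsToCFS(nanoCPUs int64) (period, quota int64) {
        period = int64(cpuQuotaPeriod / time.Microsecond) // 100000µs
        quota = nanoCPUs * period / 1e9
        return period, quota
    }

    func main() {
        p, q := nanoCPUsToCFS(1500000000) // a limit of 1.5 CPUs
        fmt.Printf("period=%dµs quota=%dµs\n", p, q) // period=100000µs quota=150000µs
    }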
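Second, the address handling: network attachments carry CIDR strings, and the deleted getEndpointConfig sorted them into a single IPv4 and a single IPv6 endpoint address, silently skipping anything unparsable. A self-contained version of that filter; splitAddresses is an illustrative name:

    package main

    import (
        "fmt"
        "net"
    )

    // splitAddresses follows the deleted getEndpointConfig: each
    // attachment address is CIDR-formatted, the last IPv4 and IPv6
    // addresses seen win, and invalid entries are skipped.
    func splitAddresses(addrs []string) (ipv4, ipv6 string) {
        for _, addr := range addrs {
            ip, _, err := net.ParseCIDR(addr)
            if err != nil {
                continue
            }
            if ip.To4() != nil {
                ipv4 = ip.String()
                continue
            }
            if ip.To16() != nil {
                ipv6 = ip.String()
            }
        }
        return ipv4, ipv6
    }

    func main() {
        v4, v6 := splitAddresses([]string{"10.0.0.3/24", "fd00::3/64", "bogus"})
        fmt.Println(v4, v6) // 10.0.0.3 fd00::3
    }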
-func (c *containerConfig) networks() []string { - var networks []string - - for name := range c.networksAttachments { - networks = append(networks, name) - } - - return networks -} - -func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) { - na, ok := c.networksAttachments[name] - if !ok { - return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced") - } - - options := types.NetworkCreate{ - // ID: na.Network.ID, - Driver: na.Network.DriverState.Name, - IPAM: network.IPAM{ - Driver: na.Network.IPAM.Driver.Name, - }, - Options: na.Network.DriverState.Options, - Labels: na.Network.Spec.Annotations.Labels, - Internal: na.Network.Spec.Internal, - EnableIPv6: na.Network.Spec.Ipv6Enabled, - CheckDuplicate: true, - } - - for _, ic := range na.Network.IPAM.Configs { - c := network.IPAMConfig{ - Subnet: ic.Subnet, - IPRange: ic.Range, - Gateway: ic.Gateway, - } - options.IPAM.Config = append(options.IPAM.Config, c) - } - - return clustertypes.NetworkCreateRequest{na.Network.ID, types.NetworkCreateRequest{Name: name, NetworkCreate: options}}, nil -} - -func (c containerConfig) eventFilter() filters.Args { - filter := filters.NewArgs() - filter.Add("type", events.ContainerEventType) - filter.Add("name", c.name()) - filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID)) - return filter -} diff --git a/daemon/cluster/executor/container/controller.go b/daemon/cluster/executor/container/controller.go deleted file mode 100644 index a51e5c52e..000000000 --- a/daemon/cluster/executor/container/controller.go +++ /dev/null @@ -1,349 +0,0 @@ -package container - -import ( - "fmt" - "os" - - executorpkg "github.com/docker/docker/daemon/cluster/executor" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/events" - "github.com/docker/swarmkit/agent/exec" - "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/log" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -// controller implements agent.Controller against docker's API. -// -// Most operations against docker's API are done through the container name, -// which is unique to the task. -type controller struct { - backend executorpkg.Backend - task *api.Task - adapter *containerAdapter - closed chan struct{} - err error -} - -var _ exec.Controller = &controller{} - -// NewController returns a dockerexec runner for the provided task. -func newController(b executorpkg.Backend, task *api.Task) (*controller, error) { - adapter, err := newContainerAdapter(b, task) - if err != nil { - return nil, err - } - - return &controller{ - backend: b, - task: task, - adapter: adapter, - closed: make(chan struct{}), - }, nil -} - -func (r *controller) Task() (*api.Task, error) { - return r.task, nil -} - -// ContainerStatus returns the container-specific status for the task. -func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) { - ctnr, err := r.adapter.inspect(ctx) - if err != nil { - if isUnknownContainer(err) { - return nil, nil - } - return nil, err - } - return parseContainerStatus(ctnr) -} - -// Update tasks a recent task update and applies it to the container. -func (r *controller) Update(ctx context.Context, t *api.Task) error { - // TODO(stevvooe): While assignment of tasks is idempotent, we do allow - // updates of metadata, such as labelling, as well as any other properties - // that make sense. - return nil -} - -// Prepare creates a container and ensures the image is pulled. 
-// -// If the container has already be created, exec.ErrTaskPrepared is returned. -func (r *controller) Prepare(ctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - // Make sure all the networks that the task needs are created. - if err := r.adapter.createNetworks(ctx); err != nil { - return err - } - - // Make sure all the volumes that the task needs are created. - if err := r.adapter.createVolumes(ctx, r.backend); err != nil { - return err - } - - if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { - if err := r.adapter.pullImage(ctx); err != nil { - // NOTE(stevvooe): We always try to pull the image to make sure we have - // the most up to date version. This will return an error, but we only - // log it. If the image truly doesn't exist, the create below will - // error out. - // - // This gives us some nice behavior where we use up to date versions of - // mutable tags, but will still run if the old image is available but a - // registry is down. - // - // If you don't want this behavior, lock down your image to an - // immutable tag or digest. - log.G(ctx).WithError(err).Error("pulling image failed") - } - } - - if err := r.adapter.create(ctx, r.backend); err != nil { - if isContainerCreateNameConflict(err) { - if _, err := r.adapter.inspect(ctx); err != nil { - return err - } - - // container is already created. success! - return exec.ErrTaskPrepared - } - - return err - } - - return nil -} - -// Start the container. An error will be returned if the container is already started. -func (r *controller) Start(ctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - ctnr, err := r.adapter.inspect(ctx) - if err != nil { - return err - } - - // Detect whether the container has *ever* been started. If so, we don't - // issue the start. - // - // TODO(stevvooe): This is very racy. While reading inspect, another could - // start the process and we could end up starting it twice. - if ctnr.State.Status != "created" { - return exec.ErrTaskStarted - } - - if err := r.adapter.start(ctx); err != nil { - return errors.Wrap(err, "starting container failed") - } - - return nil -} - -// Wait on the container to exit. -func (r *controller) Wait(pctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - ctx, cancel := context.WithCancel(pctx) - defer cancel() - - healthErr := make(chan error, 1) - go func() { - ectx, cancel := context.WithCancel(ctx) // cancel event context on first event - defer cancel() - if err := r.checkHealth(ectx); err == ErrContainerUnhealthy { - healthErr <- ErrContainerUnhealthy - if err := r.Shutdown(ectx); err != nil { - log.G(ectx).WithError(err).Debug("shutdown failed on unhealthy") - } - } - }() - - err := r.adapter.wait(ctx) - if ctx.Err() != nil { - return ctx.Err() - } - - if err != nil { - ee := &exitError{} - if ec, ok := err.(exec.ExitCoder); ok { - ee.code = ec.ExitCode() - } - select { - case e := <-healthErr: - ee.cause = e - default: - if err.Error() != "" { - ee.cause = err - } - } - return ee - } - - return nil -} - -// Shutdown the container cleanly. -func (r *controller) Shutdown(ctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - if err := r.adapter.shutdown(ctx); err != nil { - if isUnknownContainer(err) || isStoppedContainer(err) { - return nil - } - - return err - } - - return nil -} - -// Terminate the container, with force. 
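One pattern in the controller above deserves a note: Prepare is written to be retried. The deleted code detected a create-time name collision by substring match (isContainerCreateNameConflict), re-inspected the container, and reported exec.ErrTaskPrepared instead of failing. A minimal sketch of that idempotency shape, with create and inspect as stand-in stubs rather than the removed daemon calls:

    package main

    import (
        "errors"
        "fmt"
    )

    // errTaskPrepared stands in for swarmkit's exec.ErrTaskPrepared;
    // create and inspect below are stubs, not the removed daemon calls.
    var (
        errTaskPrepared = errors.New("task already prepared")
        errNameConflict = errors.New(`Conflict. The name "web.1" is already in use`)
    )

    func create(name string) error  { return errNameConflict } // pretend a previous attempt won
    func inspect(name string) error { return nil }             // pretend the container exists

    // prepare mirrors the shape of the deleted controller.Prepare: a name
    // conflict on create is downgraded to "already prepared" once inspect
    // confirms the container really exists, which makes Prepare retryable.
    func prepare(name string) error {
        if err := create(name); err != nil {
            if errors.Is(err, errNameConflict) {
                if err := inspect(name); err != nil {
                    return err
                }
                return errTaskPrepared
            }
            return err
        }
        return nil
    }

    func main() {
        fmt.Println(prepare("web.1")) // task already prepared
    }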
-func (r *controller) Terminate(ctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - if err := r.adapter.terminate(ctx); err != nil { - if isUnknownContainer(err) { - return nil - } - - return err - } - - return nil -} - -// Remove the container and its resources. -func (r *controller) Remove(ctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - // It may be necessary to shut down the task before removing it. - if err := r.Shutdown(ctx); err != nil { - if isUnknownContainer(err) { - return nil - } - // This may fail if the task was already shut down. - log.G(ctx).WithError(err).Debug("shutdown failed on removal") - } - - // Try removing networks referenced in this task in case this - // task is the last one referencing it - if err := r.adapter.removeNetworks(ctx); err != nil { - if isUnknownContainer(err) { - return nil - } - return err - } - - if err := r.adapter.remove(ctx); err != nil { - if isUnknownContainer(err) { - return nil - } - - return err - } - return nil -} - -// Close the runner and clean up any ephemeral resources. -func (r *controller) Close() error { - select { - case <-r.closed: - return r.err - default: - r.err = exec.ErrControllerClosed - close(r.closed) - } - return nil -} - -func (r *controller) matchevent(event events.Message) bool { - if event.Type != events.ContainerEventType { - return false - } - - // TODO(stevvooe): Filter based on ID matching, in addition to name. - - // Make sure the events are for this container. - if event.Actor.Attributes["name"] != r.adapter.container.name() { - return false - } - - return true -} - -func (r *controller) checkClosed() error { - select { - case <-r.closed: - return r.err - default: - return nil - } -} - -func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) { - status := &api.ContainerStatus{ - ContainerID: ctnr.ID, - PID: int32(ctnr.State.Pid), - ExitCode: int32(ctnr.State.ExitCode), - } - - return status, nil -} - -type exitError struct { - code int - cause error -} - -func (e *exitError) Error() string { - if e.cause != nil { - return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause) - } - - return fmt.Sprintf("task: non-zero exit (%v)", e.code) -} - -func (e *exitError) ExitCode() int { - return int(e.code) -} - -func (e *exitError) Cause() error { - return e.cause -} - -// checkHealth blocks until unhealthy container is detected or ctx exits -func (r *controller) checkHealth(ctx context.Context) error { - eventq := r.adapter.events(ctx) - - for { - select { - case <-ctx.Done(): - return nil - case <-r.closed: - return nil - case event := <-eventq: - if !r.matchevent(event) { - continue - } - - switch event.Action { - case "health_status: unhealthy": - return ErrContainerUnhealthy - } - } - } -} diff --git a/daemon/cluster/executor/container/errors.go b/daemon/cluster/executor/container/errors.go deleted file mode 100644 index 63e123356..000000000 --- a/daemon/cluster/executor/container/errors.go +++ /dev/null @@ -1,15 +0,0 @@ -package container - -import "fmt" - -var ( - // ErrImageRequired returned if a task is missing the image definition. - ErrImageRequired = fmt.Errorf("dockerexec: image required") - - // ErrContainerDestroyed returned when a container is prematurely destroyed - // during a wait call. 
- ErrContainerDestroyed = fmt.Errorf("dockerexec: container destroyed") - - // ErrContainerUnhealthy returned if controller detects the health check failure - ErrContainerUnhealthy = fmt.Errorf("dockerexec: unhealthy container") -) diff --git a/daemon/cluster/executor/container/executor.go b/daemon/cluster/executor/container/executor.go deleted file mode 100644 index bf5e248f6..000000000 --- a/daemon/cluster/executor/container/executor.go +++ /dev/null @@ -1,139 +0,0 @@ -package container - -import ( - "strings" - - executorpkg "github.com/docker/docker/daemon/cluster/executor" - clustertypes "github.com/docker/docker/daemon/cluster/provider" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/network" - networktypes "github.com/docker/libnetwork/types" - "github.com/docker/swarmkit/agent/exec" - "github.com/docker/swarmkit/api" - "golang.org/x/net/context" -) - -type executor struct { - backend executorpkg.Backend -} - -// NewExecutor returns an executor from the docker client. -func NewExecutor(b executorpkg.Backend) exec.Executor { - return &executor{ - backend: b, - } -} - -// Describe returns the underlying node description from the docker client. -func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { - info, err := e.backend.SystemInfo() - if err != nil { - return nil, err - } - - var plugins []api.PluginDescription - addPlugins := func(typ string, names []string) { - for _, name := range names { - plugins = append(plugins, api.PluginDescription{ - Type: typ, - Name: name, - }) - } - } - - addPlugins("Volume", info.Plugins.Volume) - // Add builtin driver "overlay" (the only builtin multi-host driver) to - // the plugin list by default. - addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...)) - addPlugins("Authorization", info.Plugins.Authorization) - - // parse []string labels into a map[string]string - labels := map[string]string{} - for _, l := range info.Labels { - stringSlice := strings.SplitN(l, "=", 2) - // this will take the last value in the list for a given key - // ideally, one shouldn't assign multiple values to the same key - if len(stringSlice) > 1 { - labels[stringSlice[0]] = stringSlice[1] - } - } - - description := &api.NodeDescription{ - Hostname: info.Name, - Platform: &api.Platform{ - Architecture: info.Architecture, - OS: info.OSType, - }, - Engine: &api.EngineDescription{ - EngineVersion: info.ServerVersion, - Labels: labels, - Plugins: plugins, - }, - Resources: &api.Resources{ - NanoCPUs: int64(info.NCPU) * 1e9, - MemoryBytes: info.MemTotal, - }, - } - - return description, nil -} - -func (e *executor) Configure(ctx context.Context, node *api.Node) error { - na := node.Attachment - if na == nil { - return nil - } - - options := types.NetworkCreate{ - Driver: na.Network.DriverState.Name, - IPAM: network.IPAM{ - Driver: na.Network.IPAM.Driver.Name, - }, - Options: na.Network.DriverState.Options, - CheckDuplicate: true, - } - - for _, ic := range na.Network.IPAM.Configs { - c := network.IPAMConfig{ - Subnet: ic.Subnet, - IPRange: ic.Range, - Gateway: ic.Gateway, - } - options.IPAM.Config = append(options.IPAM.Config, c) - } - - return e.backend.SetupIngress(clustertypes.NetworkCreateRequest{ - na.Network.ID, - types.NetworkCreateRequest{ - Name: na.Network.Spec.Annotations.Name, - NetworkCreate: options, - }, - }, na.Addresses[0]) -} - -// Controller returns a docker container runner. 
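Alongside the event plumbing, the deleted controller leaned on a compact Go idiom for shutdown: a closed chan struct{} acts as a broadcast flag that any select can observe. Distilled into standalone form; the closer type and constructor are illustrative names:

    package main

    import (
        "errors"
        "fmt"
    )

    var errControllerClosed = errors.New("controller closed")

    // closer reproduces the closed-channel idiom from the deleted
    // controller: closing the channel broadcasts "shut down" to every
    // select listening on it, and checkClosed turns that state into an
    // error for callers.
    type closer struct {
        closed chan struct{}
        err    error
    }

    func newCloser() *closer { return &closer{closed: make(chan struct{})} }

    func (c *closer) Close() error {
        select {
        case <-c.closed:
            return c.err // already closed; report the original error
        default:
            c.err = errControllerClosed
            close(c.closed)
        }
        return nil
    }

    func (c *closer) checkClosed() error {
        select {
        case <-c.closed:
            return c.err
        default:
            return nil
        }
    }

    func main() {
        c := newCloser()
        fmt.Println(c.checkClosed()) // <nil>
        c.Close()
        fmt.Println(c.checkClosed()) // controller closed
    }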
-func (e *executor) Controller(t *api.Task) (exec.Controller, error) { - ctlr, err := newController(e.backend, t) - if err != nil { - return nil, err - } - - return ctlr, nil -} - -func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error { - nwKeys := []*networktypes.EncryptionKey{} - for _, key := range keys { - nwKey := &networktypes.EncryptionKey{ - Subsystem: key.Subsystem, - Algorithm: int32(key.Algorithm), - Key: make([]byte, len(key.Key)), - LamportTime: key.LamportTime, - } - copy(nwKey.Key, key.Key) - nwKeys = append(nwKeys, nwKey) - } - e.backend.SetNetworkBootstrapKeys(nwKeys) - - return nil -} diff --git a/daemon/cluster/executor/container/health_test.go b/daemon/cluster/executor/container/health_test.go deleted file mode 100644 index 472624b54..000000000 --- a/daemon/cluster/executor/container/health_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// +build !windows - -package container - -import ( - "testing" - "time" - - "github.com/docker/docker/container" - "github.com/docker/docker/daemon" - "github.com/docker/docker/daemon/events" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/swarmkit/api" - "golang.org/x/net/context" -) - -func TestHealthStates(t *testing.T) { - - // set up environment: events, task, container .... - e := events.New() - _, l, _ := e.Subscribe() - defer e.Evict(l) - - task := &api.Task{ - ID: "id", - ServiceID: "sid", - Spec: api.TaskSpec{ - Runtime: &api.TaskSpec_Container{ - Container: &api.ContainerSpec{ - Image: "image_name", - Labels: map[string]string{ - "com.docker.swarm.task.id": "id", - }, - }, - }, - }, - Annotations: api.Annotations{Name: "name"}, - } - - c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "id", - Name: "name", - Config: &containertypes.Config{ - Image: "image_name", - Labels: map[string]string{ - "com.docker.swarm.task.id": "id", - }, - }, - }, - } - - daemon := &daemon.Daemon{ - EventsService: e, - } - - controller, err := newController(daemon, task) - if err != nil { - t.Fatalf("create controller fail %v", err) - } - - errChan := make(chan error, 1) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // fire checkHealth - go func() { - err := controller.checkHealth(ctx) - select { - case errChan <- err: - case <-ctx.Done(): - } - }() - - // send an event and expect to get expectedErr - // if expectedErr is nil, shouldn't get any error - logAndExpect := func(msg string, expectedErr error) { - daemon.LogContainerEvent(c, msg) - - timer := time.NewTimer(1 * time.Second) - defer timer.Stop() - - select { - case err := <-errChan: - if err != expectedErr { - t.Fatalf("expect error %v, but get %v", expectedErr, err) - } - case <-timer.C: - if expectedErr != nil { - t.Fatalf("time limit exceeded, didn't get expected error") - } - } - } - - // events that are ignored by checkHealth - logAndExpect("health_status: running", nil) - logAndExpect("health_status: healthy", nil) - logAndExpect("die", nil) - - // unhealthy event will be caught by checkHealth - logAndExpect("health_status: unhealthy", ErrContainerUnhealthy) -} diff --git a/daemon/cluster/filters.go b/daemon/cluster/filters.go deleted file mode 100644 index 26fc65627..000000000 --- a/daemon/cluster/filters.go +++ /dev/null @@ -1,98 +0,0 @@ -package cluster - -import ( - "fmt" - "strings" - - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types/filters" - swarmapi "github.com/docker/swarmkit/api" -) - -func newListNodesFilters(filter 
filters.Args) (*swarmapi.ListNodesRequest_Filters, error) { - accepted := map[string]bool{ - "name": true, - "id": true, - "label": true, - "role": true, - "membership": true, - } - if err := filter.Validate(accepted); err != nil { - return nil, err - } - f := &swarmapi.ListNodesRequest_Filters{ - Names: filter.Get("name"), - IDPrefixes: filter.Get("id"), - Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), - } - - for _, r := range filter.Get("role") { - if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok { - f.Roles = append(f.Roles, swarmapi.NodeRole(role)) - } else if r != "" { - return nil, fmt.Errorf("Invalid role filter: '%s'", r) - } - } - - for _, a := range filter.Get("membership") { - if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok { - f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership)) - } else if a != "" { - return nil, fmt.Errorf("Invalid membership filter: '%s'", a) - } - } - - return f, nil -} - -func newListServicesFilters(filter filters.Args) (*swarmapi.ListServicesRequest_Filters, error) { - accepted := map[string]bool{ - "name": true, - "id": true, - "label": true, - } - if err := filter.Validate(accepted); err != nil { - return nil, err - } - return &swarmapi.ListServicesRequest_Filters{ - Names: filter.Get("name"), - IDPrefixes: filter.Get("id"), - Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), - }, nil -} - -func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) error) (*swarmapi.ListTasksRequest_Filters, error) { - accepted := map[string]bool{ - "name": true, - "id": true, - "label": true, - "service": true, - "node": true, - "desired-state": true, - } - if err := filter.Validate(accepted); err != nil { - return nil, err - } - if transformFunc != nil { - if err := transformFunc(filter); err != nil { - return nil, err - } - } - f := &swarmapi.ListTasksRequest_Filters{ - Names: filter.Get("name"), - IDPrefixes: filter.Get("id"), - Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), - ServiceIDs: filter.Get("service"), - NodeIDs: filter.Get("node"), - } - - for _, s := range filter.Get("desired-state") { - if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok { - f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state)) - } else if s != "" { - return nil, fmt.Errorf("Invalid desired-state filter: '%s'", s) - } - } - - return f, nil -} diff --git a/daemon/cluster/helpers.go b/daemon/cluster/helpers.go deleted file mode 100644 index be5bf56e8..000000000 --- a/daemon/cluster/helpers.go +++ /dev/null @@ -1,108 +0,0 @@ -package cluster - -import ( - "fmt" - - swarmapi "github.com/docker/swarmkit/api" - "golang.org/x/net/context" -) - -func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) { - rl, err := c.ListClusters(ctx, &swarmapi.ListClustersRequest{}) - if err != nil { - return nil, err - } - - if len(rl.Clusters) == 0 { - return nil, fmt.Errorf("swarm not found") - } - - // TODO: assume one cluster only - return rl.Clusters[0], nil -} - -func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) { - // GetNode to match via full ID. - rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input}) - if err != nil { - // If any error (including NotFound), ListNodes to match via full name. 
- rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{Names: []string{input}}}) - - if err != nil || len(rl.Nodes) == 0 { - // If any error or 0 result, ListNodes to match via ID prefix. - rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{IDPrefixes: []string{input}}}) - } - - if err != nil { - return nil, err - } - - if len(rl.Nodes) == 0 { - return nil, fmt.Errorf("node %s not found", input) - } - - if l := len(rl.Nodes); l > 1 { - return nil, fmt.Errorf("node %s is ambiguous (%d matches found)", input, l) - } - - return rl.Nodes[0], nil - } - return rg.Node, nil -} - -func getService(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Service, error) { - // GetService to match via full ID. - rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input}) - if err != nil { - // If any error (including NotFound), ListServices to match via full name. - rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{Names: []string{input}}}) - if err != nil || len(rl.Services) == 0 { - // If any error or 0 result, ListServices to match via ID prefix. - rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{IDPrefixes: []string{input}}}) - } - - if err != nil { - return nil, err - } - - if len(rl.Services) == 0 { - return nil, fmt.Errorf("service %s not found", input) - } - - if l := len(rl.Services); l > 1 { - return nil, fmt.Errorf("service %s is ambiguous (%d matches found)", input, l) - } - - return rl.Services[0], nil - } - return rg.Service, nil -} - -func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) { - // GetTask to match via full ID. - rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input}) - if err != nil { - // If any error (including NotFound), ListTasks to match via full name. - rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{Names: []string{input}}}) - - if err != nil || len(rl.Tasks) == 0 { - // If any error or 0 result, ListTasks to match via ID prefix. - rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{IDPrefixes: []string{input}}}) - } - - if err != nil { - return nil, err - } - - if len(rl.Tasks) == 0 { - return nil, fmt.Errorf("task %s not found", input) - } - - if l := len(rl.Tasks); l > 1 { - return nil, fmt.Errorf("task %s is ambiguous (%d matches found)", input, l) - } - - return rl.Tasks[0], nil - } - return rg.Task, nil -} diff --git a/daemon/cluster/provider/network.go b/daemon/cluster/provider/network.go deleted file mode 100644 index d99c2f729..000000000 --- a/daemon/cluster/provider/network.go +++ /dev/null @@ -1,37 +0,0 @@ -package provider - -import "github.com/docker/engine-api/types" - -// NetworkCreateRequest is a request when creating a network. -type NetworkCreateRequest struct { - ID string - types.NetworkCreateRequest -} - -// NetworkCreateResponse is a response when creating a network. -type NetworkCreateResponse struct { - ID string `json:"Id"` -} - -// VirtualAddress represents a virtual address. -type VirtualAddress struct { - IPv4 string - IPv6 string -} - -// PortConfig represents a port configuration. -type PortConfig struct { - Name string - Protocol int32 - TargetPort uint32 - PublishedPort uint32 -} - -// ServiceConfig represents a service configuration. 
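The getNode, getService, and getTask helpers above all share one lookup ladder: try an exact ID, then an exact name, then an ID prefix, and refuse ambiguous prefixes. A generic in-memory rendition of that ladder; the object type and resolve function are illustrative, where the real code issued swarmkit ControlClient calls:

    package main

    import (
        "fmt"
        "strings"
    )

    type object struct{ id, name string }

    // resolve sketches the lookup ladder used by the deleted helpers:
    // 1. exact ID, 2. exact name, 3. unambiguous ID prefix.
    func resolve(objects []object, input string) (*object, error) {
        for i := range objects { // 1. exact ID match
            if objects[i].id == input {
                return &objects[i], nil
            }
        }
        for i := range objects { // 2. exact name match
            if objects[i].name == input {
                return &objects[i], nil
            }
        }
        var matches []*object // 3. ID prefix match
        for i := range objects {
            if strings.HasPrefix(objects[i].id, input) {
                matches = append(matches, &objects[i])
            }
        }
        switch len(matches) {
        case 0:
            return nil, fmt.Errorf("object %s not found", input)
        case 1:
            return matches[0], nil
        default:
            return nil, fmt.Errorf("object %s is ambiguous (%d matches found)", input, len(matches))
        }
    }

    func main() {
        objs := []object{{"8f1a9c", "web"}, {"8f2b3d", "db"}}
        if o, err := resolve(objs, "web"); err == nil {
            fmt.Println("resolved:", o.id)
        }
        _, err := resolve(objs, "8f")
        fmt.Println(err) // ambiguous: two IDs share the prefix
    }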
-type ServiceConfig struct { - ID string - Name string - Aliases map[string][]string - VirtualAddresses map[string]*VirtualAddress - ExposedPorts []*PortConfig -} diff --git a/daemon/commit.go b/daemon/commit.go deleted file mode 100644 index 507a31123..000000000 --- a/daemon/commit.go +++ /dev/null @@ -1,265 +0,0 @@ -package daemon - -import ( - "encoding/json" - "fmt" - "runtime" - "strings" - "time" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/builder/dockerfile" - "github.com/docker/docker/container" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/reference" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/go-connections/nat" -) - -// merge merges two Config, the image container configuration (defaults values), -// and the user container configuration, either passed by the API or generated -// by the cli. -// It will mutate the specified user configuration (userConf) with the image -// configuration where the user configuration is incomplete. -func merge(userConf, imageConf *containertypes.Config) error { - if userConf.User == "" { - userConf.User = imageConf.User - } - if len(userConf.ExposedPorts) == 0 { - userConf.ExposedPorts = imageConf.ExposedPorts - } else if imageConf.ExposedPorts != nil { - if userConf.ExposedPorts == nil { - userConf.ExposedPorts = make(nat.PortSet) - } - for port := range imageConf.ExposedPorts { - if _, exists := userConf.ExposedPorts[port]; !exists { - userConf.ExposedPorts[port] = struct{}{} - } - } - } - - if len(userConf.Env) == 0 { - userConf.Env = imageConf.Env - } else { - for _, imageEnv := range imageConf.Env { - found := false - imageEnvKey := strings.Split(imageEnv, "=")[0] - for _, userEnv := range userConf.Env { - userEnvKey := strings.Split(userEnv, "=")[0] - if imageEnvKey == userEnvKey { - found = true - break - } - } - if !found { - userConf.Env = append(userConf.Env, imageEnv) - } - } - } - - if userConf.Labels == nil { - userConf.Labels = map[string]string{} - } - if imageConf.Labels != nil { - for l := range userConf.Labels { - imageConf.Labels[l] = userConf.Labels[l] - } - userConf.Labels = imageConf.Labels - } - - if len(userConf.Entrypoint) == 0 { - if len(userConf.Cmd) == 0 { - userConf.Cmd = imageConf.Cmd - userConf.ArgsEscaped = imageConf.ArgsEscaped - } - - if userConf.Entrypoint == nil { - userConf.Entrypoint = imageConf.Entrypoint - } - } - if imageConf.Healthcheck != nil { - if userConf.Healthcheck == nil { - userConf.Healthcheck = imageConf.Healthcheck - } else { - if len(userConf.Healthcheck.Test) == 0 { - userConf.Healthcheck.Test = imageConf.Healthcheck.Test - } - if userConf.Healthcheck.Interval == 0 { - userConf.Healthcheck.Interval = imageConf.Healthcheck.Interval - } - if userConf.Healthcheck.Timeout == 0 { - userConf.Healthcheck.Timeout = imageConf.Healthcheck.Timeout - } - if userConf.Healthcheck.Retries == 0 { - userConf.Healthcheck.Retries = imageConf.Healthcheck.Retries - } - } - } - - if userConf.WorkingDir == "" { - userConf.WorkingDir = imageConf.WorkingDir - } - if len(userConf.Volumes) == 0 { - userConf.Volumes = imageConf.Volumes - } else { - for k, v := range imageConf.Volumes { - userConf.Volumes[k] = v - } - } - - if userConf.StopSignal == "" { - userConf.StopSignal = imageConf.StopSignal - } - return nil -} - -// Commit creates a new filesystem image from the current 
state of a container. -// The image can optionally be tagged into a repository. -func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (string, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return "", err - } - - // It is not possible to commit a running container on Windows - if runtime.GOOS == "windows" && container.IsRunning() { - return "", fmt.Errorf("Windows does not support commit of a running container") - } - - if c.Pause && !container.IsPaused() { - daemon.containerPause(container) - defer daemon.containerUnpause(container) - } - - newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes) - if err != nil { - return "", err - } - - if c.MergeConfigs { - if err := merge(newConfig, container.Config); err != nil { - return "", err - } - } - - rwTar, err := daemon.exportContainerRw(container) - if err != nil { - return "", err - } - defer func() { - if rwTar != nil { - rwTar.Close() - } - }() - - var history []image.History - rootFS := image.NewRootFS() - osVersion := "" - var osFeatures []string - - if container.ImageID != "" { - img, err := daemon.imageStore.Get(container.ImageID) - if err != nil { - return "", err - } - history = img.History - rootFS = img.RootFS - osVersion = img.OSVersion - osFeatures = img.OSFeatures - } - - l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID()) - if err != nil { - return "", err - } - defer layer.ReleaseAndLog(daemon.layerStore, l) - - h := image.History{ - Author: c.Author, - Created: time.Now().UTC(), - CreatedBy: strings.Join(container.Config.Cmd, " "), - Comment: c.Comment, - EmptyLayer: true, - } - - if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID { - h.EmptyLayer = false - rootFS.Append(diffID) - } - - history = append(history, h) - - config, err := json.Marshal(&image.Image{ - V1Image: image.V1Image{ - DockerVersion: dockerversion.Version, - Config: newConfig, - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - Container: container.ID, - ContainerConfig: *container.Config, - Author: c.Author, - Created: h.Created, - }, - RootFS: rootFS, - History: history, - OSFeatures: osFeatures, - OSVersion: osVersion, - }) - - if err != nil { - return "", err - } - - id, err := daemon.imageStore.Create(config) - if err != nil { - return "", err - } - - if container.ImageID != "" { - if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil { - return "", err - } - } - - if c.Repo != "" { - newTag, err := reference.WithName(c.Repo) // todo: should move this to API layer - if err != nil { - return "", err - } - if c.Tag != "" { - if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { - return "", err - } - } - if err := daemon.TagImageWithReference(id, newTag); err != nil { - return "", err - } - } - - attributes := map[string]string{ - "comment": c.Comment, - } - daemon.LogContainerEventWithAttributes(container, "commit", attributes) - return id.String(), nil -} - -func (daemon *Daemon) exportContainerRw(container *container.Container) (archive.Archive, error) { - if err := daemon.Mount(container); err != nil { - return nil, err - } - - archive, err := container.RWLayer.TarStream() - if err != nil { - daemon.Unmount(container) // logging is already handled in the `Unmount` function - return nil, err - } - return ioutils.NewReadCloserWrapper(archive, func() error { - archive.Close() - return container.RWLayer.Unmount() - }), - nil -} diff --git a/daemon/config.go b/daemon/config.go deleted file mode 100644 index 262918b32..000000000 --- 
a/daemon/config.go +++ /dev/null @@ -1,444 +0,0 @@ -package daemon - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "strings" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/discovery" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/registry" - "github.com/imdario/mergo" -) - -const ( - // defaultMaxConcurrentDownloads is the default value for - // maximum number of downloads that - // may take place at a time for each pull. - defaultMaxConcurrentDownloads = 3 - // defaultMaxConcurrentUploads is the default value for - // maximum number of uploads that - // may take place at a time for each push. - defaultMaxConcurrentUploads = 5 - // stockRuntimeName is the reserved name/alias used to represent the - // OCI runtime being shipped with the docker daemon package. - stockRuntimeName = "runc" -) - -const ( - defaultNetworkMtu = 1500 - disableNetworkBridge = "none" -) - -// flatOptions contains configuration keys -// that MUST NOT be parsed as deep structures. -// Use this to differentiate these options -// with others like the ones in CommonTLSOptions. -var flatOptions = map[string]bool{ - "cluster-store-opts": true, - "log-opts": true, - "runtimes": true, -} - -// LogConfig represents the default log configuration. -// It includes json tags to deserialize configuration from a file -// using the same names that the flags in the command line use. -type LogConfig struct { - Type string `json:"log-driver,omitempty"` - Config map[string]string `json:"log-opts,omitempty"` -} - -// commonBridgeConfig stores all the platform-common bridge driver specific -// configuration. -type commonBridgeConfig struct { - Iface string `json:"bridge,omitempty"` - FixedCIDR string `json:"fixed-cidr,omitempty"` -} - -// CommonTLSOptions defines TLS configuration for the daemon server. -// It includes json tags to deserialize configuration from a file -// using the same names that the flags in the command line use. -type CommonTLSOptions struct { - CAFile string `json:"tlscacert,omitempty"` - CertFile string `json:"tlscert,omitempty"` - KeyFile string `json:"tlskey,omitempty"` -} - -// CommonConfig defines the configuration of a docker daemon which is -// common across platforms. -// It includes json tags to deserialize configuration from a file -// using the same names that the flags in the command line use. -type CommonConfig struct { - AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins - AutoRestart bool `json:"-"` - Context map[string][]string `json:"-"` - DisableBridge bool `json:"-"` - DNS []string `json:"dns,omitempty"` - DNSOptions []string `json:"dns-opts,omitempty"` - DNSSearch []string `json:"dns-search,omitempty"` - ExecOptions []string `json:"exec-opts,omitempty"` - GraphDriver string `json:"storage-driver,omitempty"` - GraphOptions []string `json:"storage-opts,omitempty"` - Labels []string `json:"labels,omitempty"` - Mtu int `json:"mtu,omitempty"` - Pidfile string `json:"pidfile,omitempty"` - RawLogs bool `json:"raw-logs,omitempty"` - Root string `json:"graph,omitempty"` - SocketGroup string `json:"group,omitempty"` - TrustKeyPath string `json:"-"` - CorsHeaders string `json:"api-cors-header,omitempty"` - EnableCors bool `json:"api-enable-cors,omitempty"` - LiveRestore bool `json:"live-restore,omitempty"` - - // ClusterStore is the storage backend used for the cluster information. 
It is used by both - // multihost networking (to store networks and endpoints information) and by the node discovery - // mechanism. - ClusterStore string `json:"cluster-store,omitempty"` - - // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such - // as TLS configuration settings. - ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"` - - // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node - // discovery. This should be a 'host:port' combination on which that daemon instance is - // reachable by other hosts. - ClusterAdvertise string `json:"cluster-advertise,omitempty"` - - // MaxConcurrentDownloads is the maximum number of downloads that - // may take place at a time for each pull. - MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"` - - // MaxConcurrentUploads is the maximum number of uploads that - // may take place at a time for each push. - MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"` - - Debug bool `json:"debug,omitempty"` - Hosts []string `json:"hosts,omitempty"` - LogLevel string `json:"log-level,omitempty"` - TLS bool `json:"tls,omitempty"` - TLSVerify bool `json:"tlsverify,omitempty"` - - // Embedded structs that allow config - // deserialization without the full struct. - CommonTLSOptions - LogConfig - bridgeConfig // bridgeConfig holds bridge network specific configuration. - registry.ServiceOptions - - reloadLock sync.Mutex - valuesSet map[string]interface{} -} - -// InstallCommonFlags adds command-line options to the top-level flag parser for -// the current process. -// Subsequent calls to `flag.Parse` will populate config with values parsed -// from the command-line. -func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string) string) { - var maxConcurrentDownloads, maxConcurrentUploads int - - config.ServiceOptions.InstallCliFlags(cmd, usageFn) - - cmd.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), []string{"-storage-opt"}, usageFn("Set storage driver options")) - cmd.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), []string{"-authorization-plugin"}, usageFn("List authorization plugins in order from first evaluator to last")) - cmd.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set runtime execution options")) - cmd.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, usageFn("Path to use for daemon PID file")) - cmd.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, usageFn("Root of the Docker runtime")) - cmd.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, usageFn("--restart on the daemon has been deprecated in favor of --restart policies on docker run")) - cmd.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", usageFn("Storage driver to use")) - cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU")) - cmd.BoolVar(&config.RawLogs, []string{"-raw-logs"}, false, usageFn("Full timestamps without ANSI coloring")) - // FIXME: why the inconsistency between "hosts" and "sockets"? 
- cmd.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use")) - cmd.Var(opts.NewNamedListOptsRef("dns-opts", &config.DNSOptions, nil), []string{"-dns-opt"}, usageFn("DNS options to use")) - cmd.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use")) - cmd.Var(opts.NewNamedListOptsRef("labels", &config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon")) - cmd.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", usageFn("Default driver for container logs")) - cmd.Var(opts.NewNamedMapOpts("log-opts", config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options")) - cmd.StringVar(&config.ClusterAdvertise, []string{"-cluster-advertise"}, "", usageFn("Address or interface name to advertise")) - cmd.StringVar(&config.ClusterStore, []string{"-cluster-store"}, "", usageFn("Set the cluster store")) - cmd.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), []string{"-cluster-store-opt"}, usageFn("Set cluster store options")) - cmd.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", usageFn("Set CORS headers in the remote API")) - cmd.IntVar(&maxConcurrentDownloads, []string{"-max-concurrent-downloads"}, defaultMaxConcurrentDownloads, usageFn("Set the max concurrent downloads for each pull")) - cmd.IntVar(&maxConcurrentUploads, []string{"-max-concurrent-uploads"}, defaultMaxConcurrentUploads, usageFn("Set the max concurrent uploads for each push")) - - config.MaxConcurrentDownloads = &maxConcurrentDownloads - config.MaxConcurrentUploads = &maxConcurrentUploads -} - -// IsValueSet returns true if a configuration value -// was explicitly set in the configuration file. -func (config *Config) IsValueSet(name string) bool { - if config.valuesSet == nil { - return false - } - _, ok := config.valuesSet[name] - return ok -} - -func parseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) { - if clusterAdvertise == "" { - return "", errDiscoveryDisabled - } - if clusterStore == "" { - return "", fmt.Errorf("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration") - } - - advertise, err := discovery.ParseAdvertise(clusterAdvertise) - if err != nil { - return "", fmt.Errorf("discovery advertise parsing failed (%v)", err) - } - return advertise, nil -} - -// ReloadConfiguration reads the configuration in the host and reloads the daemon and server. -func ReloadConfiguration(configFile string, flags *flag.FlagSet, reload func(*Config)) error { - logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile) - newConfig, err := getConflictFreeConfiguration(configFile, flags) - if err != nil { - return err - } - - if err := ValidateConfiguration(newConfig); err != nil { - return fmt.Errorf("file configuration validation failed (%v)", err) - } - - reload(newConfig) - return nil -} - -// boolValue is an interface that boolean value flags implement -// to tell the command line how to make -name equivalent to -name=true. -type boolValue interface { - IsBoolFlag() bool -} - -// MergeDaemonConfigurations reads a configuration file, -// loads the file configuration in an isolated structure, -// and merges the configuration provided from flags on top -// if there are no conflicts. 
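Back in daemon/commit.go above, merge() layered image defaults under user-supplied configuration, and the environment variables are the subtle case: a user variable always wins, and an image variable is appended only when its key is absent. A runnable rendition of just that rule; mergeEnv is an illustrative name, and SplitN stands in for the Split-then-index used by the deleted code:

    package main

    import (
        "fmt"
        "strings"
    )

    // mergeEnv mirrors the environment rule in the deleted merge():
    // user-supplied variables always win, and image variables are
    // appended only when the user did not set the same key.
    func mergeEnv(userEnv, imageEnv []string) []string {
        if len(userEnv) == 0 {
            return imageEnv
        }
        seen := make(map[string]bool, len(userEnv))
        for _, kv := range userEnv {
            seen[strings.SplitN(kv, "=", 2)[0]] = true
        }
        merged := userEnv
        for _, kv := range imageEnv {
            if !seen[strings.SplitN(kv, "=", 2)[0]] {
                merged = append(merged, kv)
            }
        }
        return merged
    }

    func main() {
        user := []string{"PATH=/custom/bin", "DEBUG=1"}
        image := []string{"PATH=/usr/bin", "LANG=C"}
        fmt.Println(mergeEnv(user, image)) // [PATH=/custom/bin DEBUG=1 LANG=C]
    }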
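As for MergeDaemonConfigurations itself, the contract is: file values form the base, flag values are merged on top, and a key explicitly set in both places is a conflict, not an override. The sketch below shows that precedence rule on plain maps; the real code worked on struct fields via mergo and pkg/mflag:

    package main

    import "fmt"

    // mergeConfig sketches the precedence described above: file values
    // form the base, explicitly-set flag values fill the gaps, and a key
    // set in both places is reported as a conflict rather than silently
    // overridden.
    func mergeConfig(fileVals, flagVals map[string]string) (map[string]string, error) {
        merged := make(map[string]string, len(fileVals)+len(flagVals))
        for k, v := range fileVals {
            merged[k] = v
        }
        for k, v := range flagVals {
            if fv, ok := fileVals[k]; ok {
                return nil, fmt.Errorf("%s: (from flag: %v, from file: %v)", k, v, fv)
            }
            merged[k] = v
        }
        return merged, nil
    }

    func main() {
        file := map[string]string{"log-driver": "json-file"}
        flags := map[string]string{"storage-driver": "overlay2"}
        if m, err := mergeConfig(file, flags); err == nil {
            fmt.Println(m)
        }
        _, err := mergeConfig(file, map[string]string{"log-driver": "syslog"})
        fmt.Println(err) // conflict: same key from both sources
    }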
-func MergeDaemonConfigurations(flagsConfig *Config, flags *flag.FlagSet, configFile string) (*Config, error) { - fileConfig, err := getConflictFreeConfiguration(configFile, flags) - if err != nil { - return nil, err - } - - if err := ValidateConfiguration(fileConfig); err != nil { - return nil, fmt.Errorf("file configuration validation failed (%v)", err) - } - - // merge flags configuration on top of the file configuration - if err := mergo.Merge(fileConfig, flagsConfig); err != nil { - return nil, err - } - - // We need to validate again once both fileConfig and flagsConfig - // have been merged - if err := ValidateConfiguration(fileConfig); err != nil { - return nil, fmt.Errorf("file configuration validation failed (%v)", err) - } - - return fileConfig, nil -} - -// getConflictFreeConfiguration loads the configuration from a JSON file. -// It compares that configuration with the one provided by the flags, -// and returns an error if there are conflicts. -func getConflictFreeConfiguration(configFile string, flags *flag.FlagSet) (*Config, error) { - b, err := ioutil.ReadFile(configFile) - if err != nil { - return nil, err - } - - var config Config - var reader io.Reader - if flags != nil { - var jsonConfig map[string]interface{} - reader = bytes.NewReader(b) - if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil { - return nil, err - } - - configSet := configValuesSet(jsonConfig) - - if err := findConfigurationConflicts(configSet, flags); err != nil { - return nil, err - } - - // Override flag values to make sure the values set in the config file with nullable values, like `false`, - // are not overridden by default truthy values from the flags that were not explicitly set. - // See https://github.com/docker/docker/issues/20289 for an example. - // - // TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers. - namedOptions := make(map[string]interface{}) - for key, value := range configSet { - f := flags.Lookup("-" + key) - if f == nil { // ignore named flags that don't match - namedOptions[key] = value - continue - } - - if _, ok := f.Value.(boolValue); ok { - f.Value.Set(fmt.Sprintf("%v", value)) - } - } - if len(namedOptions) > 0 { - // set also default for mergeVal flags that are boolValue at the same time. - flags.VisitAll(func(f *flag.Flag) { - if opt, named := f.Value.(opts.NamedOption); named { - v, set := namedOptions[opt.Name()] - _, boolean := f.Value.(boolValue) - if set && boolean { - f.Value.Set(fmt.Sprintf("%v", v)) - } - } - }) - } - - config.valuesSet = configSet - } - - reader = bytes.NewReader(b) - err = json.NewDecoder(reader).Decode(&config) - return &config, err -} - -// configValuesSet returns the configuration values explicitly set in the file. -func configValuesSet(config map[string]interface{}) map[string]interface{} { - flatten := make(map[string]interface{}) - for k, v := range config { - if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { - for km, vm := range m { - flatten[km] = vm - } - continue - } - - flatten[k] = v - } - return flatten -} - -// findConfigurationConflicts iterates over the provided flags searching for -// duplicated configurations and unknown keys. It returns an error with all the conflicts if -// it finds any. -func findConfigurationConflicts(config map[string]interface{}, flags *flag.FlagSet) error { - // 1. Search keys from the file that we don't recognize as flags. 
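// getConflictFreeConfiguration above decodes the same bytes twice: once into
// a generic map (to learn exactly which keys the file set) and once into the
// typed struct (to get usable values). A minimal sketch of that pattern; the
// struct fields here are illustrative, not the daemon's full Config:
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type daemonConfig struct {
	Debug    bool   `json:"debug"`
	LogLevel string `json:"log-level"`
}

func decodeTwice(b []byte) (*daemonConfig, map[string]interface{}, error) {
	var set map[string]interface{}
	if err := json.NewDecoder(bytes.NewReader(b)).Decode(&set); err != nil {
		return nil, nil, err
	}
	var c daemonConfig
	if err := json.NewDecoder(bytes.NewReader(b)).Decode(&c); err != nil {
		return nil, nil, err
	}
	return &c, set, nil
}

func main() {
	// "debug" was explicitly set to false; a later merge must not clobber it
	// with a truthy default -- the docker/docker#20289 problem noted above.
	c, set, err := decodeTwice([]byte(`{"debug": false}`))
	fmt.Println(c.Debug, set, err)
}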
- unknownKeys := make(map[string]interface{}) - for key, value := range config { - flagName := "-" + key - if flag := flags.Lookup(flagName); flag == nil { - unknownKeys[key] = value - } - } - - // 2. Discard values that implement NamedOption. - // Their configuration name differs from their flag name, like `labels` and `label`. - if len(unknownKeys) > 0 { - unknownNamedConflicts := func(f *flag.Flag) { - if namedOption, ok := f.Value.(opts.NamedOption); ok { - if _, valid := unknownKeys[namedOption.Name()]; valid { - delete(unknownKeys, namedOption.Name()) - } - } - } - flags.VisitAll(unknownNamedConflicts) - } - - if len(unknownKeys) > 0 { - var unknown []string - for key := range unknownKeys { - unknown = append(unknown, key) - } - return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) - } - - var conflicts []string - printConflict := func(name string, flagValue, fileValue interface{}) string { - return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) - } - - // 3. Search keys that are present as a flag and as a file option. - duplicatedConflicts := func(f *flag.Flag) { - // search option name in the json configuration payload if the value is a named option - if namedOption, ok := f.Value.(opts.NamedOption); ok { - if optsValue, ok := config[namedOption.Name()]; ok { - conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) - } - } else { - // search flag name in the json configuration payload without trailing dashes - for _, name := range f.Names { - name = strings.TrimLeft(name, "-") - - if value, ok := config[name]; ok { - conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) - break - } - } - } - } - - flags.Visit(duplicatedConflicts) - - if len(conflicts) > 0 { - return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) - } - return nil -} - -// ValidateConfiguration validates some specific configs. -// such as config.DNS, config.Labels, config.DNSSearch, -// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads. 
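// findConfigurationConflicts above walks only the flags the user actually
// passed (flags.Visit, as opposed to VisitAll) and compares them against the
// file's keys. The same technique with the standard flag package, minus the
// NamedOption aliasing:
package main

import (
	"flag"
	"fmt"
	"strings"
)

func findConflicts(fileCfg map[string]interface{}, fs *flag.FlagSet) error {
	var conflicts []string
	fs.Visit(func(f *flag.Flag) { // Visit only sees explicitly-set flags
		if v, ok := fileCfg[f.Name]; ok {
			conflicts = append(conflicts, fmt.Sprintf("%s: (from flag: %v, from file: %v)", f.Name, f.Value, v))
		}
	})
	if len(conflicts) > 0 {
		return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
	}
	return nil
}

func main() {
	fs := flag.NewFlagSet("daemon", flag.ContinueOnError)
	fs.Bool("debug", false, "")
	_ = fs.Parse([]string{"--debug=true"})
	fmt.Println(findConflicts(map[string]interface{}{"debug": true}, fs))
}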
-func ValidateConfiguration(config *Config) error { - // validate DNS - for _, dns := range config.DNS { - if _, err := opts.ValidateIPAddress(dns); err != nil { - return err - } - } - - // validate DNSSearch - for _, dnsSearch := range config.DNSSearch { - if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil { - return err - } - } - - // validate Labels - for _, label := range config.Labels { - if _, err := opts.ValidateLabel(label); err != nil { - return err - } - } - - // validate MaxConcurrentDownloads - if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 { - return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads) - } - - // validate MaxConcurrentUploads - if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 { - return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads) - } - - // validate that "default" runtime is not reset - if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 { - if _, ok := runtimes[stockRuntimeName]; ok { - return fmt.Errorf("runtime name '%s' is reserved", stockRuntimeName) - } - } - - if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" && defaultRuntime != stockRuntimeName { - runtimes := config.GetAllRuntimes() - if _, ok := runtimes[defaultRuntime]; !ok { - return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime) - } - } - - return nil -} diff --git a/daemon/config_experimental.go b/daemon/config_experimental.go deleted file mode 100644 index ceb7c3822..000000000 --- a/daemon/config_experimental.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build experimental - -package daemon - -import flag "github.com/docker/docker/pkg/mflag" - -func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { -} diff --git a/daemon/config_solaris.go b/daemon/config_solaris.go deleted file mode 100644 index d20fc07b9..000000000 --- a/daemon/config_solaris.go +++ /dev/null @@ -1,43 +0,0 @@ -package daemon - -import ( - flag "github.com/docker/docker/pkg/mflag" -) - -var ( - defaultPidFile = "/var/run/docker.pid" - defaultGraph = "/var/lib/docker" - defaultExec = "zones" -) - -// Config defines the configuration of a docker daemon. -// These are the configuration settings that you pass -// to the docker daemon when you launch it with say: `docker -d -e lxc` -type Config struct { - CommonConfig - - // Fields below here are platform specific. - ExecRoot string `json:"exec-root,omitempty"` -} - -// bridgeConfig stores all the bridge driver specific -// configuration. -type bridgeConfig struct { - commonBridgeConfig -} - -// InstallFlags adds command-line options to the top-level flag parser for -// the current process. -// Subsequent calls to `flag.Parse` will populate config with values parsed -// from the command-line. 
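// ValidateConfiguration above applies a per-item validator to each
// list-valued setting. A cut-down analogue of the DNS and label rules using
// only the standard library (the real validators live in the opts package),
// matching the cases exercised by TestValidateConfiguration further on:
package main

import (
	"fmt"
	"net"
	"strings"
)

func validate(dns, labels []string) error {
	for _, d := range dns {
		if net.ParseIP(d) == nil {
			return fmt.Errorf("%s is not an ip address", d)
		}
	}
	for _, l := range labels {
		if !strings.Contains(l, "=") {
			return fmt.Errorf("bad attribute format: %s (expected name=value)", l)
		}
	}
	return nil
}

func main() {
	fmt.Println(validate([]string{"1.1.1.1"}, []string{"one=two"})) // <nil>
	fmt.Println(validate([]string{"1.1.1.1o"}, nil))                // error
}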
-func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { - // First handle install flags which are consistent cross-platform - config.InstallCommonFlags(cmd, usageFn) - - // Then platform-specific install flags - config.attachExperimentalFlags(cmd, usageFn) -} - -func (config *Config) isSwarmCompatible() error { - return nil -} diff --git a/daemon/config_stub.go b/daemon/config_stub.go deleted file mode 100644 index 796e6b6e4..000000000 --- a/daemon/config_stub.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !experimental - -package daemon - -import flag "github.com/docker/docker/pkg/mflag" - -func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { -} diff --git a/daemon/config_test.go b/daemon/config_test.go deleted file mode 100644 index 0375c1ae2..000000000 --- a/daemon/config_test.go +++ /dev/null @@ -1,278 +0,0 @@ -package daemon - -import ( - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/mflag" -) - -func TestDaemonConfigurationMerge(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"debug": true}`)) - f.Close() - - c := &Config{ - CommonConfig: CommonConfig{ - AutoRestart: true, - LogConfig: LogConfig{ - Type: "syslog", - Config: map[string]string{"tag": "test"}, - }, - }, - } - - cc, err := MergeDaemonConfigurations(c, nil, configFile) - if err != nil { - t.Fatal(err) - } - if !cc.Debug { - t.Fatalf("expected %v, got %v\n", true, cc.Debug) - } - if !cc.AutoRestart { - t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) - } - if cc.LogConfig.Type != "syslog" { - t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) - } -} - -func TestDaemonConfigurationNotFound(t *testing.T) { - _, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker") - if err == nil || !os.IsNotExist(err) { - t.Fatalf("expected does not exist error, got %v", err) - } -} - -func TestDaemonBrokenConfiguration(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"Debug": tru`)) - f.Close() - - _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) - if err == nil { - t.Fatalf("expected error, got %v", err) - } -} - -func TestParseClusterAdvertiseSettings(t *testing.T) { - _, err := parseClusterAdvertiseSettings("something", "") - if err != errDiscoveryDisabled { - t.Fatalf("expected discovery disabled error, got %v\n", err) - } - - _, err = parseClusterAdvertiseSettings("", "something") - if err == nil { - t.Fatalf("expected discovery store error, got %v\n", err) - } - - _, err = parseClusterAdvertiseSettings("etcd", "127.0.0.1:8080") - if err != nil { - t.Fatal(err) - } -} - -func TestFindConfigurationConflicts(t *testing.T) { - config := map[string]interface{}{"authorization-plugins": "foobar"} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - - flags.String([]string{"-authorization-plugins"}, "", "") - if err := flags.Set("-authorization-plugins", "asdf"); err != nil { - t.Fatal(err) - } - - err := findConfigurationConflicts(config, flags) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "authorization-plugins: (from flag: asdf, from file: foobar)") { - t.Fatalf("expected authorization-plugins conflict, got %v", err) - } -} - -func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) { - config 
:= map[string]interface{}{"hosts": []string{"qwer"}} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - - var hosts []string - flags.Var(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to") - if err := flags.Set("-host", "tcp://127.0.0.1:4444"); err != nil { - t.Fatal(err) - } - if err := flags.Set("H", "unix:///var/run/docker.sock"); err != nil { - t.Fatal(err) - } - - err := findConfigurationConflicts(config, flags) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "hosts") { - t.Fatalf("expected hosts conflict, got %v", err) - } -} - -func TestDaemonConfigurationMergeConflicts(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"debug": true}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.Bool([]string{"debug"}, false, "") - flags.Set("debug", "false") - - _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "debug") { - t.Fatalf("expected debug conflict, got %v", err) - } -} - -func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.String([]string{"tlscacert"}, "", "") - flags.Set("tlscacert", "~/.docker/ca.pem") - - _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "tlscacert") { - t.Fatalf("expected tlscacert conflict, got %v", err) - } -} - -func TestFindConfigurationConflictsWithUnknownKeys(t *testing.T) { - config := map[string]interface{}{"tls-verify": "true"} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - - flags.Bool([]string{"-tlsverify"}, false, "") - err := findConfigurationConflicts(config, flags) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "the following directives don't match any configuration option: tls-verify") { - t.Fatalf("expected tls-verify conflict, got %v", err) - } -} - -func TestFindConfigurationConflictsWithMergedValues(t *testing.T) { - var hosts []string - config := map[string]interface{}{"hosts": "tcp://127.0.0.1:2345"} - base := mflag.NewFlagSet("base", mflag.ContinueOnError) - base.Var(opts.NewNamedListOptsRef("hosts", &hosts, nil), []string{"H", "-host"}, "") - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - mflag.Merge(flags, base) - - err := findConfigurationConflicts(config, flags) - if err != nil { - t.Fatal(err) - } - - flags.Set("-host", "unix:///var/run/docker.sock") - err = findConfigurationConflicts(config, flags) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "hosts: (from flag: [unix:///var/run/docker.sock], from file: tcp://127.0.0.1:2345)") { - t.Fatalf("expected hosts conflict, got %v", err) - } -} - -func TestValidateConfiguration(t *testing.T) { - c1 := &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"one"}, - }, - } - - err := ValidateConfiguration(c1) - if err == nil { - t.Fatal("expected error, got nil") - } - - c2 := &Config{ - CommonConfig: CommonConfig{ - Labels: 
[]string{"one=two"}, - }, - } - - err = ValidateConfiguration(c2) - if err != nil { - t.Fatalf("expected no error, got error %v", err) - } - - c3 := &Config{ - CommonConfig: CommonConfig{ - DNS: []string{"1.1.1.1"}, - }, - } - - err = ValidateConfiguration(c3) - if err != nil { - t.Fatalf("expected no error, got error %v", err) - } - - c4 := &Config{ - CommonConfig: CommonConfig{ - DNS: []string{"1.1.1.1o"}, - }, - } - - err = ValidateConfiguration(c4) - if err == nil { - t.Fatal("expected error, got nil") - } - - c5 := &Config{ - CommonConfig: CommonConfig{ - DNSSearch: []string{"a.b.c"}, - }, - } - - err = ValidateConfiguration(c5) - if err != nil { - t.Fatalf("expected no error, got error %v", err) - } - - c6 := &Config{ - CommonConfig: CommonConfig{ - DNSSearch: []string{"123456"}, - }, - } - - err = ValidateConfiguration(c6) - if err == nil { - t.Fatal("expected error, got nil") - } -} diff --git a/daemon/config_unix.go b/daemon/config_unix.go deleted file mode 100644 index 26beaf4f3..000000000 --- a/daemon/config_unix.go +++ /dev/null @@ -1,135 +0,0 @@ -// +build linux freebsd - -package daemon - -import ( - "fmt" - "net" - - "github.com/docker/docker/opts" - flag "github.com/docker/docker/pkg/mflag" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/go-units" -) - -var ( - defaultPidFile = "/var/run/docker.pid" - defaultGraph = "/var/lib/docker" - defaultExecRoot = "/var/run/docker" -) - -// Config defines the configuration of a docker daemon. -// It includes json tags to deserialize configuration from a file -// using the same names that the flags in the command line uses. -type Config struct { - CommonConfig - - // Fields below here are platform specific. - CgroupParent string `json:"cgroup-parent,omitempty"` - ContainerdAddr string `json:"containerd,omitempty"` - EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` - ExecRoot string `json:"exec-root,omitempty"` - RemappedRoot string `json:"userns-remap,omitempty"` - Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` - Runtimes map[string]types.Runtime `json:"runtimes,omitempty"` - DefaultRuntime string `json:"default-runtime,omitempty"` - OOMScoreAdjust int `json:"oom-score-adjust,omitempty"` -} - -// bridgeConfig stores all the bridge driver specific -// configuration. -type bridgeConfig struct { - commonBridgeConfig - - // Fields below here are platform specific. - EnableIPv6 bool `json:"ipv6,omitempty"` - EnableIPTables bool `json:"iptables,omitempty"` - EnableIPForward bool `json:"ip-forward,omitempty"` - EnableIPMasq bool `json:"ip-masq,omitempty"` - EnableUserlandProxy bool `json:"userland-proxy,omitempty"` - DefaultIP net.IP `json:"ip,omitempty"` - IP string `json:"bip,omitempty"` - FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` - DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` - DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` - InterContainerCommunication bool `json:"icc,omitempty"` -} - -// InstallFlags adds command-line options to the top-level flag parser for -// the current process. -// Subsequent calls to `flag.Parse` will populate config with values parsed -// from the command-line. 
-func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { - // First handle install flags which are consistent cross-platform - config.InstallCommonFlags(cmd, usageFn) - - // Then platform-specific install flags - cmd.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, usageFn("Enable selinux support")) - cmd.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", usageFn("Group for the unix socket")) - config.Ulimits = make(map[string]*units.Ulimit) - cmd.Var(runconfigopts.NewUlimitOpt(&config.Ulimits), []string{"-default-ulimit"}, usageFn("Set default ulimits for containers")) - cmd.BoolVar(&config.bridgeConfig.EnableIPTables, []string{"#iptables", "-iptables"}, true, usageFn("Enable addition of iptables rules")) - cmd.BoolVar(&config.bridgeConfig.EnableIPForward, []string{"#ip-forward", "-ip-forward"}, true, usageFn("Enable net.ipv4.ip_forward")) - cmd.BoolVar(&config.bridgeConfig.EnableIPMasq, []string{"-ip-masq"}, true, usageFn("Enable IP masquerading")) - cmd.BoolVar(&config.bridgeConfig.EnableIPv6, []string{"-ipv6"}, false, usageFn("Enable IPv6 networking")) - cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, defaultExecRoot, usageFn("Root directory for execution state files")) - cmd.StringVar(&config.bridgeConfig.IP, []string{"#bip", "-bip"}, "", usageFn("Specify network bridge IP")) - cmd.StringVar(&config.bridgeConfig.Iface, []string{"b", "-bridge"}, "", usageFn("Attach containers to a network bridge")) - cmd.StringVar(&config.bridgeConfig.FixedCIDR, []string{"-fixed-cidr"}, "", usageFn("IPv4 subnet for fixed IPs")) - cmd.StringVar(&config.bridgeConfig.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", usageFn("IPv6 subnet for fixed IPs")) - cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv4, ""), []string{"-default-gateway"}, usageFn("Container default gateway IPv4 address")) - cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv6, ""), []string{"-default-gateway-v6"}, usageFn("Container default gateway IPv6 address")) - cmd.BoolVar(&config.bridgeConfig.InterContainerCommunication, []string{"#icc", "-icc"}, true, usageFn("Enable inter-container communication")) - cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultIP, "0.0.0.0"), []string{"#ip", "-ip"}, usageFn("Default IP when binding container ports")) - cmd.BoolVar(&config.bridgeConfig.EnableUserlandProxy, []string{"-userland-proxy"}, true, usageFn("Use userland proxy for loopback traffic")) - cmd.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, usageFn("Enable CORS headers in the remote API, this is deprecated by --api-cors-header")) - cmd.StringVar(&config.CgroupParent, []string{"-cgroup-parent"}, "", usageFn("Set parent cgroup for all containers")) - cmd.StringVar(&config.RemappedRoot, []string{"-userns-remap"}, "", usageFn("User/Group setting for user namespaces")) - cmd.StringVar(&config.ContainerdAddr, []string{"-containerd"}, "", usageFn("Path to containerd socket")) - cmd.BoolVar(&config.LiveRestore, []string{"-live-restore"}, false, usageFn("Enable live restore of docker when containers are still running")) - config.Runtimes = make(map[string]types.Runtime) - cmd.Var(runconfigopts.NewNamedRuntimeOpt("runtimes", &config.Runtimes, stockRuntimeName), []string{"-add-runtime"}, usageFn("Register an additional OCI compatible runtime")) - cmd.StringVar(&config.DefaultRuntime, []string{"-default-runtime"}, stockRuntimeName, usageFn("Default OCI runtime to be used")) - cmd.IntVar(&config.OOMScoreAdjust, 
[]string{"-oom-score-adjust"}, -500, usageFn("Set the oom_score_adj for the daemon")) - - config.attachExperimentalFlags(cmd, usageFn) -} - -// GetRuntime returns the runtime path and arguments for a given -// runtime name -func (config *Config) GetRuntime(name string) *types.Runtime { - config.reloadLock.Lock() - defer config.reloadLock.Unlock() - if rt, ok := config.Runtimes[name]; ok { - return &rt - } - return nil -} - -// GetDefaultRuntimeName returns the current default runtime -func (config *Config) GetDefaultRuntimeName() string { - config.reloadLock.Lock() - rt := config.DefaultRuntime - config.reloadLock.Unlock() - - return rt -} - -// GetAllRuntimes returns a copy of the runtimes map -func (config *Config) GetAllRuntimes() map[string]types.Runtime { - config.reloadLock.Lock() - rts := config.Runtimes - config.reloadLock.Unlock() - return rts -} - -func (config *Config) isSwarmCompatible() error { - if config.ClusterStore != "" || config.ClusterAdvertise != "" { - return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") - } - if config.LiveRestore { - return fmt.Errorf("--live-restore daemon configuration is incompatible with swarm mode") - } - return nil -} diff --git a/daemon/config_windows.go b/daemon/config_windows.go deleted file mode 100644 index 2844b983c..000000000 --- a/daemon/config_windows.go +++ /dev/null @@ -1,63 +0,0 @@ -package daemon - -import ( - "os" - - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/types" -) - -var ( - defaultPidFile = os.Getenv("programdata") + string(os.PathSeparator) + "docker.pid" - defaultGraph = os.Getenv("programdata") + string(os.PathSeparator) + "docker" -) - -// bridgeConfig stores all the bridge driver specific -// configuration. -type bridgeConfig struct { - commonBridgeConfig -} - -// Config defines the configuration of a docker daemon. -// These are the configuration settings that you pass -// to the docker daemon when you launch it with say: `docker daemon -e windows` -type Config struct { - CommonConfig - - // Fields below here are platform specific. (There are none presently - // for the Windows daemon.) -} - -// InstallFlags adds command-line options to the top-level flag parser for -// the current process. -// Subsequent calls to `flag.Parse` will populate config with values parsed -// from the command-line. -func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { - // First handle install flags which are consistent cross-platform - config.InstallCommonFlags(cmd, usageFn) - - // Then platform-specific install flags. 
- cmd.StringVar(&config.bridgeConfig.FixedCIDR, []string{"-fixed-cidr"}, "", usageFn("IPv4 subnet for fixed IPs")) - cmd.StringVar(&config.bridgeConfig.Iface, []string{"b", "-bridge"}, "", "Attach containers to a virtual switch") - cmd.StringVar(&config.SocketGroup, []string{"G", "-group"}, "", usageFn("Users or groups that can access the named pipe")) -} - -// GetRuntime returns the runtime path and arguments for a given -// runtime name -func (config *Config) GetRuntime(name string) *types.Runtime { - return nil -} - -// GetDefaultRuntimeName returns the current default runtime -func (config *Config) GetDefaultRuntimeName() string { - return stockRuntimeName -} - -// GetAllRuntimes returns a copy of the runtimes map -func (config *Config) GetAllRuntimes() map[string]types.Runtime { - return map[string]types.Runtime{} -} - -func (config *Config) isSwarmCompatible() error { - return nil -} diff --git a/daemon/container.go b/daemon/container.go deleted file mode 100644 index b9f63dedd..000000000 --- a/daemon/container.go +++ /dev/null @@ -1,256 +0,0 @@ -package daemon - -import ( - "fmt" - "path/filepath" - "regexp" - "time" - - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/network" - "github.com/docker/docker/errors" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/truncindex" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// GetContainer looks for a container using the provided information, which could be -// one of the following inputs from the caller: -// - A full container ID, which will exact match a container in daemon's list -// - A container name, which will only exact match via the GetByName() function -// - A partial container ID prefix (e.g. short ID) of any length that is -// unique enough to only return a single container object -// If none of these searches succeed, an error is returned -func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) { - if len(prefixOrName) == 0 { - return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied")) - } - - if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil { - // prefix is an exact match to a full container ID - return containerByID, nil - } - - // GetByName will match only an exact name provided; we ignore errors - if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil { - // prefix is an exact match to a full container Name - return containerByName, nil - } - - containerID, indexError := daemon.idIndex.Get(prefixOrName) - if indexError != nil { - // When truncindex defines an error type, use that instead - if indexError == truncindex.ErrNotExist { - err := fmt.Errorf("No such container: %s", prefixOrName) - return nil, errors.NewRequestNotFoundError(err) - } - return nil, indexError - } - return daemon.containers.Get(containerID), nil -} - -// Exists returns true if a container of the specified ID or name exists, -// false otherwise. -func (daemon *Daemon) Exists(id string) bool { - c, _ := daemon.GetContainer(id) - return c != nil -} - -// IsPaused returns a bool indicating if the specified container is paused.
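// GetContainer above resolves its argument in three steps: exact ID match,
// exact name match, then unique short-ID prefix via the truncindex. The same
// resolution order sketched with plain maps standing in for daemon.containers
// and the index:
package main

import (
	"errors"
	"fmt"
	"strings"
)

func resolve(ids map[string]bool, names map[string]string, key string) (string, error) {
	if ids[key] {
		return key, nil // exact match on a full ID
	}
	if id, ok := names[key]; ok {
		return id, nil // exact match on a name
	}
	var match string
	for id := range ids { // a prefix match must be unambiguous
		if strings.HasPrefix(id, key) {
			if match != "" {
				return "", errors.New("prefix is ambiguous")
			}
			match = id
		}
	}
	if match == "" {
		return "", fmt.Errorf("No such container: %s", key)
	}
	return match, nil
}

func main() {
	ids := map[string]bool{"4e6f1bcafe": true, "4a99210b1d": true}
	names := map[string]string{"/web": "4e6f1bcafe"}
	fmt.Println(resolve(ids, names, "4e")) // 4e6f1bcafe <nil>
	fmt.Println(resolve(ids, names, "4"))  // ambiguous
}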
-func (daemon *Daemon) IsPaused(id string) bool { - c, _ := daemon.GetContainer(id) - return c.State.IsPaused() -} - -func (daemon *Daemon) containerRoot(id string) string { - return filepath.Join(daemon.repository, id) -} - -// Load reads the contents of a container from disk -// This is typically done at startup. -func (daemon *Daemon) load(id string) (*container.Container, error) { - container := daemon.newBaseContainer(id) - - if err := container.FromDisk(); err != nil { - return nil, err - } - - if container.ID != id { - return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) - } - - return container, nil -} - -// Register makes a container object usable by the daemon as <container.ID> -func (daemon *Daemon) Register(c *container.Container) error { - // Attach to stdout and stderr - if c.Config.OpenStdin { - c.NewInputPipes() - } else { - c.NewNopInputPipe() - } - - daemon.containers.Add(c.ID, c) - daemon.idIndex.Add(c.ID) - - return nil -} - -func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID, managed bool) (*container.Container, error) { - var ( - id string - err error - noExplicitName = name == "" - ) - id, name, err = daemon.generateIDAndName(name) - if err != nil { - return nil, err - } - - daemon.generateHostname(id, config) - entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) - - base := daemon.newBaseContainer(id) - base.Created = time.Now().UTC() - base.Managed = managed - base.Path = entrypoint - base.Args = args //FIXME: de-duplicate from config - base.Config = config - base.HostConfig = &containertypes.HostConfig{} - base.ImageID = imgID - base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} - base.Name = name - base.Driver = daemon.GraphDriverName() - - return base, err -} - -// GetByName returns a container given a name. -func (daemon *Daemon) GetByName(name string) (*container.Container, error) { - if len(name) == 0 { - return nil, fmt.Errorf("No container name supplied") - } - fullName := name - if name[0] != '/' { - fullName = "/" + name - } - id, err := daemon.nameIndex.Get(fullName) - if err != nil { - return nil, fmt.Errorf("Could not find entity for %s", name) - } - e := daemon.containers.Get(id) - if e == nil { - return nil, fmt.Errorf("Could not find container for entity id %s", id) - } - return e, nil -} - -// newBaseContainer creates a new container with its initial -// configuration based on the root storage from the daemon. -func (daemon *Daemon) newBaseContainer(id string) *container.Container { - return container.NewBaseContainer(id, daemon.containerRoot(id)) -} - -func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) { - if len(configEntrypoint) != 0 { - return configEntrypoint[0], append(configEntrypoint[1:], configCmd...)
- } - return configCmd[0], configCmd[1:] -} - -func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) { - // Generate default hostname - if config.Hostname == "" { - config.Hostname = id[:12] - } -} - -func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error { - container.Lock() - defer container.Unlock() - return parseSecurityOpt(container, hostConfig) -} - -func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error { - // Do not lock while creating volumes since this could be calling out to external plugins - // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin - if err := daemon.registerMountPoints(container, hostConfig); err != nil { - return err - } - - container.Lock() - defer container.Unlock() - - // Register any links from the host config before starting the container - if err := daemon.registerLinks(container, hostConfig); err != nil { - return err - } - - // make sure links is not nil - // this ensures that on the next daemon restart we don't try to migrate from legacy sqlite links - if hostConfig.Links == nil { - hostConfig.Links = []string{} - } - - container.HostConfig = hostConfig - return container.ToDisk() -} - -// verifyContainerSettings performs validation of the hostconfig and config -// structures. -func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool, validateHostname bool) ([]string, error) { - - // First perform verification of settings common across all platforms. - if config != nil { - if config.WorkingDir != "" { - config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics - if !system.IsAbs(config.WorkingDir) { - return nil, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path", config.WorkingDir) - } - } - - if len(config.StopSignal) > 0 { - _, err := signal.ParseSignal(config.StopSignal) - if err != nil { - return nil, err - } - } - - // Validate if the given hostname is RFC 1123 (https://tools.ietf.org/html/rfc1123) compliant. - if validateHostname && len(config.Hostname) > 0 { - // RFC1123 specifies that 63 bytes is the maximum length - // Windows has the limitation of 63 bytes in length - // Linux hostname is limited to HOST_NAME_MAX=64, not including the terminating null byte. - // We limit the length to 63 bytes here to match RFC1035 and RFC1123.
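// verifyContainerSettings enforces the RFC 1123 hostname shape (with a
// 63-byte cap) and, further down, per-binding port validity. Both checks
// reproduced with the standard library; the real code uses the nat package
// for port parsing:
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

var hostnameRE = regexp.MustCompile(`^(([[:alnum:]]|[[:alnum:]][[:alnum:]\-]*[[:alnum:]])\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\-]*[[:alnum:]])$`)

func validHostname(h string) bool {
	return len(h) <= 63 && hostnameRE.MatchString(h)
}

// validPort accepts "8080" or "8080/tcp", mirroring nat.SplitProtoPort plus
// nat.ParsePort for the simple single-port case (port ranges are omitted).
func validPort(spec string) bool {
	port := strings.SplitN(spec, "/", 2)[0]
	n, err := strconv.Atoi(port)
	return err == nil && n >= 0 && n <= 65535
}

func main() {
	fmt.Println(validHostname("web-01.example.com"))       // true
	fmt.Println(validHostname("-bad"))                     // false
	fmt.Println(validPort("8080/tcp"), validPort("99999")) // true false
}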
- matched, _ := regexp.MatchString("^(([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])\\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])$", config.Hostname) - if len(config.Hostname) > 63 || !matched { - return nil, fmt.Errorf("invalid hostname format: %s", config.Hostname) - } - } - } - - if hostConfig == nil { - return nil, nil - } - - for port := range hostConfig.PortBindings { - _, portStr := nat.SplitProtoPort(string(port)) - if _, err := nat.ParsePort(portStr); err != nil { - return nil, fmt.Errorf("Invalid port specification: %q", portStr) - } - for _, pb := range hostConfig.PortBindings[port] { - _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) - if err != nil { - return nil, fmt.Errorf("Invalid port specification: %q", pb.HostPort) - } - } - } - - // Now do platform-specific verification - return verifyPlatformContainerSettings(daemon, hostConfig, config, update) -} diff --git a/daemon/container_operations.go b/daemon/container_operations.go deleted file mode 100644 index 8c731ed0d..000000000 --- a/daemon/container_operations.go +++ /dev/null @@ -1,768 +0,0 @@ -package daemon - -import ( - "errors" - "fmt" - "net" - "os" - "path" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/network" - derr "github.com/docker/docker/errors" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/runconfig" - containertypes "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/go-connections/nat" - "github.com/docker/libnetwork" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" - "github.com/docker/libnetwork/types" -) - -var ( - // ErrRootFSReadOnly is returned when a container - // rootfs is marked readonly. - ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") - getPortMapInfo = container.GetSandboxPortMapInfo -) - -func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) { - var ( - sboxOptions []libnetwork.SandboxOption - err error - dns []string - dnsSearch []string - dnsOptions []string - bindings = make(nat.PortMap) - pbList []types.PortBinding - exposeList []types.TransportPort - ) - - defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() - sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), - libnetwork.OptionDomainname(container.Config.Domainname)) - - if container.HostConfig.NetworkMode.IsHost() { - sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox()) - if len(container.HostConfig.ExtraHosts) == 0 { - sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts")) - } - if len(container.HostConfig.DNS) == 0 && len(daemon.configStore.DNS) == 0 && - len(container.HostConfig.DNSSearch) == 0 && len(daemon.configStore.DNSSearch) == 0 && - len(container.HostConfig.DNSOptions) == 0 && len(daemon.configStore.DNSOptions) == 0 { - sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf")) - } - } else { - // OptionUseExternalKey is mandatory for userns support. 
- // But optional for non-userns support - sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey()) - } - - container.HostsPath, err = container.GetRootResourcePath("hosts") - if err != nil { - return nil, err - } - sboxOptions = append(sboxOptions, libnetwork.OptionHostsPath(container.HostsPath)) - - container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf") - if err != nil { - return nil, err - } - sboxOptions = append(sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath)) - - if len(container.HostConfig.DNS) > 0 { - dns = container.HostConfig.DNS - } else if len(daemon.configStore.DNS) > 0 { - dns = daemon.configStore.DNS - } - - for _, d := range dns { - sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) - } - - if len(container.HostConfig.DNSSearch) > 0 { - dnsSearch = container.HostConfig.DNSSearch - } else if len(daemon.configStore.DNSSearch) > 0 { - dnsSearch = daemon.configStore.DNSSearch - } - - for _, ds := range dnsSearch { - sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) - } - - if len(container.HostConfig.DNSOptions) > 0 { - dnsOptions = container.HostConfig.DNSOptions - } else if len(daemon.configStore.DNSOptions) > 0 { - dnsOptions = daemon.configStore.DNSOptions - } - - for _, ds := range dnsOptions { - sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds)) - } - - if container.NetworkSettings.SecondaryIPAddresses != nil { - name := container.Config.Hostname - if container.Config.Domainname != "" { - name = name + "." + container.Config.Domainname - } - - for _, a := range container.NetworkSettings.SecondaryIPAddresses { - sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr)) - } - } - - for _, extraHost := range container.HostConfig.ExtraHosts { - // allow IPv6 addresses in extra hosts; only split on first ":" - parts := strings.SplitN(extraHost, ":", 2) - sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) - } - - if container.HostConfig.PortBindings != nil { - for p, b := range container.HostConfig.PortBindings { - bindings[p] = []nat.PortBinding{} - for _, bb := range b { - bindings[p] = append(bindings[p], nat.PortBinding{ - HostIP: bb.HostIP, - HostPort: bb.HostPort, - }) - } - } - } - - portSpecs := container.Config.ExposedPorts - ports := make([]nat.Port, len(portSpecs)) - var i int - for p := range portSpecs { - ports[i] = p - i++ - } - nat.SortPortMap(ports, bindings) - for _, port := range ports { - expose := types.TransportPort{} - expose.Proto = types.ParseProtocol(port.Proto()) - expose.Port = uint16(port.Int()) - exposeList = append(exposeList, expose) - - pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} - binding := bindings[port] - for i := 0; i < len(binding); i++ { - pbCopy := pb.GetCopy() - newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) - var portStart, portEnd int - if err == nil { - portStart, portEnd, err = newP.Range() - } - if err != nil { - return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) - } - pbCopy.HostPort = uint16(portStart) - pbCopy.HostPortEnd = uint16(portEnd) - pbCopy.HostIP = net.ParseIP(binding[i].HostIP) - pbList = append(pbList, pbCopy) - } - - if container.HostConfig.PublishAllPorts && len(binding) == 0 { - pbList = append(pbList, pb) - } - } - - sboxOptions = append(sboxOptions, - libnetwork.OptionPortMapping(pbList), - libnetwork.OptionExposedPorts(exposeList)) - - // Legacy Link feature is supported only for the default 
bridge network. - // return if this call to build join options is not for default bridge network - // Legacy Link is only supported by docker run --link - if _, ok := container.NetworkSettings.Networks[defaultNetName]; !container.HostConfig.NetworkMode.IsDefault() || !ok { - return sboxOptions, nil - } - - if container.NetworkSettings.Networks[defaultNetName].EndpointID == "" { - return sboxOptions, nil - } - - var ( - childEndpoints, parentEndpoints []string - cEndpointID string - ) - - children := daemon.children(container) - for linkAlias, child := range children { - if !isLinkable(child) { - return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name) - } - _, alias := path.Split(linkAlias) - // allow access to the linked container via the alias, real name, and container hostname - aliasList := alias + " " + child.Config.Hostname - // only add the name if alias isn't equal to the name - if alias != child.Name[1:] { - aliasList = aliasList + " " + child.Name[1:] - } - sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks[defaultNetName].IPAddress)) - cEndpointID = child.NetworkSettings.Networks[defaultNetName].EndpointID - if cEndpointID != "" { - childEndpoints = append(childEndpoints, cEndpointID) - } - } - - bridgeSettings := container.NetworkSettings.Networks[defaultNetName] - for alias, parent := range daemon.parents(container) { - if daemon.configStore.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() { - continue - } - - _, alias = path.Split(alias) - logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress) - sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate( - parent.ID, - alias, - bridgeSettings.IPAddress, - )) - if cEndpointID != "" { - parentEndpoints = append(parentEndpoints, cEndpointID) - } - } - - linkOptions := options.Generic{ - netlabel.GenericData: options.Generic{ - "ParentEndpoints": parentEndpoints, - "ChildEndpoints": childEndpoints, - }, - } - - sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions)) - return sboxOptions, nil -} - -func (daemon *Daemon) updateNetworkSettings(container *container.Container, n libnetwork.Network) error { - if container.NetworkSettings == nil { - container.NetworkSettings = &network.Settings{Networks: make(map[string]*networktypes.EndpointSettings)} - } - - if !container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { - return runconfig.ErrConflictHostNetwork - } - - for s := range container.NetworkSettings.Networks { - sn, err := daemon.FindNetwork(s) - if err != nil { - continue - } - - if sn.Name() == n.Name() { - // Avoid duplicate config - return nil - } - if !containertypes.NetworkMode(sn.Type()).IsPrivate() || - !containertypes.NetworkMode(n.Type()).IsPrivate() { - return runconfig.ErrConflictSharedNetwork - } - if containertypes.NetworkMode(sn.Name()).IsNone() || - containertypes.NetworkMode(n.Name()).IsNone() { - return runconfig.ErrConflictNoNetwork - } - } - - if _, ok := container.NetworkSettings.Networks[n.Name()]; !ok { - container.NetworkSettings.Networks[n.Name()] = new(networktypes.EndpointSettings) - } - - return nil -} - -func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep libnetwork.Endpoint) error { - if err := container.BuildEndpointInfo(n, ep); err != nil { - return err - } - - if container.HostConfig.NetworkMode == 
runconfig.DefaultDaemonNetworkMode() { - container.NetworkSettings.Bridge = daemon.configStore.bridgeConfig.Iface - } - - return nil -} - -// UpdateNetwork is used to update the container's network (e.g. when linked containers -// get removed/unlinked). -func (daemon *Daemon) updateNetwork(container *container.Container) error { - ctrl := daemon.netController - sid := container.NetworkSettings.SandboxID - - sb, err := ctrl.SandboxByID(sid) - if err != nil { - return fmt.Errorf("error locating sandbox id %s: %v", sid, err) - } - - // Find if container is connected to the default bridge network - var n libnetwork.Network - for name := range container.NetworkSettings.Networks { - sn, err := daemon.FindNetwork(name) - if err != nil { - continue - } - if sn.Name() == runconfig.DefaultDaemonNetworkMode().NetworkName() { - n = sn - break - } - } - - if n == nil { - // Not connected to the default bridge network; Nothing to do - return nil - } - - options, err := daemon.buildSandboxOptions(container) - if err != nil { - return fmt.Errorf("Update network failed: %v", err) - } - - if err := sb.Refresh(options...); err != nil { - return fmt.Errorf("Update network failed: Failure in refresh sandbox %s: %v", sid, err) - } - - return nil -} - -func errClusterNetworkOnRun(n string) error { - return fmt.Errorf("swarm-scoped network (%s) is not compatible with `docker create` or `docker run`. This network can only be used by a docker service", n) -} - -// updateContainerNetworkSettings update the network settings -func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) error { - var ( - n libnetwork.Network - err error - ) - - mode := container.HostConfig.NetworkMode - if container.Config.NetworkDisabled || mode.IsContainer() { - return nil - } - - networkName := mode.NetworkName() - if mode.IsDefault() { - networkName = daemon.netController.Config().Daemon.DefaultNetwork - } - if mode.IsUserDefined() { - n, err = daemon.FindNetwork(networkName) - if err != nil { - return err - } - if !container.Managed && n.Info().Dynamic() { - return errClusterNetworkOnRun(networkName) - } - networkName = n.Name() - } - if container.NetworkSettings == nil { - container.NetworkSettings = &network.Settings{} - } - if len(endpointsConfig) > 0 { - container.NetworkSettings.Networks = endpointsConfig - } - if container.NetworkSettings.Networks == nil { - container.NetworkSettings.Networks = make(map[string]*networktypes.EndpointSettings) - container.NetworkSettings.Networks[networkName] = new(networktypes.EndpointSettings) - } - if !mode.IsUserDefined() { - return nil - } - // Make sure to internally store the per network endpoint config by network name - if _, ok := container.NetworkSettings.Networks[networkName]; ok { - return nil - } - if nwConfig, ok := container.NetworkSettings.Networks[n.ID()]; ok { - container.NetworkSettings.Networks[networkName] = nwConfig - delete(container.NetworkSettings.Networks, n.ID()) - return nil - } - - return nil -} - -func (daemon *Daemon) allocateNetwork(container *container.Container) error { - controller := daemon.netController - - if daemon.netController == nil { - return nil - } - - // Cleanup any stale sandbox left over due to ungraceful daemon shutdown - if err := controller.SandboxDestroy(container.ID); err != nil { - logrus.Errorf("failed to cleanup up stale network sandbox for container %s", container.ID) - } - - updateSettings := false - if len(container.NetworkSettings.Networks) == 0 { - if 
container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() { - return nil - } - - err := daemon.updateContainerNetworkSettings(container, nil) - if err != nil { - return err - } - updateSettings = true - } - - // always connect default network first since only default - // network mode support link and we need do some setting - // on sandbox initialize for link, but the sandbox only be initialized - // on first network connecting. - defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() - if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok { - if err := daemon.connectToNetwork(container, defaultNetName, nConf, updateSettings); err != nil { - return err - } - - } - for n, nConf := range container.NetworkSettings.Networks { - if n == defaultNetName { - continue - } - if err := daemon.connectToNetwork(container, n, nConf, updateSettings); err != nil { - return err - } - } - - return container.WriteHostConfig() -} - -func (daemon *Daemon) getNetworkSandbox(container *container.Container) libnetwork.Sandbox { - var sb libnetwork.Sandbox - daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool { - if s.ContainerID() == container.ID { - sb = s - return true - } - return false - }) - return sb -} - -// hasUserDefinedIPAddress returns whether the passed endpoint configuration contains IP address configuration -func hasUserDefinedIPAddress(epConfig *networktypes.EndpointSettings) bool { - return epConfig != nil && epConfig.IPAMConfig != nil && (len(epConfig.IPAMConfig.IPv4Address) > 0 || len(epConfig.IPAMConfig.IPv6Address) > 0) -} - -// User specified ip address is acceptable only for networks with user specified subnets. -func validateNetworkingConfig(n libnetwork.Network, epConfig *networktypes.EndpointSettings) error { - if n == nil || epConfig == nil { - return nil - } - if !hasUserDefinedIPAddress(epConfig) { - return nil - } - _, _, nwIPv4Configs, nwIPv6Configs := n.Info().IpamConfig() - for _, s := range []struct { - ipConfigured bool - subnetConfigs []*libnetwork.IpamConf - }{ - { - ipConfigured: len(epConfig.IPAMConfig.IPv4Address) > 0, - subnetConfigs: nwIPv4Configs, - }, - { - ipConfigured: len(epConfig.IPAMConfig.IPv6Address) > 0, - subnetConfigs: nwIPv6Configs, - }, - } { - if s.ipConfigured { - foundSubnet := false - for _, cfg := range s.subnetConfigs { - if len(cfg.PreferredPool) > 0 { - foundSubnet = true - break - } - } - if !foundSubnet { - return runconfig.ErrUnsupportedNetworkNoSubnetAndIP - } - } - } - - return nil -} - -// cleanOperationalData resets the operational data from the passed endpoint settings -func cleanOperationalData(es *networktypes.EndpointSettings) { - es.EndpointID = "" - es.Gateway = "" - es.IPAddress = "" - es.IPPrefixLen = 0 - es.IPv6Gateway = "" - es.GlobalIPv6Address = "" - es.GlobalIPv6PrefixLen = 0 - es.MacAddress = "" -} - -func (daemon *Daemon) updateNetworkConfig(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (libnetwork.Network, error) { - if container.HostConfig.NetworkMode.IsContainer() { - return nil, runconfig.ErrConflictSharedNetwork - } - - if containertypes.NetworkMode(idOrName).IsBridge() && - daemon.configStore.DisableBridge { - container.Config.NetworkDisabled = true - return nil, nil - } - - if !containertypes.NetworkMode(idOrName).IsUserDefined() { - if hasUserDefinedIPAddress(endpointConfig) { - return nil, runconfig.ErrUnsupportedNetworkAndIP - } - if endpointConfig != nil && 
len(endpointConfig.Aliases) > 0 { - return nil, runconfig.ErrUnsupportedNetworkAndAlias - } - } else { - addShortID := true - shortID := stringid.TruncateID(container.ID) - for _, alias := range endpointConfig.Aliases { - if alias == shortID { - addShortID = false - break - } - } - if addShortID { - endpointConfig.Aliases = append(endpointConfig.Aliases, shortID) - } - } - - n, err := daemon.FindNetwork(idOrName) - if err != nil { - return nil, err - } - - if err := validateNetworkingConfig(n, endpointConfig); err != nil { - return nil, err - } - - if updateSettings { - if err := daemon.updateNetworkSettings(container, n); err != nil { - return nil, err - } - } - return n, nil -} - -func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) { - if endpointConfig == nil { - endpointConfig = &networktypes.EndpointSettings{} - } - n, err := daemon.updateNetworkConfig(container, idOrName, endpointConfig, updateSettings) - if err != nil { - return err - } - if n == nil { - return nil - } - - controller := daemon.netController - - sb := daemon.getNetworkSandbox(container) - createOptions, err := container.BuildCreateEndpointOptions(n, endpointConfig, sb) - if err != nil { - return err - } - - endpointName := strings.TrimPrefix(container.Name, "/") - ep, err := n.CreateEndpoint(endpointName, createOptions...) - if err != nil { - return err - } - defer func() { - if err != nil { - if e := ep.Delete(false); e != nil { - logrus.Warnf("Could not rollback container connection to network %s", idOrName) - } - } - }() - container.NetworkSettings.Networks[n.Name()] = endpointConfig - - if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil { - return err - } - - if sb == nil { - options, err := daemon.buildSandboxOptions(container) - if err != nil { - return err - } - sb, err = controller.NewSandbox(container.ID, options...) 
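// connectToNetwork above uses a named error return plus a deferred check to
// delete the just-created endpoint if any later step (sandbox join, settings
// update) fails. That rollback pattern in isolation, with placeholder
// create/use steps:
package main

import (
	"errors"
	"fmt"
)

func connectWithRollback(create func() (rollback func(), err error), use func() error) (err error) {
	rollback, err := create()
	if err != nil {
		return err
	}
	defer func() {
		if err != nil { // the defer sees the final value of the named return
			rollback()
		}
	}()
	return use()
}

func main() {
	err := connectWithRollback(
		func() (func(), error) { return func() { fmt.Println("endpoint deleted (rolled back)") }, nil },
		func() error { return errors.New("join failed") },
	)
	fmt.Println(err)
}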
- if err != nil { - return err - } - - container.UpdateSandboxNetworkSettings(sb) - } - - joinOptions, err := container.BuildJoinOptions(n) - if err != nil { - return err - } - - if err := ep.Join(sb, joinOptions...); err != nil { - return err - } - - if err := container.UpdateJoinInfo(n, ep); err != nil { - return fmt.Errorf("Updating join info failed: %v", err) - } - - container.NetworkSettings.Ports = getPortMapInfo(sb) - - daemon.LogNetworkEventWithAttributes(n, "connect", map[string]string{"container": container.ID}) - return nil -} - -// ForceEndpointDelete deletes an endpoint from a network forcefully -func (daemon *Daemon) ForceEndpointDelete(name string, n libnetwork.Network) error { - ep, err := n.EndpointByName(name) - if err != nil { - return err - } - return ep.Delete(true) -} - -func disconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { - var ( - ep libnetwork.Endpoint - sbox libnetwork.Sandbox - ) - - s := func(current libnetwork.Endpoint) bool { - epInfo := current.Info() - if epInfo == nil { - return false - } - if sb := epInfo.Sandbox(); sb != nil { - if sb.ContainerID() == container.ID { - ep = current - sbox = sb - return true - } - } - return false - } - n.WalkEndpoints(s) - - if ep == nil && force { - epName := strings.TrimPrefix(container.Name, "/") - ep, err := n.EndpointByName(epName) - if err != nil { - return err - } - return ep.Delete(force) - } - - if ep == nil { - return fmt.Errorf("container %s is not connected to the network", container.ID) - } - - if err := ep.Leave(sbox); err != nil { - return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err) - } - - container.NetworkSettings.Ports = getPortMapInfo(sbox) - - if err := ep.Delete(false); err != nil { - return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err) - } - - delete(container.NetworkSettings.Networks, n.Name()) - return nil -} - -func (daemon *Daemon) initializeNetworking(container *container.Container) error { - var err error - - if container.HostConfig.NetworkMode.IsContainer() { - // we need to get the hosts files from the container to join - nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer()) - if err != nil { - return err - } - container.HostnamePath = nc.HostnamePath - container.HostsPath = nc.HostsPath - container.ResolvConfPath = nc.ResolvConfPath - container.Config.Hostname = nc.Config.Hostname - container.Config.Domainname = nc.Config.Domainname - return nil - } - - if container.HostConfig.NetworkMode.IsHost() { - container.Config.Hostname, err = os.Hostname() - if err != nil { - return err - } - } - - if err := daemon.allocateNetwork(container); err != nil { - return err - } - - return container.BuildHostnameFile() -} - -func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) { - nc, err := daemon.GetContainer(connectedContainerID) - if err != nil { - return nil, err - } - if containerID == nc.ID { - return nil, fmt.Errorf("cannot join own network") - } - if !nc.IsRunning() { - err := fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID) - return nil, derr.NewRequestConflictError(err) - } - if nc.IsRestarting() { - return nil, errContainerIsRestarting(connectedContainerID) - } - return nc, nil -} - -func (daemon *Daemon) releaseNetwork(container *container.Container) { - if daemon.netController == nil { - return - }
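// disconnectFromNetwork above locates the container's endpoint by walking
// all endpoints with a visitor that returns true to stop the walk -- the
// convention WalkEndpoints and WalkSandboxes share. That callback shape in
// miniature:
package main

import "fmt"

func walk(items []string, visit func(string) bool) {
	for _, it := range items {
		if visit(it) { // true means "found it, stop walking"
			return
		}
	}
}

func main() {
	var found string
	walk([]string{"ep-a", "ep-b", "ep-c"}, func(ep string) bool {
		if ep == "ep-b" {
			found = ep
			return true
		}
		return false
	})
	fmt.Println(found) // ep-b
}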
- if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { - return - } - - sid := container.NetworkSettings.SandboxID - settings := container.NetworkSettings.Networks - container.NetworkSettings.Ports = nil - - if sid == "" || len(settings) == 0 { - return - } - - var networks []libnetwork.Network - for n, epSettings := range settings { - if nw, err := daemon.FindNetwork(n); err == nil { - networks = append(networks, nw) - } - cleanOperationalData(epSettings) - } - - sb, err := daemon.netController.SandboxByID(sid) - if err != nil { - logrus.Warnf("error locating sandbox id %s: %v", sid, err) - return - } - - if err := sb.Delete(); err != nil { - logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err) - } - - for _, nw := range networks { - attributes := map[string]string{ - "container": container.ID, - } - daemon.LogNetworkEventWithAttributes(nw, "disconnect", attributes) - } -} diff --git a/daemon/container_operations_solaris.go b/daemon/container_operations_solaris.go deleted file mode 100644 index b98faaae1..000000000 --- a/daemon/container_operations_solaris.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build solaris - -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" -) - -func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { - return nil, nil -} - -// ConnectToNetwork connects a container to a network -func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { - return fmt.Errorf("Solaris does not support connecting a running container to a network") -} - -// getSize returns real size & virtual size -func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { - return 0, 0 -} - -// DisconnectFromNetwork disconnects a container from the network -func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { - return fmt.Errorf("Solaris does not support disconnecting a running container from a network") -} - -func (daemon *Daemon) setupIpcDirs(container *container.Container) error { - return nil -} - -func (daemon *Daemon) mountVolumes(container *container.Container) error { - return nil -} - -func killProcessDirectly(container *container.Container) error { - return nil -} - -func detachMounted(path string) error { - return nil -} - -func isLinkable(child *container.Container) bool { - return false -} diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go deleted file mode 100644 index 55bd3fc83..000000000 --- a/daemon/container_operations_unix.go +++ /dev/null @@ -1,385 +0,0 @@ -// +build linux freebsd - -package daemon - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/links" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/runconfig" - containertypes "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" - "github.com/opencontainers/runc/libcontainer/configs" - 
"github.com/opencontainers/runc/libcontainer/devices" - "github.com/opencontainers/runc/libcontainer/label" - "github.com/opencontainers/specs/specs-go" -) - -func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } -func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } - -func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { - var env []string - children := daemon.children(container) - - bridgeSettings := container.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] - if bridgeSettings == nil { - return nil, nil - } - - for linkAlias, child := range children { - if !child.IsRunning() { - return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) - } - - childBridgeSettings := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] - if childBridgeSettings == nil { - return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) - } - - link := links.NewLink( - bridgeSettings.IPAddress, - childBridgeSettings.IPAddress, - linkAlias, - child.Config.Env, - child.Config.ExposedPorts, - ) - - for _, envVar := range link.ToEnv() { - env = append(env, envVar) - } - } - - return env, nil -} - -// getSize returns the real size & virtual size of the container. -func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { - var ( - sizeRw, sizeRootfs int64 - err error - ) - - if err := daemon.Mount(container); err != nil { - logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err) - return sizeRw, sizeRootfs - } - defer daemon.Unmount(container) - - sizeRw, err = container.RWLayer.Size() - if err != nil { - logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", - daemon.GraphDriverName(), container.ID, err) - // FIXME: GetSize should return an error. Not changing it now in case - // there is a side-effect. - sizeRw = -1 - } - - if parent := container.RWLayer.Parent(); parent != nil { - sizeRootfs, err = parent.Size() - if err != nil { - sizeRootfs = -1 - } else if sizeRw != -1 { - sizeRootfs += sizeRw - } - } - return sizeRw, sizeRootfs -} - -// ConnectToNetwork connects a container to a network -func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { - if endpointConfig == nil { - endpointConfig = &networktypes.EndpointSettings{} - } - if !container.Running { - if container.RemovalInProgress || container.Dead { - return errRemovalContainer(container.ID) - } - if _, err := daemon.updateNetworkConfig(container, idOrName, endpointConfig, true); err != nil { - return err - } - container.NetworkSettings.Networks[idOrName] = endpointConfig - } else { - if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { - return err - } - } - if err := container.ToDiskLocking(); err != nil { - return fmt.Errorf("Error saving container to disk: %v", err) - } - return nil -} - -// DisconnectFromNetwork disconnects container from network n. 
-func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { - if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { - return runconfig.ErrConflictHostNetwork - } - if !container.Running { - if container.RemovalInProgress || container.Dead { - return errRemovalContainer(container.ID) - } - if _, ok := container.NetworkSettings.Networks[n.Name()]; ok { - delete(container.NetworkSettings.Networks, n.Name()) - } else { - return fmt.Errorf("container %s is not connected to the network %s", container.ID, n.Name()) - } - } else { - if err := disconnectFromNetwork(container, n, false); err != nil { - return err - } - } - - if err := container.ToDiskLocking(); err != nil { - return fmt.Errorf("Error saving container to disk: %v", err) - } - - attributes := map[string]string{ - "container": container.ID, - } - daemon.LogNetworkEventWithAttributes(n, "disconnect", attributes) - return nil -} - -func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) { - containerID := container.HostConfig.IpcMode.Container() - c, err := daemon.GetContainer(containerID) - if err != nil { - return nil, err - } - if !c.IsRunning() { - return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) - } - if c.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - return c, nil -} - -func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) { - containerID := container.HostConfig.PidMode.Container() - c, err := daemon.GetContainer(containerID) - if err != nil { - return nil, err - } - if !c.IsRunning() { - return nil, fmt.Errorf("cannot join PID of a non running container: %s", containerID) - } - if c.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - return c, nil -} - -func (daemon *Daemon) setupIpcDirs(c *container.Container) error { - var err error - - c.ShmPath, err = c.ShmResourcePath() - if err != nil { - return err - } - - if c.HostConfig.IpcMode.IsContainer() { - ic, err := daemon.getIpcContainer(c) - if err != nil { - return err - } - c.ShmPath = ic.ShmPath - } else if c.HostConfig.IpcMode.IsHost() { - if _, err := os.Stat("/dev/shm"); err != nil { - return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") - } - c.ShmPath = "/dev/shm" - } else { - rootUID, rootGID := daemon.GetRemappedUIDGID() - if !c.HasMountFor("/dev/shm") { - shmPath, err := c.ShmResourcePath() - if err != nil { - return err - } - - if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil { - return err - } - - shmSize := container.DefaultSHMSize - if c.HostConfig.ShmSize != 0 { - shmSize = c.HostConfig.ShmSize - } - shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) - if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { - return fmt.Errorf("mounting shm tmpfs: %s", err) - } - if err := os.Chown(shmPath, rootUID, rootGID); err != nil { - return err - } - } - - } - - return nil -} - -func (daemon *Daemon) mountVolumes(container *container.Container) error { - mounts, err := daemon.setupMounts(container) - if err != nil { - return err - } - - for _, m := range mounts { - dest, err := container.GetResourcePath(m.Destination) - if err != nil { - return err - } - - var stat os.FileInfo - stat, err = os.Stat(m.Source) - if 
err != nil { - return err - } - if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil { - return err - } - - opts := "rbind,ro" - if m.Writable { - opts = "rbind,rw" - } - - if err := mount.Mount(m.Source, dest, "bind", opts); err != nil { - return err - } - - // mountVolumes() seems to be called for temporary mounts - // outside the container. Soon these will be unmounted with - // the lazy unmount option and given we have mounted the rbind, - // all the submounts will propagate if these are shared. If - // the daemon is running in the host namespace and has / as shared - // then these unmounts will propagate and unmount the original - // mount as well. So make all these mounts rprivate. - // Do not use the propagation property of the volume as that should - // apply only when mounting happens inside the container. - if err := mount.MakeRPrivate(dest); err != nil { - return err - } - } - - return nil -} - -func killProcessDirectly(container *container.Container) error { - if _, err := container.WaitStop(10 * time.Second); err != nil { - // Ensure that we don't kill ourselves - if pid := container.GetPID(); pid != 0 { - logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID)) - if err := syscall.Kill(pid, 9); err != nil { - if err != syscall.ESRCH { - return err - } - e := errNoSuchProcess{pid, 9} - logrus.Debug(e) - return e - } - } - } - return nil -} - -func specDevice(d *configs.Device) specs.Device { - return specs.Device{ - Type: string(d.Type), - Path: d.Path, - Major: d.Major, - Minor: d.Minor, - FileMode: fmPtr(int64(d.FileMode)), - UID: u32Ptr(int64(d.Uid)), - GID: u32Ptr(int64(d.Gid)), - } -} - -func specDeviceCgroup(d *configs.Device) specs.DeviceCgroup { - t := string(d.Type) - return specs.DeviceCgroup{ - Allow: true, - Type: &t, - Major: &d.Major, - Minor: &d.Minor, - Access: &d.Permissions, - } -} - -func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { - resolvedPathOnHost := deviceMapping.PathOnHost - - // check if it is a symbolic link - if src, e := os.Lstat(deviceMapping.PathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { - if linkedPathOnHost, e := filepath.EvalSymlinks(deviceMapping.PathOnHost); e == nil { - resolvedPathOnHost = linkedPathOnHost - } - } - - device, err := devices.DeviceFromPath(resolvedPathOnHost, deviceMapping.CgroupPermissions) - // if there was no error, return the device - if err == nil { - device.Path = deviceMapping.PathInContainer - return append(devs, specDevice(device)), append(devPermissions, specDeviceCgroup(device)), nil - } - - // if the device is not a device node - // try to see if it's a directory holding many devices - if err == devices.ErrNotADevice { - - // check if it is a directory - if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { - - // mount the internal devices recursively - filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { - childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions) - if e != nil { - // ignore the device - return nil - } - - // add the device to userSpecified devices - childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, deviceMapping.PathInContainer, 1) - devs = append(devs, specDevice(childDevice)) - devPermissions = append(devPermissions, specDeviceCgroup(childDevice)) - - return nil - }) - } - } - - if len(devs) > 0 { - return devs, devPermissions, nil - }
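// A pure-stdlib sketch of the symlink-resolution step used by
// getDevicesFromPath above: if the host path is a symlink (e.g. a
// /dev/disk/by-id/... entry pointing at /dev/sda), resolve it before
// inspecting the device node.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func resolveDevicePath(pathOnHost string) string {
	resolved := pathOnHost
	// Lstat (not Stat) so the link itself is examined rather than its target.
	if fi, err := os.Lstat(pathOnHost); err == nil && fi.Mode()&os.ModeSymlink != 0 {
		if target, err := filepath.EvalSymlinks(pathOnHost); err == nil {
			resolved = target
		}
	}
	return resolved
}

func main() {
	// /dev/stdout is commonly a symlink chain on Linux.
	fmt.Println(resolveDevicePath("/dev/stdout"))
}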
- - return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err) -} - -func detachMounted(path string) error { - return syscall.Unmount(path, syscall.MNT_DETACH) -} - -func isLinkable(child *container.Container) bool { - // A container is linkable only if it belongs to the default network - _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] - return ok -} - -func errRemovalContainer(containerID string) error { - return fmt.Errorf("Container %s is marked for removal and cannot be connected to or disconnected from the network", containerID) -} diff --git a/daemon/container_operations_windows.go b/daemon/container_operations_windows.go deleted file mode 100644 index 32d18c2fa..000000000 --- a/daemon/container_operations_windows.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build windows - -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" -) - -func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { - return nil, nil -} - -// ConnectToNetwork connects a container to a network -func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { - return fmt.Errorf("Windows does not support connecting a running container to a network") -} - -// DisconnectFromNetwork disconnects container from a network. -func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { - return fmt.Errorf("Windows does not support disconnecting a running container from a network") -} - -// getSize returns real size & virtual size -func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { - // TODO Windows - return 0, 0 -} - -func (daemon *Daemon) setupIpcDirs(container *container.Container) error { - return nil -} - -// TODO Windows: Fix Post-TP5. This is a hack to allow docker cp to work -// against containers which have volumes. You will still be able to cp -// to somewhere on the container drive, but not to any mounted volumes -// inside the container. Without this fix, docker cp is broken for any -// container which has a volume, regardless of where the file is inside the -// container.
-func (daemon *Daemon) mountVolumes(container *container.Container) error { - return nil -} - -func detachMounted(path string) error { - return nil -} - -func killProcessDirectly(container *container.Container) error { - return nil -} - -func isLinkable(child *container.Container) bool { - return false -} diff --git a/daemon/create.go b/daemon/create.go deleted file mode 100644 index 13424f475..000000000 --- a/daemon/create.go +++ /dev/null @@ -1,260 +0,0 @@ -package daemon - -import ( - "fmt" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/errors" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/runconfig" - volumestore "github.com/docker/docker/volume/store" - "github.com/docker/engine-api/types" - containertypes "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/opencontainers/runc/libcontainer/label" -) - -// CreateManagedContainer creates a container that is managed by a Service -func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error) { - return daemon.containerCreate(params, true, validateHostname) -} - -// ContainerCreate creates a regular container -func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error) { - return daemon.containerCreate(params, false, validateHostname) -} - -func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool, validateHostname bool) (types.ContainerCreateResponse, error) { - if params.Config == nil { - return types.ContainerCreateResponse{}, fmt.Errorf("Config cannot be empty in order to create a container") - } - - warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false, validateHostname) - if err != nil { - return types.ContainerCreateResponse{Warnings: warnings}, err - } - - err = daemon.verifyNetworkingConfig(params.NetworkingConfig) - if err != nil { - return types.ContainerCreateResponse{}, err - } - - if params.HostConfig == nil { - params.HostConfig = &containertypes.HostConfig{} - } - err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) - if err != nil { - return types.ContainerCreateResponse{Warnings: warnings}, err - } - - container, err := daemon.create(params, managed) - if err != nil { - return types.ContainerCreateResponse{Warnings: warnings}, daemon.imageNotExistToErrcode(err) - } - - return types.ContainerCreateResponse{ID: container.ID, Warnings: warnings}, nil -} - -// Create creates a new container from the given configuration with a given name. 
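// A compact illustration (with stand-in types, not the engine-api ones) of
// the entry-point pattern above: CreateManagedContainer and ContainerCreate
// are thin wrappers that differ only in the managed flag they pass to one
// shared helper, so service-owned and user-created containers follow a
// single create path.
package sketch

import "errors"

type CreateConfig struct {
	Name string
}

type CreateResponse struct {
	ID       string
	Warnings []string
}

type daemon struct{}

func (d *daemon) CreateManagedContainer(params CreateConfig) (CreateResponse, error) {
	return d.containerCreate(params, true)
}

func (d *daemon) ContainerCreate(params CreateConfig) (CreateResponse, error) {
	return d.containerCreate(params, false)
}

func (d *daemon) containerCreate(params CreateConfig, managed bool) (CreateResponse, error) {
	if params.Name == "" {
		return CreateResponse{}, errors.New("config cannot be empty")
	}
	// A managed container (e.g. one owned by a service) can diverge here.
	_ = managed
	return CreateResponse{ID: "abc123"}, nil
}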
-func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) { - var ( - container *container.Container - img *image.Image - imgID image.ID - err error - ) - - if params.Config.Image != "" { - img, err = daemon.GetImage(params.Config.Image) - if err != nil { - return nil, err - } - imgID = img.ID() - } - - if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { - return nil, err - } - - if err := daemon.mergeAndVerifyLogConfig(¶ms.HostConfig.LogConfig); err != nil { - return nil, err - } - - if container, err = daemon.newContainer(params.Name, params.Config, imgID, managed); err != nil { - return nil, err - } - defer func() { - if retErr != nil { - if err := daemon.cleanupContainer(container, true); err != nil { - logrus.Errorf("failed to cleanup container on create error: %v", err) - } - } - }() - - if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil { - return nil, err - } - - container.HostConfig.StorageOpt = params.HostConfig.StorageOpt - - // Set RWLayer for container after mount labels have been set - if err := daemon.setRWLayer(container); err != nil { - return nil, err - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) - if err != nil { - return nil, err - } - if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { - return nil, err - } - - if err := daemon.setHostConfig(container, params.HostConfig); err != nil { - return nil, err - } - defer func() { - if retErr != nil { - if err := daemon.removeMountPoints(container, true); err != nil { - logrus.Error(err) - } - } - }() - - if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { - return nil, err - } - - var endpointsConfigs map[string]*networktypes.EndpointSettings - if params.NetworkingConfig != nil { - endpointsConfigs = params.NetworkingConfig.EndpointsConfig - } - // Make sure NetworkMode has an acceptable value. We do this to ensure - // backwards API compatibility. 
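// A minimal sketch of the error-cleanup idiom used twice in create() above:
// a named return error lets deferred handlers undo partial work only when
// the function is failing. allocate/release/configure are stand-ins.
package main

import "fmt"

func createThing() (retErr error) {
	id := allocate()
	defer func() {
		if retErr != nil {
			release(id) // runs only on the failure path
		}
	}()
	if err := configure(id); err != nil {
		return fmt.Errorf("configuring %s: %v", id, err)
	}
	return nil
}

func allocate() string           { return "thing-1" }
func release(id string)          { fmt.Println("released", id) }
func configure(id string) error  { return nil }

func main() { fmt.Println(createThing()) }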
- container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) - - if err := daemon.updateContainerNetworkSettings(container, endpointsConfigs); err != nil { - return nil, err - } - - if err := container.ToDisk(); err != nil { - logrus.Errorf("Error saving new container to disk: %v", err) - return nil, err - } - if err := daemon.Register(container); err != nil { - return nil, err - } - daemon.LogContainerEvent(container, "create") - return container, nil -} - -func (daemon *Daemon) generateSecurityOpt(ipcMode containertypes.IpcMode, pidMode containertypes.PidMode, privileged bool) ([]string, error) { - if ipcMode.IsHost() || pidMode.IsHost() || privileged { - return label.DisableSecOpt(), nil - } - - var ipcLabel []string - var pidLabel []string - ipcContainer := ipcMode.Container() - pidContainer := pidMode.Container() - if ipcContainer != "" { - c, err := daemon.GetContainer(ipcContainer) - if err != nil { - return nil, err - } - ipcLabel = label.DupSecOpt(c.ProcessLabel) - if pidContainer == "" { - return ipcLabel, err - } - } - if pidContainer != "" { - c, err := daemon.GetContainer(pidContainer) - if err != nil { - return nil, err - } - - pidLabel = label.DupSecOpt(c.ProcessLabel) - if ipcContainer == "" { - return pidLabel, err - } - } - - if pidLabel != nil && ipcLabel != nil { - for i := 0; i < len(pidLabel); i++ { - if pidLabel[i] != ipcLabel[i] { - return nil, fmt.Errorf("--ipc and --pid containers SELinux labels aren't the same") - } - } - return pidLabel, nil - } - return nil, nil -} - -func (daemon *Daemon) setRWLayer(container *container.Container) error { - var layerID layer.ChainID - if container.ImageID != "" { - img, err := daemon.imageStore.Get(container.ImageID) - if err != nil { - return err - } - layerID = img.RootFS.ChainID() - } - rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, container.MountLabel, daemon.setupInitLayer, container.HostConfig.StorageOpt) - if err != nil { - return err - } - container.RWLayer = rwLayer - - return nil -} - -// VolumeCreate creates a volume with the specified name, driver, and opts -// This is called directly from the remote API -func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) { - if name == "" { - name = stringid.GenerateNonCryptoID() - } - - v, err := daemon.volumes.Create(name, driverName, opts, labels) - if err != nil { - if volumestore.IsNameConflict(err) { - return nil, fmt.Errorf("A volume named %s already exists. 
Choose a different volume name.", name) - } - return nil, err - } - - daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) - apiV := volumeToAPIType(v) - apiV.Mountpoint = v.Path() - return apiV, nil -} - -func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { - if img != nil && img.Config != nil { - if err := merge(config, img.Config); err != nil { - return err - } - } - if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { - return fmt.Errorf("No command specified") - } - return nil -} - -// Checks if the client set configurations for more than one network while creating a container -func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { - if nwConfig == nil || len(nwConfig.EndpointsConfig) <= 1 { - return nil - } - l := make([]string, 0, len(nwConfig.EndpointsConfig)) - for k := range nwConfig.EndpointsConfig { - l = append(l, k) - } - err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) - return errors.NewBadRequestError(err) -} diff --git a/daemon/create_unix.go b/daemon/create_unix.go deleted file mode 100644 index 37c4a911f..000000000 --- a/daemon/create_unix.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build !windows - -package daemon - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/stringid" - containertypes "github.com/docker/engine-api/types/container" - "github.com/opencontainers/runc/libcontainer/label" -) - -// createContainerPlatformSpecificSettings performs platform specific container create functionality -func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { - if err := daemon.Mount(container); err != nil { - return err - } - defer daemon.Unmount(container) - - rootUID, rootGID := daemon.GetRemappedUIDGID() - if err := container.SetupWorkingDirectory(rootUID, rootGID); err != nil { - return err - } - - for spec := range config.Volumes { - name := stringid.GenerateNonCryptoID() - destination := filepath.Clean(spec) - - // Skip volumes for which we already have something mounted on that - // destination because of a --volume-from. - if container.IsDestinationMounted(destination) { - continue - } - path, err := container.GetResourcePath(destination) - if err != nil { - return err - } - - stat, err := os.Stat(path) - if err == nil && !stat.IsDir() { - return fmt.Errorf("cannot mount volume over existing file, file exists %s", path) - } - - v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil) - if err != nil { - return err - } - - if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil { - return err - } - - container.AddMountPointWithVolume(destination, v, true) - } - return daemon.populateVolumes(container) -} - -// populateVolumes copies data from the container's rootfs into the volume for non-binds. -// this is only called when the container is created. 
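// A self-contained sketch of the anonymous-volume handling in
// createContainerPlatformSpecificSettings above: each VOLUME destination gets
// a random name unless something (e.g. --volumes-from) already mounted that
// destination. GenerateNonCryptoID is approximated here with crypto/rand hex.
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"path/filepath"
)

func randomID() string {
	b := make([]byte, 32)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return hex.EncodeToString(b)
}

// planVolumes maps each unmounted destination to a freshly generated name.
func planVolumes(specs map[string]struct{}, mounted map[string]bool) map[string]string {
	plan := map[string]string{}
	for spec := range specs {
		dest := filepath.Clean(spec)
		if mounted[dest] {
			continue // reuse the mount inherited from another container
		}
		plan[dest] = randomID()
	}
	return plan
}

func main() {
	fmt.Println(planVolumes(map[string]struct{}{"/data": {}}, map[string]bool{}))
}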
-func (daemon *Daemon) populateVolumes(c *container.Container) error { - for _, mnt := range c.MountPoints { - if !mnt.CopyData || mnt.Volume == nil { - continue - } - - logrus.Debugf("copying image data from %s:%s to %s", c.ID, mnt.Destination, mnt.Name) - if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil { - return err - } - } - return nil -} diff --git a/daemon/create_windows.go b/daemon/create_windows.go deleted file mode 100644 index d4da759fd..000000000 --- a/daemon/create_windows.go +++ /dev/null @@ -1,80 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/volume" - containertypes "github.com/docker/engine-api/types/container" -) - -// createContainerPlatformSpecificSettings performs platform specific container create functionality -func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { - // Make sure the host config has the default daemon isolation if not specified by caller. - if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) { - hostConfig.Isolation = daemon.defaultIsolation - } - - for spec := range config.Volumes { - - mp, err := volume.ParseMountSpec(spec, hostConfig.VolumeDriver) - if err != nil { - return fmt.Errorf("Unrecognised volume spec: %v", err) - } - - // If the mountpoint doesn't have a name, generate one. - if len(mp.Name) == 0 { - mp.Name = stringid.GenerateNonCryptoID() - } - - // Skip volumes for which we already have something mounted on that - // destination because of a --volume-from. - if container.IsDestinationMounted(mp.Destination) { - continue - } - - volumeDriver := hostConfig.VolumeDriver - - // Create the volume in the volume driver. If it doesn't exist, - // a new one will be created. - v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil, nil) - if err != nil { - return err - } - - // FIXME Windows: This code block is present in the Linux version and - // allows the contents to be copied to the container FS prior to it - // being started. However, the function utilizes the FollowSymLinkInScope - // path which does not cope with Windows volume-style file paths. There - // is a separate effort to resolve this (@swernli), so this processing - // is deferred for now. A case where this would be useful is when - // a dockerfile includes a VOLUME statement, but something is created - // in that directory during the dockerfile processing. What this means - // on Windows for TP5 is that in that scenario, the contents will not be - // copied, but that's (somewhat) OK as HCS will bomb out soon after - // as it doesn't support mapped directories which have contents in the - // destination path anyway. - // - // Example for repro later: - // FROM windowsservercore - // RUN mkdir c:\myvol - // RUN copy c:\windows\system32\ntdll.dll c:\myvol - // VOLUME "c:\myvol" - // - // Then - // docker build -t vol . - // docker run -it --rm vol cmd <-- This is where HCS will error out.
- // - // // never attempt to copy existing content in a container FS to a shared volume - // if v.DriverName() == volume.DefaultDriverName { - // if err := container.CopyImagePathContent(v, mp.Destination); err != nil { - // return err - // } - // } - - // Add it to container.MountPoints - container.AddMountPointWithVolume(mp.Destination, v, mp.RW) - } - return nil -} diff --git a/daemon/daemon.go b/daemon/daemon.go deleted file mode 100644 index c4b739800..000000000 --- a/daemon/daemon.go +++ /dev/null @@ -1,1055 +0,0 @@ -// Package daemon exposes the functions that occur on the host server -// that the Docker daemon is running. -// -// In implementing the various functions of the daemon, there is often -// a method-specific struct for configuring the runtime behavior. -package daemon - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "sync" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/api" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/events" - "github.com/docker/docker/daemon/exec" - "github.com/docker/engine-api/types" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/libnetwork/cluster" - // register graph drivers - _ "github.com/docker/docker/daemon/graphdriver/register" - dmetadata "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/migrate/v1" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/graphdb" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/registrar" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/truncindex" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" - volumedrivers "github.com/docker/docker/volume/drivers" - "github.com/docker/docker/volume/local" - "github.com/docker/docker/volume/store" - "github.com/docker/libnetwork" - nwconfig "github.com/docker/libnetwork/config" - "github.com/docker/libtrust" -) - -var ( - // DefaultRuntimeBinary is the default runtime to be used by - // containerd if none is specified - DefaultRuntimeBinary = "docker-runc" - - errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.") -) - -// Daemon holds information about the Docker daemon. 
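// A small sketch of the blank-import registration pattern used above for
// graph drivers (import _ "github.com/docker/docker/daemon/graphdriver/register"):
// a driver package registers itself in init(), so importing it purely for its
// side effects is enough to make the driver available. Names here are
// illustrative stand-ins.
package main

import "fmt"

var drivers = map[string]func() string{}

// Register would normally live in a shared registry package.
func Register(name string, factory func() string) { drivers[name] = factory }

// In a real driver package this init() runs as soon as the package is
// imported, even via a blank import.
func init() {
	Register("overlay", func() string { return "overlay driver ready" })
}

func main() {
	if factory, ok := drivers["overlay"]; ok {
		fmt.Println(factory())
	}
}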
-type Daemon struct { - ID string - repository string - containers container.Store - execCommands *exec.Store - referenceStore reference.Store - downloadManager *xfer.LayerDownloadManager - uploadManager *xfer.LayerUploadManager - distributionMetadataStore dmetadata.Store - trustKey libtrust.PrivateKey - idIndex *truncindex.TruncIndex - configStore *Config - statsCollector *statsCollector - defaultLogConfig containertypes.LogConfig - RegistryService registry.Service - EventsService *events.Events - netController libnetwork.NetworkController - volumes *store.VolumeStore - discoveryWatcher discoveryReloader - root string - seccompEnabled bool - shutdown bool - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - layerStore layer.Store - imageStore image.Store - nameIndex *registrar.Registrar - linkIndex *linkIndex - containerd libcontainerd.Client - containerdRemote libcontainerd.Remote - defaultIsolation containertypes.Isolation // Default isolation mode on Windows - clusterProvider cluster.Provider -} - -func (daemon *Daemon) restore() error { - var ( - debug = utils.IsDebugEnabled() - currentDriver = daemon.GraphDriverName() - containers = make(map[string]*container.Container) - ) - - if !debug { - logrus.Info("Loading containers: start.") - } - dir, err := ioutil.ReadDir(daemon.repository) - if err != nil { - return err - } - - containerCount := 0 - for _, v := range dir { - id := v.Name() - container, err := daemon.load(id) - if !debug && logrus.GetLevel() == logrus.InfoLevel { - fmt.Print(".") - containerCount++ - } - if err != nil { - logrus.Errorf("Failed to load container %v: %v", id, err) - continue - } - - // Ignore the container if it does not support the current driver being used by the graph - if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { - rwlayer, err := daemon.layerStore.GetRWLayer(container.ID) - if err != nil { - logrus.Errorf("Failed to load container mount %v: %v", id, err) - continue - } - container.RWLayer = rwlayer - logrus.Debugf("Loaded container %v", container.ID) - - containers[container.ID] = container - } else { - logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) - } - } - - var migrateLegacyLinks bool - restartContainers := make(map[*container.Container]chan struct{}) - activeSandboxes := make(map[string]interface{}) - for _, c := range containers { - if err := daemon.registerName(c); err != nil { - logrus.Errorf("Failed to register container %s: %s", c.ID, err) - continue - } - if err := daemon.Register(c); err != nil { - logrus.Errorf("Failed to register container %s: %s", c.ID, err) - continue - } - - // The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver. - // We should rewrite it to use the daemon defaults. 
- // Fixes https://github.com/docker/docker/issues/22536 - if c.HostConfig.LogConfig.Type == "" { - if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil { - logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err) - continue - } - } - } - var wg sync.WaitGroup - var mapLock sync.Mutex - for _, c := range containers { - wg.Add(1) - go func(c *container.Container) { - defer wg.Done() - rm := c.RestartManager(false) - if c.IsRunning() || c.IsPaused() { - if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil { - logrus.Errorf("Failed to restore with containerd: %q", err) - return - } - if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() { - options, err := daemon.buildSandboxOptions(c) - if err != nil { - logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err) - } - mapLock.Lock() - activeSandboxes[c.NetworkSettings.SandboxID] = options - mapLock.Unlock() - } - - } - // fixme: only if not running - // get list of containers we need to restart - if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestart() { - mapLock.Lock() - restartContainers[c] = make(chan struct{}) - mapLock.Unlock() - } - - if c.RemovalInProgress { - // We probably crashed in the middle of a removal, reset - // the flag. - // - // We DO NOT remove the container here as we do not - // know if the user had requested for either the - // associated volumes, network links or both to also - // be removed. So we put the container in the "dead" - // state and leave further processing up to them. - logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID) - c.ResetRemovalInProgress() - c.SetDead() - c.ToDisk() - } - - // if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated - if c.HostConfig != nil && c.HostConfig.Links == nil { - migrateLegacyLinks = true - } - }(c) - } - wg.Wait() - daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes) - if err != nil { - return fmt.Errorf("Error initializing network controller: %v", err) - } - - // migrate any legacy links from sqlite - linkdbFile := filepath.Join(daemon.root, "linkgraph.db") - var legacyLinkDB *graphdb.Database - if migrateLegacyLinks { - legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile) - if err != nil { - return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err) - } - defer legacyLinkDB.Close() - } - - // Now that all the containers are registered, register the links - for _, c := range containers { - if migrateLegacyLinks { - if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil { - return err - } - } - if err := daemon.registerLinks(c, c.HostConfig); err != nil { - logrus.Errorf("failed to register link for container %s: %v", c.ID, err) - } - } - - group := sync.WaitGroup{} - for c, notifier := range restartContainers { - group.Add(1) - - go func(c *container.Container, chNotify chan struct{}) { - defer group.Done() - - logrus.Debugf("Starting container %s", c.ID) - - // ignore errors here as this is a best effort to wait for children to be - // running before we try to start the container - children := daemon.children(c) - timeout := time.After(5 * time.Second) - for _, child := range children { - if notifier, exists := restartContainers[child]; exists { - select { - case <-notifier: - case <-timeout: - } - } - } - - // Make sure networks are available
before starting - daemon.waitForNetworks(c) - if err := daemon.containerStart(c); err != nil { - logrus.Errorf("Failed to start container %s: %s", c.ID, err) - } - close(chNotify) - }(c, notifier) - - } - group.Wait() - - // any containers that were started above would already have had this done, - // however we need to now prepare the mountpoints for the rest of the containers as well. - // This shouldn't cause any issue running on the containers that already had this run. - // This must be run after any containers with a restart policy so that containerized plugins - // can have a chance to be running before we try to initialize them. - for _, c := range containers { - // if the container has a restart policy, do not - // prepare the mountpoints since it has been done on restarting. - // This is to speed up the daemon start when a restarting container - // has a volume and the volume driver is not available. - if _, ok := restartContainers[c]; ok { - continue - } - group.Add(1) - go func(c *container.Container) { - defer group.Done() - if err := daemon.prepareMountPoints(c); err != nil { - logrus.Error(err) - } - }(c) - } - - group.Wait() - - if !debug { - if logrus.GetLevel() == logrus.InfoLevel && containerCount > 0 { - fmt.Println() - } - logrus.Info("Loading containers: done.") - } - - return nil -} - -// waitForNetworks is used during daemon initialization when starting up containers -// It ensures that all of a container's networks are available before the daemon tries to start the container. -// In practice it just makes sure the discovery service is available for containers which use a network that require discovery. -func (daemon *Daemon) waitForNetworks(c *container.Container) { - if daemon.discoveryWatcher == nil { - return - } - // Make sure if the container has a network that requires discovery that the discovery service is available before starting - for netName := range c.NetworkSettings.Networks { - // If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready - // Most likely this is because the K/V store used for discovery is in a container and needs to be started - if _, err := daemon.netController.NetworkByName(netName); err != nil { - if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { - continue - } - // use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host - // FIXME: why is this slow??? - logrus.Debugf("Container %s waiting for network to be ready", c.Name) - select { - case <-daemon.discoveryWatcher.ReadyCh(): - case <-time.After(60 * time.Second): - } - return - } - } -} - -func (daemon *Daemon) children(c *container.Container) map[string]*container.Container { - return daemon.linkIndex.children(c) -} - -// parents returns the names of the parent containers of the container -// with the given name. -func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container { - return daemon.linkIndex.parents(c) -} - -func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { - fullName := path.Join(parent.Name, alias) - if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { - if err == registrar.ErrNameReserved { - logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) - return nil - } - return err - } - daemon.linkIndex.link(parent, child, fullName) - return nil -} - -// SetClusterProvider sets a component for querying the current cluster state.
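// A runnable sketch of the restart-ordering pattern in restore() above: each
// restarting container gets a channel that is closed once it has started, and
// parents wait (with a timeout, best effort) on their children's channels
// before starting themselves. The two-container dependency here is invented
// for illustration.
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	started := map[string]chan struct{}{
		"db":  make(chan struct{}),
		"web": make(chan struct{}),
	}
	deps := map[string][]string{"web": {"db"}} // web links to db

	var wg sync.WaitGroup
	for name, ch := range started {
		wg.Add(1)
		go func(name string, ch chan struct{}) {
			defer wg.Done()
			timeout := time.After(5 * time.Second)
			for _, child := range deps[name] {
				if notifier, ok := started[child]; ok {
					select { // don't block forever on a stuck child
					case <-notifier:
					case <-timeout:
					}
				}
			}
			fmt.Println("starting", name)
			close(ch) // signal anyone waiting on us
		}(name, ch)
	}
	wg.Wait()
}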
-func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) { - daemon.clusterProvider = clusterProvider - daemon.netController.SetClusterProvider(clusterProvider) -} - -// IsSwarmCompatible verifies if the current daemon -// configuration is compatible with the swarm mode -func (daemon *Daemon) IsSwarmCompatible() error { - if daemon.configStore == nil { - return nil - } - return daemon.configStore.isSwarmCompatible() -} - -// NewDaemon sets up everything for the daemon to be able to service -// requests from the webserver. -func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) { - setDefaultMtu(config) - - // Ensure that we have a correct root key limit for launching containers. - if err := ModifyRootKeyLimit(); err != nil { - logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err) - } - - // Ensure we have compatible and valid configuration options - if err := verifyDaemonSettings(config); err != nil { - return nil, err - } - - // Do we have a disabled network? - config.DisableBridge = isBridgeNetworkDisabled(config) - - // Verify the platform is supported as a daemon - if !platformSupported { - return nil, errSystemNotSupported - } - - // Validate platform-specific requirements - if err := checkSystem(); err != nil { - return nil, err - } - - // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event - // on Windows to dump Go routine stacks - setupDumpStackTrap() - - uidMaps, gidMaps, err := setupRemappedRoot(config) - if err != nil { - return nil, err - } - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - - // get the canonical path to the Docker root directory - var realRoot string - if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { - realRoot = config.Root - } else { - realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) - if err != nil { - return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) - } - } - - if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil { - return nil, err - } - - if err := setupDaemonProcess(config); err != nil { - return nil, err - } - - // set up the tmpDir to use a canonical path - tmp, err := tempDir(config.Root, rootUID, rootGID) - if err != nil { - return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) - } - realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) - if err != nil { - return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) - } - os.Setenv("TMPDIR", realTmp) - - d := &Daemon{configStore: config} - // Ensure the daemon is properly shutdown if there is a failure during - // initialization - defer func() { - if err != nil { - if err := d.Shutdown(); err != nil { - logrus.Error(err) - } - } - }() - - // Set the default isolation mode (only applicable on Windows) - if err := d.setDefaultIsolation(); err != nil { - return nil, fmt.Errorf("error setting default isolation mode: %v", err) - } - - logrus.Debugf("Using default logging driver %s", config.LogConfig.Type) - - if err := configureMaxThreads(config); err != nil { - logrus.Warnf("Failed to configure golang's threads limit: %v", err) - } - - installDefaultAppArmorProfile() - daemonRepo := filepath.Join(config.Root, "containers") - if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { - return
nil, err - } - - driverName := os.Getenv("DOCKER_DRIVER") - if driverName == "" { - driverName = config.GraphDriver - } - d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{ - StorePath: config.Root, - MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), - GraphDriver: driverName, - GraphDriverOptions: config.GraphOptions, - UIDMaps: uidMaps, - GIDMaps: gidMaps, - }) - if err != nil { - return nil, err - } - - graphDriver := d.layerStore.DriverName() - imageRoot := filepath.Join(config.Root, "image", graphDriver) - - // Configure and validate the kernel's security support - if err := configureKernelSecuritySupport(config, graphDriver); err != nil { - return nil, err - } - - logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads) - d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads) - logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads) - d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads) - - ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) - if err != nil { - return nil, err - } - - d.imageStore, err = image.NewImageStore(ifs, d.layerStore) - if err != nil { - return nil, err - } - - // Configure the volumes driver - volStore, err := d.configureVolumes(rootUID, rootGID) - if err != nil { - return nil, err - } - - trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) - if err != nil { - return nil, err - } - - trustDir := filepath.Join(config.Root, "trust") - - if err := system.MkdirAll(trustDir, 0700); err != nil { - return nil, err - } - - distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) - if err != nil { - return nil, err - } - - eventsService := events.New() - - referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json")) - if err != nil { - return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) - } - - migrationStart := time.Now() - if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil { - logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err) - } - logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) - - // Discovery is only enabled when the daemon is launched with an address to advertise. When - // initialized, the daemon is registered and we can store the discovery backend as its read-only - if err := d.initDiscovery(config); err != nil { - return nil, err - } - - sysInfo := sysinfo.New(false) - // Check if the Devices cgroup is mounted; it is a hard requirement for container security - // on Linux.
- if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled { - return nil, fmt.Errorf("Devices cgroup isn't mounted") - } - - d.ID = trustKey.PublicKey().KeyID() - d.repository = daemonRepo - d.containers = container.NewMemoryStore() - d.execCommands = exec.NewStore() - d.referenceStore = referenceStore - d.distributionMetadataStore = distributionMetadataStore - d.trustKey = trustKey - d.idIndex = truncindex.NewTruncIndex([]string{}) - d.statsCollector = d.newStatsCollector(1 * time.Second) - d.defaultLogConfig = containertypes.LogConfig{ - Type: config.LogConfig.Type, - Config: config.LogConfig.Config, - } - d.RegistryService = registryService - d.EventsService = eventsService - d.volumes = volStore - d.root = config.Root - d.uidMaps = uidMaps - d.gidMaps = gidMaps - d.seccompEnabled = sysInfo.Seccomp - - d.nameIndex = registrar.NewRegistrar() - d.linkIndex = newLinkIndex() - d.containerdRemote = containerdRemote - - go d.execCommandGC() - - d.containerd, err = containerdRemote.Client(d) - if err != nil { - return nil, err - } - - if err := d.restore(); err != nil { - return nil, err - } - - return d, nil -} - -func (daemon *Daemon) shutdownContainer(c *container.Container) error { - // TODO(windows): Handle docker restart with paused containers - if c.IsPaused() { - // To terminate a process in a freezer cgroup, we should send - // SIGTERM to this process then unfreeze it, and the process will - // be forced to terminate immediately. - logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID) - sig, ok := signal.SignalMap["TERM"] - if !ok { - return fmt.Errorf("System does not support SIGTERM") - } - if err := daemon.kill(c, int(sig)); err != nil { - return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err) - } - if err := daemon.containerUnpause(c); err != nil { - return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err) - } - if _, err := c.WaitStop(10 * time.Second); err != nil { - logrus.Debugf("container %s failed to exit within 10 seconds of SIGTERM, sending SIGKILL to force", c.ID) - sig, ok := signal.SignalMap["KILL"] - if !ok { - return fmt.Errorf("System does not support SIGKILL") - } - if err := daemon.kill(c, int(sig)); err != nil { - logrus.Errorf("Failed to SIGKILL container %s", c.ID) - } - c.WaitStop(-1 * time.Second) - return err - } - } - // If the container failed to exit within 10 seconds of SIGTERM, use the force - if err := daemon.containerStop(c, 10); err != nil { - return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err) - } - - c.WaitStop(-1 * time.Second) - return nil -} - -// Shutdown stops the daemon. -func (daemon *Daemon) Shutdown() error { - daemon.shutdown = true - // Keep mounts and networking running on daemon shutdown if - // we are to keep containers running and restore them.
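// A stand-in sketch of the escalation ladder in shutdownContainer above:
// kill, unpause, and waitStop here are assumed helpers, and the point is the
// order of operations - queue SIGTERM while the process is frozen, thaw it so
// the signal is delivered, then escalate to SIGKILL only after the grace
// period expires.
package sketch

import (
	"fmt"
	"time"
)

type containerHandle struct{}

func (c *containerHandle) kill(sig int) error              { return nil }
func (c *containerHandle) unpause() error                  { return nil }
func (c *containerHandle) waitStop(d time.Duration) error  { return nil }

const (
	sigTERM = 15
	sigKILL = 9
)

func shutdownPaused(c *containerHandle) error {
	// SIGTERM is queued while the freezer cgroup holds the process...
	if err := c.kill(sigTERM); err != nil {
		return fmt.Errorf("sending SIGTERM: %v", err)
	}
	// ...and is delivered the moment the container is unfrozen.
	if err := c.unpause(); err != nil {
		return fmt.Errorf("unpausing: %v", err)
	}
	// nil means the container exited within the grace period.
	if err := c.waitStop(10 * time.Second); err != nil {
		return c.kill(sigKILL) // grace period expired: escalate
	}
	return nil
}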
- if daemon.configStore.LiveRestore && daemon.containers != nil { - // check if there are any running containers, if none we should do some cleanup - if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil { - return nil - } - } - - if daemon.containers != nil { - logrus.Debug("starting clean shutdown of all containers...") - daemon.containers.ApplyAll(func(c *container.Container) { - if !c.IsRunning() { - return - } - logrus.Debugf("stopping %s", c.ID) - if err := daemon.shutdownContainer(c); err != nil { - logrus.Errorf("Stop container error: %v", err) - return - } - if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil { - daemon.cleanupMountsByID(mountid) - } - logrus.Debugf("container stopped %s", c.ID) - }) - } - - // trigger libnetwork Stop only if it's initialized - if daemon.netController != nil { - daemon.netController.Stop() - } - - if daemon.layerStore != nil { - if err := daemon.layerStore.Cleanup(); err != nil { - logrus.Errorf("Error during layer Store.Cleanup(): %v", err) - } - } - - pluginShutdown() - - if err := daemon.cleanupMounts(); err != nil { - return err - } - - return nil -} - -// Mount sets container.BaseFS -// (is it not set coming in? why is it unset?) -func (daemon *Daemon) Mount(container *container.Container) error { - dir, err := container.RWLayer.Mount(container.GetMountLabel()) - if err != nil { - return err - } - logrus.Debugf("container mounted via layerStore: %v", dir) - - if container.BaseFS != dir { - // The mount path reported by the graph driver should always be trusted on Windows, since the - // volume path for a given mounted layer may change over time. This should only be an error - // on non-Windows operating systems. - if container.BaseFS != "" && runtime.GOOS != "windows" { - daemon.Unmount(container) - return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", - daemon.GraphDriverName(), container.ID, container.BaseFS, dir) - } - } - container.BaseFS = dir // TODO: combine these fields - return nil -} - -// Unmount unsets the container base filesystem -func (daemon *Daemon) Unmount(container *container.Container) error { - if err := container.RWLayer.Unmount(); err != nil { - logrus.Errorf("Error unmounting container %s: %s", container.ID, err) - return err - } - return nil -} - -func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { - progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) - operationCancelled := false - - for prog := range progressChan { - if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { - // don't log broken pipe errors as this is the normal case when a client aborts - if isBrokenPipe(err) { - logrus.Info("Pull session cancelled") - } else { - logrus.Errorf("error writing progress to client: %v", err) - } - cancelFunc() - operationCancelled = true - // Don't return, because we need to continue draining - // progressChan until it's closed to avoid a deadlock. 
- } - } -} - -func isBrokenPipe(e error) bool { - if netErr, ok := e.(*net.OpError); ok { - e = netErr.Err - if sysErr, ok := netErr.Err.(*os.SyscallError); ok { - e = sysErr.Err - } - } - return e == syscall.EPIPE -} - -// GraphDriverName returns the name of the graph driver used by the layer.Store -func (daemon *Daemon) GraphDriverName() string { - return daemon.layerStore.DriverName() -} - -// GetUIDGIDMaps returns the current daemon's user namespace settings -// for the full uid and gid maps which will be applied to containers -// started in this instance. -func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) { - return daemon.uidMaps, daemon.gidMaps -} - -// GetRemappedUIDGID returns the current daemon's uid and gid values -// if user namespaces are in use for this daemon instance. If not -// this function will return "real" root values of 0, 0. -func (daemon *Daemon) GetRemappedUIDGID() (int, int) { - uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) - return uid, gid -} - -// tempDir returns the default directory to use for temporary files. -func tempDir(rootDir string, rootUID, rootGID int) (string, error) { - var tmpDir string - if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { - tmpDir = filepath.Join(rootDir, "tmp") - } - return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) -} - -func (daemon *Daemon) setupInitLayer(initPath string) error { - rootUID, rootGID := daemon.GetRemappedUIDGID() - return setupInitLayer(initPath, rootUID, rootGID) -} - -func setDefaultMtu(config *Config) { - // do nothing if the config does not have the default 0 value. - if config.Mtu != 0 { - return - } - config.Mtu = defaultNetworkMtu -} - -func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) { - volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID) - if err != nil { - return nil, err - } - - if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) { - return nil, fmt.Errorf("local volume driver could not be registered") - } - return store.New(daemon.configStore.Root) -} - -// IsShuttingDown tells whether the daemon is shutting down or not -func (daemon *Daemon) IsShuttingDown() bool { - return daemon.shutdown -} - -// initDiscovery initializes the discovery watcher for this daemon. -func (daemon *Daemon) initDiscovery(config *Config) error { - advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise) - if err != nil { - if err == errDiscoveryDisabled { - return nil - } - return err - } - - config.ClusterAdvertise = advertise - discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts) - if err != nil { - return fmt.Errorf("discovery initialization failed (%v)", err) - } - - daemon.discoveryWatcher = discoveryWatcher - return nil -} - -// Reload reads configuration changes and modifies the -// daemon according to those changes. -// These are the settings that Reload changes: -// - Daemon labels. -// - Daemon debug log level. -// - Daemon max concurrent downloads -// - Daemon max concurrent uploads -// - Cluster discovery (reconfigure and restart). 
-// - Daemon live restore -func (daemon *Daemon) Reload(config *Config) error { - var err error - // used to hold reloaded changes - attributes := map[string]string{} - - // We need defer here to ensure the lock is released as - // daemon.SystemInfo() will try to get it too - defer func() { - if err == nil { - daemon.LogDaemonEventWithAttributes("reload", attributes) - } - }() - - daemon.configStore.reloadLock.Lock() - defer daemon.configStore.reloadLock.Unlock() - - daemon.platformReload(config, &attributes) - - if err = daemon.reloadClusterDiscovery(config); err != nil { - return err - } - - if config.IsValueSet("labels") { - daemon.configStore.Labels = config.Labels - } - if config.IsValueSet("debug") { - daemon.configStore.Debug = config.Debug - } - if config.IsValueSet("live-restore") { - daemon.configStore.LiveRestore = config.LiveRestore - if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestore)); err != nil { - return err - } - - } - - // If no value is set for max-concurrent-downloads we assume it is the default value - // We always "reset" as the cost is lightweight and easy to maintain. - if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil { - *daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads - } else { - maxConcurrentDownloads := defaultMaxConcurrentDownloads - daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads - } - logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) - if daemon.downloadManager != nil { - daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads) - } - - // If no value is set for max-concurrent-upload we assume it is the default value - // We always "reset" as the cost is lightweight and easy to maintain. 
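// A minimal sketch of the reload rule described above: an unset
// max-concurrency value is always reset to the compiled-in default, so
// deleting the key from the config file takes effect on the next reload.
// Pointer fields distinguish "unset" from an explicit zero; the names are
// simplified stand-ins for the daemon's Config.
package main

import "fmt"

const defaultMaxConcurrentDownloads = 3

type config struct {
	MaxConcurrentDownloads *int
}

func reload(current, incoming *config, isSet bool) {
	if isSet && incoming.MaxConcurrentDownloads != nil {
		*current.MaxConcurrentDownloads = *incoming.MaxConcurrentDownloads
		return
	}
	def := defaultMaxConcurrentDownloads
	current.MaxConcurrentDownloads = &def // reset when the key is absent
}

func main() {
	n := 10
	cur := &config{MaxConcurrentDownloads: &n}
	reload(cur, &config{}, false)
	fmt.Println(*cur.MaxConcurrentDownloads) // 3
}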
- if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil { - *daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads - } else { - maxConcurrentUploads := defaultMaxConcurrentUploads - daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads - } - logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) - if daemon.uploadManager != nil { - daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads) - } - - // We emit daemon reload event here with updatable configurations - attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) - attributes["cluster-store"] = daemon.configStore.ClusterStore - if daemon.configStore.ClusterOpts != nil { - opts, _ := json.Marshal(daemon.configStore.ClusterOpts) - attributes["cluster-store-opts"] = string(opts) - } else { - attributes["cluster-store-opts"] = "{}" - } - attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise - if daemon.configStore.Labels != nil { - labels, _ := json.Marshal(daemon.configStore.Labels) - attributes["labels"] = string(labels) - } else { - attributes["labels"] = "[]" - } - attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) - attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) - - return nil -} - -func (daemon *Daemon) reloadClusterDiscovery(config *Config) error { - var err error - newAdvertise := daemon.configStore.ClusterAdvertise - newClusterStore := daemon.configStore.ClusterStore - if config.IsValueSet("cluster-advertise") { - if config.IsValueSet("cluster-store") { - newClusterStore = config.ClusterStore - } - newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise) - if err != nil && err != errDiscoveryDisabled { - return err - } - } - - if daemon.clusterProvider != nil { - if err := config.isSwarmCompatible(); err != nil { - return err - } - } - - // check discovery modifications - if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) { - return nil - } - - // enable discovery for the first time if it was not previously enabled - if daemon.discoveryWatcher == nil { - discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts) - if err != nil { - return fmt.Errorf("discovery initialization failed (%v)", err) - } - daemon.discoveryWatcher = discoveryWatcher - } else { - if err == errDiscoveryDisabled { - // disable discovery if it was previously enabled and it's disabled now - daemon.discoveryWatcher.Stop() - } else { - // reload discovery - if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil { - return err - } - } - } - - daemon.configStore.ClusterStore = newClusterStore - daemon.configStore.ClusterOpts = config.ClusterOpts - daemon.configStore.ClusterAdvertise = newAdvertise - - if daemon.netController == nil { - return nil - } - netOptions, err := daemon.networkOptions(daemon.configStore, nil) - if err != nil { - logrus.Warnf("Failed to reload configuration with network controller: %v", err) - return nil - } - err = daemon.netController.ReloadConfiguration(netOptions...) 
- if err != nil { - logrus.Warnf("Failed to reload configuration with network controller: %v", err) - } - - return nil -} - -func isBridgeNetworkDisabled(config *Config) bool { - return config.bridgeConfig.Iface == disableNetworkBridge -} - -func (daemon *Daemon) networkOptions(dconfig *Config, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) { - options := []nwconfig.Option{} - if dconfig == nil { - return options, nil - } - - options = append(options, nwconfig.OptionDataDir(dconfig.Root)) - - dd := runconfig.DefaultDaemonNetworkMode() - dn := runconfig.DefaultDaemonNetworkMode().NetworkName() - options = append(options, nwconfig.OptionDefaultDriver(string(dd))) - options = append(options, nwconfig.OptionDefaultNetwork(dn)) - - if strings.TrimSpace(dconfig.ClusterStore) != "" { - kv := strings.Split(dconfig.ClusterStore, "://") - if len(kv) != 2 { - return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL") - } - options = append(options, nwconfig.OptionKVProvider(kv[0])) - options = append(options, nwconfig.OptionKVProviderURL(kv[1])) - } - if len(dconfig.ClusterOpts) > 0 { - options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts)) - } - - if daemon.discoveryWatcher != nil { - options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher)) - } - - if dconfig.ClusterAdvertise != "" { - options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise)) - } - - options = append(options, nwconfig.OptionLabels(dconfig.Labels)) - options = append(options, driverOptions(dconfig)...) - - if daemon.configStore != nil && daemon.configStore.LiveRestore && len(activeSandboxes) != 0 { - options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes)) - } - - return options, nil -} - -func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry { - out := make([]types.BlkioStatEntry, len(entries)) - for i, re := range entries { - out[i] = types.BlkioStatEntry{ - Major: re.Major, - Minor: re.Minor, - Op: re.Op, - Value: re.Value, - } - } - return out -} diff --git a/daemon/daemon_experimental.go b/daemon/daemon_experimental.go deleted file mode 100644 index 5244f1df2..000000000 --- a/daemon/daemon_experimental.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build experimental - -package daemon - -import ( - "github.com/docker/docker/plugin" - "github.com/docker/engine-api/types/container" -) - -func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { - return nil, nil -} - -func pluginShutdown() { - plugin.GetManager().Shutdown() -} diff --git a/daemon/daemon_linux.go b/daemon/daemon_linux.go deleted file mode 100644 index 9bdf6e2b7..000000000 --- a/daemon/daemon_linux.go +++ /dev/null @@ -1,80 +0,0 @@ -package daemon - -import ( - "bufio" - "fmt" - "io" - "os" - "regexp" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/mount" -) - -func (daemon *Daemon) cleanupMountsByID(id string) error { - logrus.Debugf("Cleaning up old mountid %s: start.", id) - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return err - } - defer f.Close() - - return daemon.cleanupMountsFromReaderByID(f, id, mount.Unmount) -} - -func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, unmount func(target string) error) error { - if daemon.root == "" { - return nil - } - var errors []string - - regexps := getCleanPatterns(id) - sc := bufio.NewScanner(reader) - for 
sc.Scan() { - if fields := strings.Fields(sc.Text()); len(fields) > 4 { - if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) { - for _, p := range regexps { - if p.MatchString(mnt) { - if err := unmount(mnt); err != nil { - logrus.Error(err) - errors = append(errors, err.Error()) - } - } - } - } - } - } - - if err := sc.Err(); err != nil { - return err - } - - if len(errors) > 0 { - return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n")) - } - - logrus.Debugf("Cleaning up old mountid %v: done.", id) - return nil -} - -// cleanupMounts unmounts shm/mqueue mounts for old containers -func (daemon *Daemon) cleanupMounts() error { - return daemon.cleanupMountsByID("") -} - -func getCleanPatterns(id string) (regexps []*regexp.Regexp) { - var patterns []string - if id == "" { - id = "[0-9a-f]{64}" - patterns = append(patterns, "containers/"+id+"/shm") - } - patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$") - for _, p := range patterns { - r, err := regexp.Compile(p) - if err == nil { - regexps = append(regexps, r) - } - } - return -} diff --git a/daemon/daemon_linux_test.go b/daemon/daemon_linux_test.go deleted file mode 100644 index c40b13ba4..000000000 --- a/daemon/daemon_linux_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// +build linux - -package daemon - -import ( - "strings" - "testing" -) - -const mountsFixture = `142 78 0:38 / / rw,relatime - aufs none rw,si=573b861da0b3a05b,dio -143 142 0:60 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -144 142 0:67 / /dev rw,nosuid - tmpfs tmpfs rw,mode=755 -145 144 0:78 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 -146 144 0:49 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw -147 142 0:84 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -148 147 0:86 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 -149 148 0:22 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuset -150 148 0:25 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpu -151 148 0:27 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuacct -152 148 0:28 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory -153 148 0:29 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,devices -154 148 0:30 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,freezer -155 148 0:31 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,blkio -156 148 0:32 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,perf_event -157 148 0:33 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,hugetlb -158 148 0:35 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a 
/sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd -159 142 8:4 /home/mlaventure/gopath /home/mlaventure/gopath rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -160 142 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data /var/lib/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -164 142 8:4 /home/mlaventure/gopath/src/github.com/docker/docker /go/src/github.com/docker/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -165 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/resolv.conf /etc/resolv.conf rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -166 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hostname /etc/hostname rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -167 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hosts /etc/hosts rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -168 144 0:39 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k -169 144 0:12 /14 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -83 147 0:10 / /sys/kernel/security rw,relatime - securityfs none rw -89 142 0:87 / /tmp rw,relatime - tmpfs none rw -97 142 0:60 / /run/docker/netns/default rw,nosuid,nodev,noexec,relatime - proc proc rw -100 160 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data/aufs /var/lib/docker/aufs rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -115 100 0:102 / /var/lib/docker/aufs/mnt/0ecda1c63e5b58b3d89ff380bf646c95cc980252cf0b52466d43619aec7c8432 rw,relatime - aufs none rw,si=573b861dbc01905b,dio -116 160 0:107 / /var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k -118 142 0:102 / /run/docker/libcontainerd/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/rootfs rw,relatime - aufs none rw,si=573b861dbc01905b,dio -242 142 0:60 / /run/docker/netns/c3664df2a0f7 rw,nosuid,nodev,noexec,relatime - proc proc rw -120 100 0:122 / /var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d rw,relatime - aufs none rw,si=573b861eb147805b,dio -171 142 0:122 / /run/docker/libcontainerd/e406ff6f3e18516d50e03dbca4de54767a69a403a6f7ec1edc2762812824521e/rootfs rw,relatime - aufs none rw,si=573b861eb147805b,dio -310 142 0:60 / /run/docker/netns/71a18572176b rw,nosuid,nodev,noexec,relatime - proc proc rw -` - -func TestCleanupMounts(t *testing.T) { - d := &Daemon{ - root: "/var/lib/docker/", - } - - expected := "/var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm" - var unmounted int - unmount := func(target string) error { - if target == expected { - unmounted++ - } - return nil - } - - d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "", unmount) - - if unmounted != 1 { - t.Fatalf("Expected to unmount the shm (and the shm only)") - } -} - 
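- // Each fixture line above follows the /proc/self/mountinfo layout described in proc(5), e.g.:
- //   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw
- // fields[4] (the fifth whitespace-separated field) is the mount point, which is
- // what cleanupMountsFromReaderByID matches against daemon.root.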
-func TestCleanupMountsByID(t *testing.T) { - d := &Daemon{ - root: "/var/lib/docker/", - } - - expected := "/var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d" - var unmounted int - unmount := func(target string) error { - if target == expected { - unmounted++ - } - return nil - } - - d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d", unmount) - - if unmounted != 1 { - t.Fatalf("Expected to unmount the aufs root (and that only)") - } -} - -func TestNotCleanupMounts(t *testing.T) { - d := &Daemon{ - repository: "", - } - var unmounted bool - unmount := func(target string) error { - unmounted = true - return nil - } - mountInfo := `234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k` - d.cleanupMountsFromReaderByID(strings.NewReader(mountInfo), "", unmount) - if unmounted { - t.Fatalf("Expected not to clean up /dev/shm") - } -} diff --git a/daemon/daemon_solaris.go b/daemon/daemon_solaris.go deleted file mode 100644 index 5c49af56c..000000000 --- a/daemon/daemon_solaris.go +++ /dev/null @@ -1,167 +0,0 @@ -// +build solaris,cgo - -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/libnetwork" - nwconfig "github.com/docker/libnetwork/config" -) - -//#include <zone.h> -import "C" - -const ( - defaultVirtualSwitch = "Virtual Switch" - platformSupported = true - solarisMinCPUShares = 1 - solarisMaxCPUShares = 65535 -) - -func (daemon *Daemon) cleanupMountsByID(id string) error { - return nil -} - -func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { - return nil -} - -func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { - return nil, nil, nil -} - -func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { - return nil -} - -// setupInitLayer populates a directory with mountpoints suitable -// for bind-mounting dockerinit into the container. The mountpoint is simply an -// empty file at /.dockerinit -// -// This extra layer is used by all containers as the top-most ro layer. It protects -// the container from unwanted side-effects on the rw layer. -func setupInitLayer(initLayer string, rootUID, rootGID int) error { - return nil -} - -func checkKernel() error { - // solaris can rely upon checkSystem() below, we don't skew kernel versions - return nil -} - -func (daemon *Daemon) getCgroupDriver() string { - return "" -} - -func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { - return nil -} - -// verifyPlatformContainerSettings performs platform-specific validation of the -// hostconfig and config structures. 
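- // The Solaris implementation is effectively a stub; the Linux/FreeBSD variant in daemon_unix.go below carries the real resource and namespace checks.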
-func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { - warnings := []string{} - return warnings, nil -} - -// platformReload updates configuration with platform-specific options -func (daemon *Daemon) platformReload(config *Config, attributes *map[string]string) { -} - -// verifyDaemonSettings performs validation of daemon config struct -func verifyDaemonSettings(config *Config) error { - // checkSystem validates platform-specific requirements - return nil -} - -func checkSystem() error { - // check OS version for compatibility, ensure running in global zone - var err error - var id C.zoneid_t - - if id, err = C.getzoneid(); err != nil { - return fmt.Errorf("Exiting. Error getting zone id: %+v", err) - } - if int(id) != 0 { - return fmt.Errorf("Exiting because the Docker daemon is not running in the global zone") - } - - v, err := kernel.GetKernelVersion() - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 5, Major: 12, Minor: 0}) < 0 { - return fmt.Errorf("Your Solaris kernel version: %s doesn't support Docker. Please upgrade to 5.12.0", v.String()) - } - return err -} - -// configureMaxThreads sets the Go runtime max threads threshold -// which is 90% of the kernel setting from /proc/sys/kernel/threads-max -func configureMaxThreads(config *Config) error { - return nil -} - -// configureKernelSecuritySupport configures and validates security support for the kernel -func configureKernelSecuritySupport(config *Config, driverName string) error { - return nil -} - -func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { - return nil, nil -} - -// registerLinks sets up links between containers and writes the -// configuration out for persistence. -func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { - return nil -} - -func (daemon *Daemon) cleanupMounts() error { - return nil -} - -// conditionalMountOnStart is a platform specific helper function during the -// container start to call mount. -func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { - return nil -} - -// conditionalUnmountOnCleanup is a platform specific helper function called -// during the cleanup of a container to unmount. -func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { - return daemon.Unmount(container) -} - -func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { - // Solaris has no custom images to register - return nil -} - -func driverOptions(config *Config) []nwconfig.Option { - return []nwconfig.Option{} -} - -func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { - return nil, nil -} - -// setDefaultIsolation determines the default isolation mode for the -// daemon to run in. 
This is only applicable on Windows -func (daemon *Daemon) setDefaultIsolation() error { - return nil -} - -func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { - return types.RootFS{} -} - -func setupDaemonProcess(config *Config) error { - return nil -} diff --git a/daemon/daemon_stub.go b/daemon/daemon_stub.go deleted file mode 100644 index dd1fb09f1..000000000 --- a/daemon/daemon_stub.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !experimental - -package daemon - -import "github.com/docker/engine-api/types/container" - -func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { - return nil, nil -} - -func pluginShutdown() { -} diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go deleted file mode 100644 index 609ed9525..000000000 --- a/daemon/daemon_test.go +++ /dev/null @@ -1,532 +0,0 @@ -package daemon - -import ( - "io/ioutil" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/discovery" - _ "github.com/docker/docker/pkg/discovery/memory" - "github.com/docker/docker/pkg/registrar" - "github.com/docker/docker/pkg/truncindex" - "github.com/docker/docker/volume" - volumedrivers "github.com/docker/docker/volume/drivers" - "github.com/docker/docker/volume/local" - "github.com/docker/docker/volume/store" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/go-connections/nat" -) - -// -// https://github.com/docker/docker/issues/8069 -// - -func TestGetContainer(t *testing.T) { - c1 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", - Name: "tender_bardeen", - }, - } - - c2 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", - Name: "drunk_hawking", - }, - } - - c3 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", - Name: "3cdbd1aa", - }, - } - - c4 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", - Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", - }, - } - - c5 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", - Name: "d22d69a2b896", - }, - } - - store := container.NewMemoryStore() - store.Add(c1.ID, c1) - store.Add(c2.ID, c2) - store.Add(c3.ID, c3) - store.Add(c4.ID, c4) - store.Add(c5.ID, c5) - - index := truncindex.NewTruncIndex([]string{}) - index.Add(c1.ID) - index.Add(c2.ID) - index.Add(c3.ID) - index.Add(c4.ID) - index.Add(c5.ID) - - daemon := &Daemon{ - containers: store, - idIndex: index, - nameIndex: registrar.NewRegistrar(), - } - - daemon.reserveName(c1.ID, c1.Name) - daemon.reserveName(c2.ID, c2.Name) - daemon.reserveName(c3.ID, c3.Name) - daemon.reserveName(c4.ID, c4.Name) - daemon.reserveName(c5.ID, c5.Name) - - if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { - t.Fatal("Should explicitly match full container IDs") - } - - if container, _ := daemon.GetContainer("75fb0b8009"); container != c4 { - t.Fatal("Should match a partial ID") - } - - if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 
{ - t.Fatal("Should match a full name") - } - - // c3.Name is a partial match for both c3.ID and c2.ID - if c, _ := daemon.GetContainer("3cdbd1aa"); c != c3 { - t.Fatal("Should match a full name even though it collides with another container's ID") - } - - if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 { - t.Fatal("Should match a container where the provided prefix is an exact match to its name, and is also a prefix for its ID") - } - - if _, err := daemon.GetContainer("3cdbd1"); err == nil { - t.Fatal("Should return an error when provided a prefix that partially matches multiple container IDs") - } - - if _, err := daemon.GetContainer("nothing"); err == nil { - t.Fatal("Should return an error when provided a prefix that is neither a name nor a partial match to an ID") - } -} - -func initDaemonWithVolumeStore(tmp string) (*Daemon, error) { - var err error - daemon := &Daemon{ - repository: tmp, - root: tmp, - } - daemon.volumes, err = store.New(tmp) - if err != nil { - return nil, err - } - - volumesDriver, err := local.New(tmp, 0, 0) - if err != nil { - return nil, err - } - volumedrivers.Register(volumesDriver, volumesDriver.Name()) - - return daemon, nil -} - -func TestValidContainerNames(t *testing.T) { - invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"} - validNames := []string{"word-word", "word_word", "1weoid"} - - for _, name := range invalidNames { - if validContainerNamePattern.MatchString(name) { - t.Fatalf("%q is not a valid container name and was returned as valid.", name) - } - } - - for _, name := range validNames { - if !validContainerNamePattern.MatchString(name) { - t.Fatalf("%q is a valid container name and was returned as invalid.", name) - } - } -} - -func TestContainerInitDNS(t *testing.T) { - tmp, err := ioutil.TempDir("", "docker-container-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" - containerPath := filepath.Join(tmp, containerID) - if err := os.MkdirAll(containerPath, 0755); err != nil { - t.Fatal(err) - } - - config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0, -"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, -"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", -"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"", -"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, -"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, -"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", -"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", -"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}}, -"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", -"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", 
-"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", -"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", -"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, -"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}` - - // Container struct only used to retrieve path to config file - container := &container.Container{CommonContainer: container.CommonContainer{Root: containerPath}} - configPath, err := container.ConfigPath() - if err != nil { - t.Fatal(err) - } - if err = ioutil.WriteFile(configPath, []byte(config), 0644); err != nil { - t.Fatal(err) - } - - hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", -"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, -"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, -"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` - - hostConfigPath, err := container.HostConfigPath() - if err != nil { - t.Fatal(err) - } - if err = ioutil.WriteFile(hostConfigPath, []byte(hostConfig), 0644); err != nil { - t.Fatal(err) - } - - daemon, err := initDaemonWithVolumeStore(tmp) - if err != nil { - t.Fatal(err) - } - defer volumedrivers.Unregister(volume.DefaultDriverName) - - c, err := daemon.load(containerID) - if err != nil { - t.Fatal(err) - } - - if c.HostConfig.DNS == nil { - t.Fatal("Expected container DNS to not be nil") - } - - if c.HostConfig.DNSSearch == nil { - t.Fatal("Expected container DNSSearch to not be nil") - } - - if c.HostConfig.DNSOptions == nil { - t.Fatal("Expected container DNSOptions to not be nil") - } -} - -func newPortNoError(proto, port string) nat.Port { - p, _ := nat.NewPort(proto, port) - return p -} - -func TestMerge(t *testing.T) { - volumesImage := make(map[string]struct{}) - volumesImage["/test1"] = struct{}{} - volumesImage["/test2"] = struct{}{} - portsImage := make(nat.PortSet) - portsImage[newPortNoError("tcp", "1111")] = struct{}{} - portsImage[newPortNoError("tcp", "2222")] = struct{}{} - configImage := &containertypes.Config{ - ExposedPorts: portsImage, - Env: []string{"VAR1=1", "VAR2=2"}, - Volumes: volumesImage, - } - - portsUser := make(nat.PortSet) - portsUser[newPortNoError("tcp", "2222")] = struct{}{} - portsUser[newPortNoError("tcp", "3333")] = struct{}{} - volumesUser := make(map[string]struct{}) - volumesUser["/test3"] = struct{}{} - configUser := &containertypes.Config{ - ExposedPorts: portsUser, - Env: []string{"VAR2=3", "VAR3=3"}, - Volumes: volumesUser, - } - - if err := merge(configUser, configImage); err != nil { - t.Error(err) - } - - if len(configUser.ExposedPorts) != 3 { - t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) - } - for portSpecs := range configUser.ExposedPorts { - if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { - t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) - } - } - if len(configUser.Env) != 3 { - t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) - } - for _, env := range 
configUser.Env { - if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { - t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) - } - } - - if len(configUser.Volumes) != 3 { - t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) - } - for v := range configUser.Volumes { - if v != "/test1" && v != "/test2" && v != "/test3" { - t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) - } - } - - ports, _, err := nat.ParsePortSpecs([]string{"0000"}) - if err != nil { - t.Error(err) - } - configImage2 := &containertypes.Config{ - ExposedPorts: ports, - } - - if err := merge(configUser, configImage2); err != nil { - t.Error(err) - } - - if len(configUser.ExposedPorts) != 4 { - t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) - } - for portSpecs := range configUser.ExposedPorts { - if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { - t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs) - } - } -} - -func TestDaemonReloadLabels(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"foo:bar"}, - }, - } - - valuesSets := make(map[string]interface{}) - valuesSets["labels"] = "foo:baz" - newConfig := &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"foo:baz"}, - valuesSet: valuesSets, - }, - } - - daemon.Reload(newConfig) - label := daemon.configStore.Labels[0] - if label != "foo:baz" { - t.Fatalf("Expected daemon label `foo:baz`, got %s", label) - } -} - -func TestDaemonReloadNotAffectOthers(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"foo:bar"}, - Debug: true, - }, - } - - valuesSets := make(map[string]interface{}) - valuesSets["labels"] = "foo:baz" - newConfig := &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"foo:baz"}, - valuesSet: valuesSets, - }, - } - - daemon.Reload(newConfig) - label := daemon.configStore.Labels[0] - if label != "foo:baz" { - t.Fatalf("Expected daemon label `foo:baz`, got %s", label) - } - debug := daemon.configStore.Debug - if !debug { - t.Fatalf("Expected debug 'enabled', got 'disabled'") - } -} - -func TestDaemonDiscoveryReload(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "memory://127.0.0.1", - ClusterAdvertise: "127.0.0.1:3333", - }, - } - - if err := daemon.initDiscovery(daemon.configStore); err != nil { - t.Fatal(err) - } - - expected := discovery.Entries{ - &discovery.Entry{Host: "127.0.0.1", Port: "3333"}, - } - - select { - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for discovery") - case <-daemon.discoveryWatcher.ReadyCh(): - } - - stopCh := make(chan struct{}) - defer close(stopCh) - ch, errCh := daemon.discoveryWatcher.Watch(stopCh) - - select { - case <-time.After(1 * time.Second): - t.Fatal("failed to get discovery advertisements in time") - case e := <-ch: - if !reflect.DeepEqual(e, expected) { - t.Fatalf("expected %v, got %v\n", expected, e) - } - case e := <-errCh: - t.Fatal(e) - } - - valuesSets := make(map[string]interface{}) - valuesSets["cluster-store"] = "memory://127.0.0.1:2222" - valuesSets["cluster-advertise"] = "127.0.0.1:5555" - newConfig := &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "memory://127.0.0.1:2222", - ClusterAdvertise: "127.0.0.1:5555", - valuesSet: valuesSets, - }, - 
} - - expected = discovery.Entries{ - &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, - } - - if err := daemon.Reload(newConfig); err != nil { - t.Fatal(err) - } - - select { - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for discovery") - case <-daemon.discoveryWatcher.ReadyCh(): - } - - ch, errCh = daemon.discoveryWatcher.Watch(stopCh) - - select { - case <-time.After(1 * time.Second): - t.Fatal("failed to get discovery advertisements in time") - case e := <-ch: - if !reflect.DeepEqual(e, expected) { - t.Fatalf("expected %v, got %v\n", expected, e) - } - case e := <-errCh: - t.Fatal(e) - } -} - -func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{} - - valuesSet := make(map[string]interface{}) - valuesSet["cluster-store"] = "memory://127.0.0.1:2222" - valuesSet["cluster-advertise"] = "127.0.0.1:5555" - newConfig := &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "memory://127.0.0.1:2222", - ClusterAdvertise: "127.0.0.1:5555", - valuesSet: valuesSet, - }, - } - - expected := discovery.Entries{ - &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, - } - - if err := daemon.Reload(newConfig); err != nil { - t.Fatal(err) - } - - select { - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for discovery") - case <-daemon.discoveryWatcher.ReadyCh(): - } - - stopCh := make(chan struct{}) - defer close(stopCh) - ch, errCh := daemon.discoveryWatcher.Watch(stopCh) - - select { - case <-time.After(1 * time.Second): - t.Fatal("failed to get discovery advertisements in time") - case e := <-ch: - if !reflect.DeepEqual(e, expected) { - t.Fatalf("expected %v, got %v\n", expected, e) - } - case e := <-errCh: - t.Fatal(e) - } -} - -func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "memory://127.0.0.1", - }, - } - valuesSets := make(map[string]interface{}) - valuesSets["cluster-advertise"] = "127.0.0.1:5555" - newConfig := &Config{ - CommonConfig: CommonConfig{ - ClusterAdvertise: "127.0.0.1:5555", - valuesSet: valuesSets, - }, - } - expected := discovery.Entries{ - &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, - } - - if err := daemon.Reload(newConfig); err != nil { - t.Fatal(err) - } - - select { - case <-daemon.discoveryWatcher.ReadyCh(): - case <-time.After(10 * time.Second): - t.Fatal("Timeout waiting for discovery") - } - stopCh := make(chan struct{}) - defer close(stopCh) - ch, errCh := daemon.discoveryWatcher.Watch(stopCh) - - select { - case <-time.After(1 * time.Second): - t.Fatal("failed to get discovery advertisements in time") - case e := <-ch: - if !reflect.DeepEqual(e, expected) { - t.Fatalf("expected %v, got %v\n", expected, e) - } - case e := <-errCh: - t.Fatal(e) - } - -} diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go deleted file mode 100644 index 15015b8e7..000000000 --- a/daemon/daemon_unix.go +++ /dev/null @@ -1,1157 +0,0 @@ -// +build linux freebsd - -package daemon - -import ( - "bytes" - "fmt" - "io/ioutil" - "net" - "os" - "path/filepath" - "runtime" - "runtime/debug" - "strconv" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/runconfig" - runconfigopts 
"github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/blkiodev" - pblkiodev "github.com/docker/engine-api/types/blkiodev" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/libnetwork" - nwconfig "github.com/docker/libnetwork/config" - "github.com/docker/libnetwork/drivers/bridge" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/netutils" - "github.com/docker/libnetwork/options" - lntypes "github.com/docker/libnetwork/types" - "github.com/opencontainers/runc/libcontainer/label" - "github.com/opencontainers/runc/libcontainer/user" - "github.com/opencontainers/specs/specs-go" -) - -const ( - // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 - linuxMinCPUShares = 2 - linuxMaxCPUShares = 262144 - platformSupported = true - // It's not kernel limit, we want this 4M limit to supply a reasonable functional container - linuxMinMemory = 4194304 - // constants for remapped root settings - defaultIDSpecifier string = "default" - defaultRemappedID string = "dockremap" - - // constant for cgroup drivers - cgroupFsDriver = "cgroupfs" - cgroupSystemdDriver = "systemd" -) - -func getMemoryResources(config containertypes.Resources) *specs.Memory { - memory := specs.Memory{} - - if config.Memory > 0 { - limit := uint64(config.Memory) - memory.Limit = &limit - } - - if config.MemoryReservation > 0 { - reservation := uint64(config.MemoryReservation) - memory.Reservation = &reservation - } - - if config.MemorySwap != 0 { - swap := uint64(config.MemorySwap) - memory.Swap = &swap - } - - if config.MemorySwappiness != nil { - swappiness := uint64(*config.MemorySwappiness) - memory.Swappiness = &swappiness - } - - if config.KernelMemory != 0 { - kernelMemory := uint64(config.KernelMemory) - memory.Kernel = &kernelMemory - } - - return &memory -} - -func getCPUResources(config containertypes.Resources) *specs.CPU { - cpu := specs.CPU{} - - if config.CPUShares != 0 { - shares := uint64(config.CPUShares) - cpu.Shares = &shares - } - - if config.CpusetCpus != "" { - cpuset := config.CpusetCpus - cpu.Cpus = &cpuset - } - - if config.CpusetMems != "" { - cpuset := config.CpusetMems - cpu.Mems = &cpuset - } - - if config.CPUPeriod != 0 { - period := uint64(config.CPUPeriod) - cpu.Period = &period - } - - if config.CPUQuota != 0 { - quota := uint64(config.CPUQuota) - cpu.Quota = "a - } - - return &cpu -} - -func getBlkioWeightDevices(config containertypes.Resources) ([]specs.WeightDevice, error) { - var stat syscall.Stat_t - var blkioWeightDevices []specs.WeightDevice - - for _, weightDevice := range config.BlkioWeightDevice { - if err := syscall.Stat(weightDevice.Path, &stat); err != nil { - return nil, err - } - weight := weightDevice.Weight - d := specs.WeightDevice{Weight: &weight} - d.Major = int64(stat.Rdev / 256) - d.Minor = int64(stat.Rdev % 256) - blkioWeightDevices = append(blkioWeightDevices, d) - } - - return blkioWeightDevices, nil -} - -func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { - var ( - labelOpts []string - err error - ) - - for _, opt := range config.SecurityOpt { - if opt == "no-new-privileges" { - container.NoNewPrivileges = true - } else { - var con []string - if strings.Contains(opt, "=") { - con = strings.SplitN(opt, "=", 2) - } else if strings.Contains(opt, ":") { - con = strings.SplitN(opt, ":", 2) - logrus.Warn("Security options with 
`:` as a separator are deprecated and will be completely unsupported in 1.13, use `=` instead.") - } - - if len(con) != 2 { - return fmt.Errorf("Invalid --security-opt 1: %q", opt) - } - - switch con[0] { - case "label": - labelOpts = append(labelOpts, con[1]) - case "apparmor": - container.AppArmorProfile = con[1] - case "seccomp": - container.SeccompProfile = con[1] - default: - return fmt.Errorf("Invalid --security-opt 2: %q", opt) - } - } - } - - container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) - return err -} - -func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.ThrottleDevice, error) { - var throttleDevices []specs.ThrottleDevice - var stat syscall.Stat_t - - for _, d := range devs { - if err := syscall.Stat(d.Path, &stat); err != nil { - return nil, err - } - rate := d.Rate - d := specs.ThrottleDevice{Rate: &rate} - d.Major = int64(stat.Rdev / 256) - d.Minor = int64(stat.Rdev % 256) - throttleDevices = append(throttleDevices, d) - } - - return throttleDevices, nil -} - -func checkKernelVersion(k, major, minor int) bool { - if v, err := kernel.GetKernelVersion(); err != nil { - logrus.Warnf("error getting kernel version: %s", err) - } else { - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { - return false - } - } - return true -} - -func checkKernel() error { - // Check for unsupported kernel versions - // FIXME: it would be cleaner to not test for specific versions, but rather - // test for specific functionalities. - // Unfortunately we can't test for the feature "does not cause a kernel panic" - // without actually causing a kernel panic, so we need this workaround until - // the circumstances of pre-3.10 crashes are clearer. - // For details see https://github.com/docker/docker/issues/407 - // Docker 1.11 and above doesn't actually run on kernels older than 3.4, - // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). - if !checkKernelVersion(3, 10, 0) { - v, _ := kernel.GetKernelVersion() - if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { - logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) - } - } - return nil -} - -// adaptContainerSettings is called during container creation to modify any -// settings necessary in the HostConfig structure. -func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { - if adjustCPUShares && hostConfig.CPUShares > 0 { - // Handle unsupported CPUShares - if hostConfig.CPUShares < linuxMinCPUShares { - logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) - hostConfig.CPUShares = linuxMinCPUShares - } else if hostConfig.CPUShares > linuxMaxCPUShares { - logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) - hostConfig.CPUShares = linuxMaxCPUShares - } - } - if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { - // By default, MemorySwap is set to twice the size of Memory. 
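- // e.g. docker run -m 512m with no explicit --memory-swap ends up with MemorySwap = 1g: the memory limit plus an equal amount of swap.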
- hostConfig.MemorySwap = hostConfig.Memory * 2 - } - if hostConfig.ShmSize == 0 { - hostConfig.ShmSize = container.DefaultSHMSize - } - var err error - if hostConfig.SecurityOpt == nil { - hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode, hostConfig.Privileged) - if err != nil { - return err - } - } - if hostConfig.MemorySwappiness == nil { - defaultSwappiness := int64(-1) - hostConfig.MemorySwappiness = &defaultSwappiness - } - if hostConfig.OomKillDisable == nil { - defaultOomKillDisable := false - hostConfig.OomKillDisable = &defaultOomKillDisable - } - - return nil -} - -func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) { - warnings := []string{} - - // memory subsystem checks and adjustments - if resources.Memory != 0 && resources.Memory < linuxMinMemory { - return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") - } - if resources.Memory > 0 && !sysInfo.MemoryLimit { - warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.") - logrus.Warn("Your kernel does not support memory limit capabilities. Limitation discarded.") - resources.Memory = 0 - resources.MemorySwap = -1 - } - if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { - warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.") - logrus.Warn("Your kernel does not support swap limit capabilities, memory limited without swap.") - resources.MemorySwap = -1 - } - if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { - return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") - } - if resources.Memory == 0 && resources.MemorySwap > 0 && !update { - return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") - } - if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { - warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") - logrus.Warn("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") - resources.MemorySwappiness = nil - } - if resources.MemorySwappiness != nil { - swappiness := *resources.MemorySwappiness - if swappiness < -1 || swappiness > 100 { - return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) - } - } - if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { - warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.") - logrus.Warn("Your kernel does not support memory soft limit capabilities. Limitation discarded.") - resources.MemoryReservation = 0 - } - if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { - return warnings, fmt.Errorf("Minimum memory reservation allowed is 4MB") - } - if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { - return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage") - } - if resources.KernelMemory > 0 && !sysInfo.KernelMemory { - warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. 
Limitation discarded.") - logrus.Warn("Your kernel does not support kernel memory limit capabilities. Limitation discarded.") - resources.KernelMemory = 0 - } - if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { - return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") - } - if resources.KernelMemory > 0 && !checkKernelVersion(4, 0, 0) { - warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") - logrus.Warn("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") - } - if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { - // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point - // warning the caller if they already wanted the feature to be off - if *resources.OomKillDisable { - warnings = append(warnings, "Your kernel does not support OomKillDisable, OomKillDisable discarded.") - logrus.Warn("Your kernel does not support OomKillDisable, OomKillDisable discarded.") - } - resources.OomKillDisable = nil - } - - if resources.PidsLimit != 0 && !sysInfo.PidsLimit { - warnings = append(warnings, "Your kernel does not support pids limit capabilities, pids limit discarded.") - logrus.Warn("Your kernel does not support pids limit capabilities, pids limit discarded.") - resources.PidsLimit = 0 - } - - // cpu subsystem checks and adjustments - if resources.CPUShares > 0 && !sysInfo.CPUShares { - warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.") - logrus.Warn("Your kernel does not support CPU shares. Shares discarded.") - resources.CPUShares = 0 - } - if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { - warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.") - logrus.Warn("Your kernel does not support CPU cfs period. Period discarded.") - resources.CPUPeriod = 0 - } - if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { - return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") - } - if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { - warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") - logrus.Warn("Your kernel does not support CPU cfs quota. Quota discarded.") - resources.CPUQuota = 0 - } - if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { - return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") - } - if resources.CPUPercent > 0 { - warnings = append(warnings, "%s does not support CPU percent. Percent discarded.", runtime.GOOS) - logrus.Warnf("%s does not support CPU percent. Percent discarded.", runtime.GOOS) - resources.CPUPercent = 0 - } - - // cpuset subsystem checks and adjustments - if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { - warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.") - logrus.Warn("Your kernel does not support cpuset. 
Cpuset discarded.") - resources.CpusetCpus = "" - resources.CpusetMems = "" - } - cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) - if err != nil { - return warnings, fmt.Errorf("Invalid value %s for cpuset cpus", resources.CpusetCpus) - } - if !cpusAvailable { - return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) - } - memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) - if err != nil { - return warnings, fmt.Errorf("Invalid value %s for cpuset mems", resources.CpusetMems) - } - if !memsAvailable { - return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) - } - - // blkio subsystem checks and adjustments - if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { - warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.") - logrus.Warn("Your kernel does not support Block I/O weight. Weight discarded.") - resources.BlkioWeight = 0 - } - if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { - return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") - } - if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { - return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) - } - if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { - warnings = append(warnings, "Your kernel does not support Block I/O weight_device.") - logrus.Warn("Your kernel does not support Block I/O weight_device. Weight-device discarded.") - resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} - } - if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { - warnings = append(warnings, "Your kernel does not support Block read limit in bytes per second.") - logrus.Warn("Your kernel does not support Block I/O read limit in bytes per second. --device-read-bps discarded.") - resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { - warnings = append(warnings, "Your kernel does not support Block write limit in bytes per second.") - logrus.Warn("Your kernel does not support Block I/O write limit in bytes per second. --device-write-bps discarded.") - resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { - warnings = append(warnings, "Your kernel does not support Block read limit in IO per second.") - logrus.Warn("Your kernel does not support Block I/O read limit in IO per second. -device-read-iops discarded.") - resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { - warnings = append(warnings, "Your kernel does not support Block write limit in IO per second.") - logrus.Warn("Your kernel does not support Block I/O write limit in IO per second. --device-write-iops discarded.") - resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} - } - - return warnings, nil -} - -func (daemon *Daemon) getCgroupDriver() string { - cgroupDriver := cgroupFsDriver - - if UsingSystemd(daemon.configStore) { - cgroupDriver = cgroupSystemdDriver - } - return cgroupDriver -} - -// getCD gets the raw value of the native.cgroupdriver option, if set. 
-func getCD(config *Config) string { - for _, option := range config.ExecOptions { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { - continue - } - return val - } - return "" -} - -// VerifyCgroupDriver validates native.cgroupdriver -func VerifyCgroupDriver(config *Config) error { - cd := getCD(config) - if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { - return nil - } - return fmt.Errorf("native.cgroupdriver option %s not supported", cd) -} - -// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd -func UsingSystemd(config *Config) bool { - return getCD(config) == cgroupSystemdDriver -} - -// verifyPlatformContainerSettings performs platform-specific validation of the -// hostconfig and config structures. -func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { - warnings := []string{} - sysInfo := sysinfo.New(true) - - warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) - if err != nil { - return warnings, err - } - - w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update) - if err != nil { - return warnings, err - } - warnings = append(warnings, w...) - - if hostConfig.ShmSize < 0 { - return warnings, fmt.Errorf("SHM size must be greater than 0") - } - - if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { - return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) - } - - // ip-forwarding does not affect container with '--net=host' (or '--net=none') - if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { - warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") - logrus.Warn("IPv4 forwarding is disabled. 
Networking will not work") - } - // check for various conflicting options with user namespaces - if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { - if hostConfig.Privileged { - return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces") - } - if hostConfig.NetworkMode.IsHost() { - return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled") - } - if hostConfig.PidMode.IsHost() { - return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled") - } - if hostConfig.ReadonlyRootfs { - return warnings, fmt.Errorf("Cannot use the --read-only option when user namespaces are enabled") - } - } - if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { - // CgroupParent for systemd cgroup should be named as "xxx.slice" - if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { - return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") - } - } - if hostConfig.Runtime == "" { - hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() - } - - if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { - return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) - } - - return warnings, nil -} - -// platformReload update configuration with platform specific options -func (daemon *Daemon) platformReload(config *Config, attributes *map[string]string) { - if config.IsValueSet("runtimes") { - daemon.configStore.Runtimes = config.Runtimes - // Always set the default one - daemon.configStore.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} - } - - if config.DefaultRuntime != "" { - daemon.configStore.DefaultRuntime = config.DefaultRuntime - } - - // Update attributes - var runtimeList bytes.Buffer - for name, rt := range daemon.configStore.Runtimes { - if runtimeList.Len() > 0 { - runtimeList.WriteRune(' ') - } - runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt)) - } - - (*attributes)["runtimes"] = runtimeList.String() - (*attributes)["default-runtime"] = daemon.configStore.DefaultRuntime -} - -// verifyDaemonSettings performs validation of daemon config struct -func verifyDaemonSettings(config *Config) error { - // Check for mutually incompatible config options - if config.bridgeConfig.Iface != "" && config.bridgeConfig.IP != "" { - return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") - } - if !config.bridgeConfig.EnableIPTables && !config.bridgeConfig.InterContainerCommunication { - return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") - } - if !config.bridgeConfig.EnableIPTables && config.bridgeConfig.EnableIPMasq { - config.bridgeConfig.EnableIPMasq = false - } - if err := VerifyCgroupDriver(config); err != nil { - return err - } - if config.CgroupParent != "" && UsingSystemd(config) { - if len(config.CgroupParent) <= 6 || !strings.HasSuffix(config.CgroupParent, ".slice") { - return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") - } - } - - if config.DefaultRuntime == "" { - config.DefaultRuntime = stockRuntimeName - } - if config.Runtimes == nil { - config.Runtimes = make(map[string]types.Runtime) - } - stockRuntimeOpts := []string{} - if UsingSystemd(config) { - stockRuntimeOpts = append(stockRuntimeOpts, "--systemd-cgroup=true") - } - config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary, Args: stockRuntimeOpts} - - return nil -} - -// checkSystem validates platform-specific requirements -func checkSystem() error { - if os.Geteuid() != 0 { - return fmt.Errorf("The Docker daemon needs to be run as root") - } - return checkKernel() -} - -// configureMaxThreads sets the Go runtime max threads threshold -// which is 90% of the kernel setting from /proc/sys/kernel/threads-max -func configureMaxThreads(config *Config) error { - mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") - if err != nil { - return err - } - mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) - if err != nil { - return err - } - maxThreads := (mtint / 100) * 90 - debug.SetMaxThreads(maxThreads) - logrus.Debugf("Golang's threads limit set to %d", maxThreads) - return nil -} - -// configureKernelSecuritySupport configures and validates security support for the kernel -func configureKernelSecuritySupport(config *Config, driverName string) error { - if config.EnableSelinuxSupport { - if selinuxEnabled() { - // As Docker on overlayFS and SELinux are incompatible at present, error on overlayfs being enabled - if driverName == "overlay" { - return fmt.Errorf("SELinux is not supported with the %s graph driver", driverName) - } - logrus.Debug("SELinux enabled successfully") - } else { - logrus.Warn("Docker could not enable SELinux on the host system") - } - } else { - selinuxSetDisabled() - } - return nil -} - -func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { - netOptions, err := daemon.networkOptions(config, activeSandboxes) - if err != nil { - return nil, err - } - - controller, err := libnetwork.New(netOptions...) 
- if err != nil { - return nil, fmt.Errorf("error obtaining controller instance: %v", err) - } - - if len(activeSandboxes) > 0 { - logrus.Infof("There are old running containers, the network config will not take effect") - return controller, nil - } - - // Initialize default network on "null" - if n, _ := controller.NetworkByName("none"); n == nil { - if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil { - return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) - } - } - - // Initialize default network on "host" - if n, _ := controller.NetworkByName("host"); n == nil { - if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil { - return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) - } - } - if !config.DisableBridge { - // Initialize default driver "bridge" - if err := initBridgeDriver(controller, config); err != nil { - return nil, err - } - } - - return controller, nil -} - -func driverOptions(config *Config) []nwconfig.Option { - bridgeConfig := options.Generic{ - "EnableIPForwarding": config.bridgeConfig.EnableIPForward, - "EnableIPTables": config.bridgeConfig.EnableIPTables, - "EnableUserlandProxy": config.bridgeConfig.EnableUserlandProxy} - bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig} - - dOptions := []nwconfig.Option{} - dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption)) - return dOptions -} - -func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { - if n, err := controller.NetworkByName("bridge"); err == nil { - if err = n.Delete(); err != nil { - return fmt.Errorf("could not delete the default bridge network: %v", err) - } - } - - bridgeName := bridge.DefaultBridgeName - if config.bridgeConfig.Iface != "" { - bridgeName = config.bridgeConfig.Iface - } - netOption := map[string]string{ - bridge.BridgeName: bridgeName, - bridge.DefaultBridge: strconv.FormatBool(true), - netlabel.DriverMTU: strconv.Itoa(config.Mtu), - bridge.EnableIPMasquerade: strconv.FormatBool(config.bridgeConfig.EnableIPMasq), - bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), - } - - // --ip processing - if config.bridgeConfig.DefaultIP != nil { - netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() - } - - var ( - ipamV4Conf *libnetwork.IpamConf - ipamV6Conf *libnetwork.IpamConf - ) - - ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} - - nw, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName) - if err == nil { - ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() - hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) - if hip.IsGlobalUnicast() { - ipamV4Conf.Gateway = nw.IP.String() - } - } - - if config.bridgeConfig.IP != "" { - ipamV4Conf.PreferredPool = config.bridgeConfig.IP - ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) - if err != nil { - return err - } - ipamV4Conf.Gateway = ip.String() - } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { - logrus.Infof("Default bridge (%s) is assigned with an IP address %s. 
Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) - } - - if config.bridgeConfig.FixedCIDR != "" { - _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) - if err != nil { - return err - } - - ipamV4Conf.SubPool = fCIDR.String() - } - - if config.bridgeConfig.DefaultGatewayIPv4 != nil { - ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() - } - - var deferIPv6Alloc bool - if config.bridgeConfig.FixedCIDRv6 != "" { - _, fCIDRv6, err := net.ParseCIDR(config.bridgeConfig.FixedCIDRv6) - if err != nil { - return err - } - - // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has - // at least 48 host bits, we need to guarantee the current behavior where the containers' - // IPv6 addresses will be constructed based on the containers' interface MAC address. - // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints - // on this network until after the driver has created the endpoint and returned the - // constructed address. Libnetwork will then reserve this address with the ipam driver. - ones, _ := fCIDRv6.Mask.Size() - deferIPv6Alloc = ones <= 80 - - if ipamV6Conf == nil { - ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} - } - ipamV6Conf.PreferredPool = fCIDRv6.String() - - // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 - // address belongs to the same network, we need to inform libnetwork about it, so - // that it can be reserved with IPAM and it will not be given away to somebody else - for _, nw6 := range nw6List { - if fCIDRv6.Contains(nw6.IP) { - ipamV6Conf.Gateway = nw6.IP.String() - break - } - } - } - - if config.bridgeConfig.DefaultGatewayIPv6 != nil { - if ipamV6Conf == nil { - ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} - } - ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.bridgeConfig.DefaultGatewayIPv6.String() - } - - v4Conf := []*libnetwork.IpamConf{ipamV4Conf} - v6Conf := []*libnetwork.IpamConf{} - if ipamV6Conf != nil { - v6Conf = append(v6Conf, ipamV6Conf) - } - // Initialize default network on "bridge" with the same name - _, err = controller.NewNetwork("bridge", "bridge", "", - libnetwork.NetworkOptionEnableIPv6(config.bridgeConfig.EnableIPv6), - libnetwork.NetworkOptionDriverOpts(netOption), - libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), - libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) - if err != nil { - return fmt.Errorf("Error creating default \"bridge\" network: %v", err) - } - return nil -} - -// setupInitLayer populates a directory with mountpoints suitable -// for bind-mounting things into the container. -// -// This extra layer is used by all containers as the top-most ro layer. It protects -// the container from unwanted side-effects on the rw layer. 
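The `ones <= 80` test above is the entire trigger for deferring IPv6 allocation to the driver: a prefix of /80 or shorter leaves at least 48 host bits, enough to derive endpoint addresses from interface MACs. Just that decision, as a small sketch (helper name hypothetical):

    package main

    import (
        "fmt"
        "net"
    )

    // deferIPv6 reports whether endpoint address allocation should be left
    // to the driver: a pool of /80 or wider keeps >= 48 host bits free, so
    // addresses can embed the endpoint's MAC.
    func deferIPv6(cidr string) (bool, error) {
        _, n, err := net.ParseCIDR(cidr)
        if err != nil {
            return false, err
        }
        ones, _ := n.Mask.Size()
        return ones <= 80, nil
    }

    func main() {
        for _, c := range []string{"2001:db8::/64", "2001:db8::/96"} {
            defer6, err := deferIPv6(c)
            fmt.Println(c, defer6, err)
        }
    }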
-func setupInitLayer(initLayer string, rootUID, rootGID int) error { - for pth, typ := range map[string]string{ - "/dev/pts": "dir", - "/dev/shm": "dir", - "/proc": "dir", - "/sys": "dir", - "/.dockerenv": "file", - "/etc/resolv.conf": "file", - "/etc/hosts": "file", - "/etc/hostname": "file", - "/dev/console": "file", - "/etc/mtab": "/proc/mounts", - } { - parts := strings.Split(pth, "/") - prev := "/" - for _, p := range parts[1:] { - prev = filepath.Join(prev, p) - syscall.Unlink(filepath.Join(initLayer, prev)) - } - - if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { - if os.IsNotExist(err) { - if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { - return err - } - switch typ { - case "dir": - if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { - return err - } - case "file": - f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) - if err != nil { - return err - } - f.Chown(rootUID, rootGID) - f.Close() - default: - if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { - return err - } - } - } else { - return err - } - } - } - - // Layer is ready to use, if it wasn't before. - return nil -} - -// Parse the remapped root (user namespace) option, which can be one of: -// username - valid username from /etc/passwd -// username:groupname - valid username; valid groupname from /etc/group -// uid - 32-bit unsigned int valid Linux UID value -// uid:gid - uid value; 32-bit unsigned int Linux GID value -// -// If no groupname is specified, and a username is specified, an attempt -// will be made to lookup a gid for that username as a groupname -// -// If names are used, they are verified to exist in passwd/group -func parseRemappedRoot(usergrp string) (string, string, error) { - - var ( - userID, groupID int - username, groupname string - ) - - idparts := strings.Split(usergrp, ":") - if len(idparts) > 2 { - return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) - } - - if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { - // must be a uid; take it as valid - userID = int(uid) - luser, err := user.LookupUid(userID) - if err != nil { - return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) - } - username = luser.Name - if len(idparts) == 1 { - // if the uid was numeric and no gid was specified, take the uid as the gid - groupID = userID - lgrp, err := user.LookupGid(groupID) - if err != nil { - return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) - } - groupname = lgrp.Name - } - } else { - lookupName := idparts[0] - // special case: if the user specified "default", they want Docker to create or - // use (after creation) the "dockremap" user/group for root remapping - if lookupName == defaultIDSpecifier { - lookupName = defaultRemappedID - } - luser, err := user.LookupUser(lookupName) - if err != nil && idparts[0] != defaultIDSpecifier { - // error if the name requested isn't the special "dockremap" ID - return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) - } else if err != nil { - // special case-- if the username == "default", then we have been asked - // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} - // ranges will be used for the user and group mappings in user namespaced containers - _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) - if err == nil 
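setupInitLayer above walks a fixed path-to-type table and materializes each entry as a directory, an empty file, or a symlink. A trimmed, runnable sketch of the same idea against a temp directory; it drops the unlink pass and the uid/gid remapping of the removed code:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "path/filepath"
    )

    // populateInitLayer pre-creates the paths every container expects so
    // that later bind mounts always have a target. Ownership fixups from
    // the removed code are skipped here.
    func populateInitLayer(root string) error {
        for pth, typ := range map[string]string{
            "/proc":            "dir",
            "/sys":             "dir",
            "/etc/resolv.conf": "file",
            "/etc/mtab":        "/proc/mounts", // any other value is a symlink target
        } {
            full := filepath.Join(root, pth)
            if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil {
                return err
            }
            switch typ {
            case "dir":
                if err := os.MkdirAll(full, 0755); err != nil {
                    return err
                }
            case "file":
                f, err := os.OpenFile(full, os.O_CREATE, 0755)
                if err != nil {
                    return err
                }
                f.Close()
            default:
                if err := os.Symlink(typ, full); err != nil {
                    return err
                }
            }
        }
        return nil
    }

    func main() {
        dir, err := ioutil.TempDir("", "initlayer-")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dir)
        fmt.Println(populateInitLayer(dir), dir)
    }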
{ - return defaultRemappedID, defaultRemappedID, nil - } - return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) - } - username = luser.Name - if len(idparts) == 1 { - // we only have a string username, and no group specified; look up gid from username as group - group, err := user.LookupGroup(lookupName) - if err != nil { - return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) - } - groupID = group.Gid - groupname = group.Name - } - } - - if len(idparts) == 2 { - // groupname or gid is separately specified and must be resolved - // to an unsigned 32-bit gid - if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { - // must be a gid, take it as valid - groupID = int(gid) - lgrp, err := user.LookupGid(groupID) - if err != nil { - return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) - } - groupname = lgrp.Name - } else { - // not a number; attempt a lookup - if _, err := user.LookupGroup(idparts[1]); err != nil { - return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) - } - groupname = idparts[1] - } - } - return username, groupname, nil -} - -func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { - if runtime.GOOS != "linux" && config.RemappedRoot != "" { - return nil, nil, fmt.Errorf("User namespaces are only supported on Linux") - } - - // if the daemon was started with remapped root option, parse - // the config option to the int uid,gid values - var ( - uidMaps, gidMaps []idtools.IDMap - ) - if config.RemappedRoot != "" { - username, groupname, err := parseRemappedRoot(config.RemappedRoot) - if err != nil { - return nil, nil, err - } - if username == "root" { - // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op - // effectively - logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") - return uidMaps, gidMaps, nil - } - logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname) - // update remapped root setting now that we have resolved them to actual names - config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) - - uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname) - if err != nil { - return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err) - } - } - return uidMaps, gidMaps, nil -} - -func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { - config.Root = rootDir - // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) - // so that syscalls executing as non-root, operating on subdirectories of the graph root - // (e.g. mounted layers of a container) can traverse this path. - // The user namespace support will create subdirectories for the remapped root host uid:gid - // pair owned by that same uid:gid pair for proper write access to those needed metadata and - // layer content subtrees. 
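The accepted --userns-remap forms documented above all reduce to `user[:group]`, with a lone user doubling as the group name. A minimal sketch of that split; the numeric-ID handling and /etc/passwd and /etc/group lookups are deliberately left out, and the helper name is hypothetical:

    package main

    import (
        "fmt"
        "strings"
    )

    // splitUserGroup handles the "user[:group]" shape of --userns-remap; a
    // lone user also names the group. ID resolution is intentionally omitted.
    func splitUserGroup(spec string) (user, group string, err error) {
        parts := strings.Split(spec, ":")
        if len(parts) > 2 || parts[0] == "" {
            return "", "", fmt.Errorf("invalid user/group specification %q", spec)
        }
        user = parts[0]
        group = user // default: reuse the user part as the group
        if len(parts) == 2 {
            group = parts[1]
        }
        return user, group, nil
    }

    func main() {
        fmt.Println(splitUserGroup("1000"))
        fmt.Println(splitUserGroup("dockremap:dockremap"))
        fmt.Println(splitUserGroup("a:b:c"))
    }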
- if _, err := os.Stat(rootDir); err == nil { - // root current exists; verify the access bits are correct by setting them - if err = os.Chmod(rootDir, 0711); err != nil { - return err - } - } else if os.IsNotExist(err) { - // no root exists yet, create it 0711 with root:root ownership - if err := os.MkdirAll(rootDir, 0711); err != nil { - return err - } - } - - // if user namespaces are enabled we will create a subtree underneath the specified root - // with any/all specified remapped root uid/gid options on the daemon creating - // a new subdirectory with ownership set to the remapped uid/gid (so as to allow - // `chdir()` to work for containers namespaced to that uid/gid) - if config.RemappedRoot != "" { - config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID)) - logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) - // Create the root directory if it doesn't exist - if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil { - return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) - } - } - return nil -} - -// registerLinks writes the links to a file. -func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { - if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { - return nil - } - - for _, l := range hostConfig.Links { - name, alias, err := runconfigopts.ParseLink(l) - if err != nil { - return err - } - child, err := daemon.GetContainer(name) - if err != nil { - return fmt.Errorf("Could not get container for %s", name) - } - for child.HostConfig.NetworkMode.IsContainer() { - parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) - child, err = daemon.GetContainer(parts[1]) - if err != nil { - return fmt.Errorf("Could not get container for %s", parts[1]) - } - } - if child.HostConfig.NetworkMode.IsHost() { - return runconfig.ErrConflictHostNetworkAndLinks - } - if err := daemon.registerLink(container, child, alias); err != nil { - return err - } - } - - // After we load all the links into the daemon - // set them to nil on the hostconfig - return container.WriteHostConfig() -} - -// conditionalMountOnStart is a platform specific helper function during the -// container start to call mount. -func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { - return daemon.Mount(container) -} - -// conditionalUnmountOnCleanup is a platform specific helper function called -// during the cleanup of a container to unmount. 
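registerLinks above follows `container:<name>` network modes through intermediate containers until it reaches a concrete mode. The same chain walk in isolation, over a hypothetical name-to-mode map, with an explicit bound added so a cycle cannot loop forever:

    package main

    import (
        "fmt"
        "strings"
    )

    // resolveContainerTarget follows "container:<name>" network modes until
    // it reaches a container with a concrete mode, bounding the walk so a
    // cycle cannot spin forever.
    func resolveContainerTarget(modes map[string]string, start string) (string, error) {
        cur := start
        for i := 0; i <= len(modes); i++ {
            mode := modes[cur]
            if !strings.HasPrefix(mode, "container:") {
                return cur, nil
            }
            cur = strings.SplitN(mode, ":", 2)[1]
        }
        return "", fmt.Errorf("network-mode chain starting at %q does not terminate", start)
    }

    func main() {
        modes := map[string]string{"a": "container:b", "b": "container:c", "c": "bridge"}
        fmt.Println(resolveContainerTarget(modes, "a"))
    }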
-func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { - return daemon.Unmount(container) -} - -func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { - if !c.IsRunning() { - return nil, errNotRunning{c.ID} - } - stats, err := daemon.containerd.Stats(c.ID) - if err != nil { - return nil, err - } - s := &types.StatsJSON{} - cgs := stats.CgroupStats - if cgs != nil { - s.BlkioStats = types.BlkioStats{ - IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive), - IoServicedRecursive: copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive), - IoQueuedRecursive: copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive), - IoServiceTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive), - IoWaitTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive), - IoMergedRecursive: copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive), - IoTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive), - SectorsRecursive: copyBlkioEntry(cgs.BlkioStats.SectorsRecursive), - } - cpu := cgs.CpuStats - s.CPUStats = types.CPUStats{ - CPUUsage: types.CPUUsage{ - TotalUsage: cpu.CpuUsage.TotalUsage, - PercpuUsage: cpu.CpuUsage.PercpuUsage, - UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, - UsageInUsermode: cpu.CpuUsage.UsageInUsermode, - }, - ThrottlingData: types.ThrottlingData{ - Periods: cpu.ThrottlingData.Periods, - ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, - ThrottledTime: cpu.ThrottlingData.ThrottledTime, - }, - } - mem := cgs.MemoryStats.Usage - s.MemoryStats = types.MemoryStats{ - Usage: mem.Usage, - MaxUsage: mem.MaxUsage, - Stats: cgs.MemoryStats.Stats, - Failcnt: mem.Failcnt, - Limit: mem.Limit, - } - // if the container does not set memory limit, use the machineMemory - if mem.Limit > daemon.statsCollector.machineMemory && daemon.statsCollector.machineMemory > 0 { - s.MemoryStats.Limit = daemon.statsCollector.machineMemory - } - if cgs.PidsStats != nil { - s.PidsStats = types.PidsStats{ - Current: cgs.PidsStats.Current, - } - } - } - s.Read = time.Unix(int64(stats.Timestamp), 0) - return s, nil -} - -// setDefaultIsolation determines the default isolation mode for the -// daemon to run in. 
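One detail of stats() above deserves a note: a cgroup with no memory limit reports an effectively infinite value, so the daemon substitutes the machine's RAM when it knows it. That clamp on its own (names hypothetical):

    package main

    import "fmt"

    // effectiveMemLimit substitutes physical RAM when the cgroup reports a
    // limit above it (i.e. no real limit was set), mirroring the stats()
    // fixup above.
    func effectiveMemLimit(cgroupLimit, machineMemory uint64) uint64 {
        if machineMemory > 0 && cgroupLimit > machineMemory {
            return machineMemory
        }
        return cgroupLimit
    }

    func main() {
        const ram = uint64(8) << 30
        fmt.Println(effectiveMemLimit(1<<62, ram)) // "unlimited" -> machine RAM
        fmt.Println(effectiveMemLimit(2<<30, ram)) // explicit limit kept
    }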
This is only applicable on Windows -func (daemon *Daemon) setDefaultIsolation() error { - return nil -} - -func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { - var layers []string - for _, l := range rootfs.DiffIDs { - layers = append(layers, l.String()) - } - return types.RootFS{ - Type: rootfs.Type, - Layers: layers, - } -} - -// setupDaemonProcess sets various settings for the daemon's process -func setupDaemonProcess(config *Config) error { - // setup the daemons oom_score_adj - return setupOOMScoreAdj(config.OOMScoreAdjust) -} - -func setupOOMScoreAdj(score int) error { - f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0) - if err != nil { - return err - } - _, err = f.WriteString(strconv.Itoa(score)) - f.Close() - return err -} diff --git a/daemon/daemon_unix_test.go b/daemon/daemon_unix_test.go deleted file mode 100644 index fae84bab6..000000000 --- a/daemon/daemon_unix_test.go +++ /dev/null @@ -1,199 +0,0 @@ -// +build !windows - -package daemon - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/docker/docker/container" - containertypes "github.com/docker/engine-api/types/container" -) - -// Unix test as uses settings which are not available on Windows -func TestAdjustCPUShares(t *testing.T) { - tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - daemon := &Daemon{ - repository: tmp, - root: tmp, - } - - hostConfig := &containertypes.HostConfig{ - Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, - } - daemon.adaptContainerSettings(hostConfig, true) - if hostConfig.CPUShares != linuxMinCPUShares { - t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares) - } - - hostConfig.CPUShares = linuxMaxCPUShares + 1 - daemon.adaptContainerSettings(hostConfig, true) - if hostConfig.CPUShares != linuxMaxCPUShares { - t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares) - } - - hostConfig.CPUShares = 0 - daemon.adaptContainerSettings(hostConfig, true) - if hostConfig.CPUShares != 0 { - t.Error("Expected CPUShares to be unchanged") - } - - hostConfig.CPUShares = 1024 - daemon.adaptContainerSettings(hostConfig, true) - if hostConfig.CPUShares != 1024 { - t.Error("Expected CPUShares to be unchanged") - } -} - -// Unix test as uses settings which are not available on Windows -func TestAdjustCPUSharesNoAdjustment(t *testing.T) { - tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - daemon := &Daemon{ - repository: tmp, - root: tmp, - } - - hostConfig := &containertypes.HostConfig{ - Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, - } - daemon.adaptContainerSettings(hostConfig, false) - if hostConfig.CPUShares != linuxMinCPUShares-1 { - t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1) - } - - hostConfig.CPUShares = linuxMaxCPUShares + 1 - daemon.adaptContainerSettings(hostConfig, false) - if hostConfig.CPUShares != linuxMaxCPUShares+1 { - t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1) - } - - hostConfig.CPUShares = 0 - daemon.adaptContainerSettings(hostConfig, false) - if hostConfig.CPUShares != 0 { - t.Error("Expected CPUShares to be unchanged") - } - - hostConfig.CPUShares = 1024 - daemon.adaptContainerSettings(hostConfig, false) - if hostConfig.CPUShares != 1024 { - t.Error("Expected CPUShares to be unchanged") - } -} - -// Unix test as uses settings which are not available on Windows -func TestParseSecurityOptWithDeprecatedColon(t *testing.T) { - 
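setupOOMScoreAdj earlier in this hunk is a plain procfs write. The same thing as a runnable Linux-only sketch; note that lowering the score of a process usually requires root or CAP_SYS_RESOURCE:

    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    // setOOMScoreAdj writes the score to procfs exactly as the removed
    // helper does; negative values shield the process from the OOM killer.
    func setOOMScoreAdj(score int) error {
        f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0)
        if err != nil {
            return err
        }
        defer f.Close()
        _, err = f.WriteString(strconv.Itoa(score))
        return err
    }

    func main() {
        fmt.Println(setOOMScoreAdj(-500))
    }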
container := &container.Container{} - config := &containertypes.HostConfig{} - - // test apparmor - config.SecurityOpt = []string{"apparmor=test_profile"} - if err := parseSecurityOpt(container, config); err != nil { - t.Fatalf("Unexpected parseSecurityOpt error: %v", err) - } - if container.AppArmorProfile != "test_profile" { - t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) - } - - // test seccomp - sp := "/path/to/seccomp_test.json" - config.SecurityOpt = []string{"seccomp=" + sp} - if err := parseSecurityOpt(container, config); err != nil { - t.Fatalf("Unexpected parseSecurityOpt error: %v", err) - } - if container.SeccompProfile != sp { - t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile) - } - - // test valid label - config.SecurityOpt = []string{"label=user:USER"} - if err := parseSecurityOpt(container, config); err != nil { - t.Fatalf("Unexpected parseSecurityOpt error: %v", err) - } - - // test invalid label - config.SecurityOpt = []string{"label"} - if err := parseSecurityOpt(container, config); err == nil { - t.Fatal("Expected parseSecurityOpt error, got nil") - } - - // test invalid opt - config.SecurityOpt = []string{"test"} - if err := parseSecurityOpt(container, config); err == nil { - t.Fatal("Expected parseSecurityOpt error, got nil") - } -} - -func TestParseSecurityOpt(t *testing.T) { - container := &container.Container{} - config := &containertypes.HostConfig{} - - // test apparmor - config.SecurityOpt = []string{"apparmor=test_profile"} - if err := parseSecurityOpt(container, config); err != nil { - t.Fatalf("Unexpected parseSecurityOpt error: %v", err) - } - if container.AppArmorProfile != "test_profile" { - t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) - } - - // test seccomp - sp := "/path/to/seccomp_test.json" - config.SecurityOpt = []string{"seccomp=" + sp} - if err := parseSecurityOpt(container, config); err != nil { - t.Fatalf("Unexpected parseSecurityOpt error: %v", err) - } - if container.SeccompProfile != sp { - t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile) - } - - // test valid label - config.SecurityOpt = []string{"label=user:USER"} - if err := parseSecurityOpt(container, config); err != nil { - t.Fatalf("Unexpected parseSecurityOpt error: %v", err) - } - - // test invalid label - config.SecurityOpt = []string{"label"} - if err := parseSecurityOpt(container, config); err == nil { - t.Fatal("Expected parseSecurityOpt error, got nil") - } - - // test invalid opt - config.SecurityOpt = []string{"test"} - if err := parseSecurityOpt(container, config); err == nil { - t.Fatal("Expected parseSecurityOpt error, got nil") - } -} - -func TestNetworkOptions(t *testing.T) { - daemon := &Daemon{} - dconfigCorrect := &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "consul://localhost:8500", - ClusterAdvertise: "192.168.0.1:8000", - }, - } - - if _, err := daemon.networkOptions(dconfigCorrect, nil); err != nil { - t.Fatalf("Expect networkOptions success, got error: %v", err) - } - - dconfigWrong := &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "consul://localhost:8500://test://bbb", - }, - } - - if _, err := daemon.networkOptions(dconfigWrong, nil); err == nil { - t.Fatalf("Expected networkOptions error, got nil") - } -} diff --git a/daemon/daemon_unsupported.go b/daemon/daemon_unsupported.go deleted file mode 100644 index cb1acf63d..000000000 --- 
a/daemon/daemon_unsupported.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !linux,!freebsd,!windows,!solaris - -package daemon - -const platformSupported = false diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go deleted file mode 100644 index f6efe5478..000000000 --- a/daemon/daemon_windows.go +++ /dev/null @@ -1,437 +0,0 @@ -package daemon - -import ( - "fmt" - "os" - "strings" - - "github.com/Microsoft/hcsshim" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/runconfig" - "github.com/docker/engine-api/types" - pblkiodev "github.com/docker/engine-api/types/blkiodev" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/libnetwork" - nwconfig "github.com/docker/libnetwork/config" - winlibnetwork "github.com/docker/libnetwork/drivers/windows" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" - blkiodev "github.com/opencontainers/runc/libcontainer/configs" -) - -const ( - defaultNetworkSpace = "172.16.0.0/12" - platformSupported = true - windowsMinCPUShares = 1 - windowsMaxCPUShares = 10000 -) - -func getBlkioWeightDevices(config *containertypes.HostConfig) ([]blkiodev.WeightDevice, error) { - return nil, nil -} - -func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { - return nil -} - -func getBlkioReadIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { - return nil, nil -} - -func getBlkioWriteIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { - return nil, nil -} - -func getBlkioReadBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { - return nil, nil -} - -func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { - return nil, nil -} - -func setupInitLayer(initLayer string, rootUID, rootGID int) error { - return nil -} - -func checkKernel() error { - return nil -} - -func (daemon *Daemon) getCgroupDriver() string { - return "" -} - -// adaptContainerSettings is called during container creation to modify any -// settings necessary in the HostConfig structure. 
-func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { - if hostConfig == nil { - return nil - } - - if hostConfig.CPUShares < 0 { - logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, windowsMinCPUShares) - hostConfig.CPUShares = windowsMinCPUShares - } else if hostConfig.CPUShares > windowsMaxCPUShares { - logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, windowsMaxCPUShares) - hostConfig.CPUShares = windowsMaxCPUShares - } - - return nil -} - -func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo) ([]string, error) { - warnings := []string{} - - // cpu subsystem checks and adjustments - if resources.CPUPercent < 0 || resources.CPUPercent > 100 { - return warnings, fmt.Errorf("Range of CPU percent is from 1 to 100") - } - - if resources.CPUPercent > 0 && resources.CPUShares > 0 { - return warnings, fmt.Errorf("Conflicting options: CPU Shares and CPU Percent cannot both be set") - } - - // TODO Windows: Add more validation of resource settings not supported on Windows - - if resources.BlkioWeight > 0 { - warnings = append(warnings, "Windows does not support Block I/O weight. Weight discarded.") - logrus.Warn("Windows does not support Block I/O weight. --blkio-weight discarded.") - resources.BlkioWeight = 0 - } - if len(resources.BlkioWeightDevice) > 0 { - warnings = append(warnings, "Windows does not support Block I/O weight_device.") - logrus.Warn("Windows does not support Block I/O weight_device. --blkio-weight-device discarded.") - resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} - } - if len(resources.BlkioDeviceReadBps) > 0 { - warnings = append(warnings, "Windows does not support Block read limit in bytes per second.") - logrus.Warn("Windows does not support Block I/O read limit in bytes per second. --device-read-bps discarded.") - resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceWriteBps) > 0 { - warnings = append(warnings, "Windows does not support Block write limit in bytes per second.") - logrus.Warn("Windows does not support Block I/O write limit in bytes per second. --device-write-bps discarded.") - resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceReadIOps) > 0 { - warnings = append(warnings, "Windows does not support Block read limit in IO per second.") - logrus.Warn("Windows does not support Block I/O read limit in IO per second. -device-read-iops discarded.") - resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceWriteIOps) > 0 { - warnings = append(warnings, "Windows does not support Block write limit in IO per second.") - logrus.Warn("Windows does not support Block I/O write limit in IO per second. --device-write-iops discarded.") - resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} - } - return warnings, nil -} - -// verifyPlatformContainerSettings performs platform-specific validation of the -// hostconfig and config structures. -func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { - warnings := []string{} - - w, err := verifyContainerResources(&hostConfig.Resources, nil) - warnings = append(warnings, w...) 
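The Windows adaptContainerSettings above clamps CPU shares into [1, 10000] instead of rejecting out-of-range values. The clamp in isolation, with the same bounds (constant and helper names are illustrative):

    package main

    import "fmt"

    const (
        windowsMinCPUShares = 1
        windowsMaxCPUShares = 10000
    )

    // clampCPUShares pulls out-of-range requests to the nearest allowed
    // value, as the Windows daemon does, rather than returning an error.
    // Zero is left alone: it means "use the platform default".
    func clampCPUShares(shares int64) int64 {
        if shares < 0 {
            return windowsMinCPUShares
        }
        if shares > windowsMaxCPUShares {
            return windowsMaxCPUShares
        }
        return shares
    }

    func main() {
        for _, s := range []int64{-5, 512, 20000} {
            fmt.Println(s, "->", clampCPUShares(s))
        }
    }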
- if err != nil { - return warnings, err - } - - return warnings, nil -} - -// platformReload update configuration with platform specific options -func (daemon *Daemon) platformReload(config *Config, attributes *map[string]string) { -} - -// verifyDaemonSettings performs validation of daemon config struct -func verifyDaemonSettings(config *Config) error { - return nil -} - -// checkSystem validates platform-specific requirements -func checkSystem() error { - // Validate the OS version. Note that docker.exe must be manifested for this - // call to return the correct version. - osv := system.GetOSVersion() - if osv.MajorVersion < 10 { - return fmt.Errorf("This version of Windows does not support the docker daemon") - } - if osv.Build < 14300 { - return fmt.Errorf("The Windows daemon requires Windows Server 2016 Technical Preview 5 build 14300 or later") - } - return nil -} - -// configureKernelSecuritySupport configures and validate security support for the kernel -func configureKernelSecuritySupport(config *Config, driverName string) error { - return nil -} - -// configureMaxThreads sets the Go runtime max threads threshold -func configureMaxThreads(config *Config) error { - return nil -} - -func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { - netOptions, err := daemon.networkOptions(config, nil) - if err != nil { - return nil, err - } - controller, err := libnetwork.New(netOptions...) - if err != nil { - return nil, fmt.Errorf("error obtaining controller instance: %v", err) - } - - hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") - if err != nil { - return nil, err - } - - // Remove networks not present in HNS - for _, v := range controller.Networks() { - options := v.Info().DriverOptions() - hnsid := options[winlibnetwork.HNSID] - found := false - - for _, v := range hnsresponse { - if v.Id == hnsid { - found = true - break - } - } - - if !found { - err = v.Delete() - if err != nil { - return nil, err - } - } - } - - _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) - if err != nil { - return nil, err - } - - // discover and add HNS networks to windows - // network that exist are removed and added again - for _, v := range hnsresponse { - var n libnetwork.Network - s := func(current libnetwork.Network) bool { - options := current.Info().DriverOptions() - if options[winlibnetwork.HNSID] == v.Id { - n = current - return true - } - return false - } - - controller.WalkNetworks(s) - if n != nil { - v.Name = n.Name() - n.Delete() - } - - netOption := map[string]string{ - winlibnetwork.NetworkName: v.Name, - winlibnetwork.HNSID: v.Id, - } - - v4Conf := []*libnetwork.IpamConf{} - for _, subnet := range v.Subnets { - ipamV4Conf := libnetwork.IpamConf{} - ipamV4Conf.PreferredPool = subnet.AddressPrefix - ipamV4Conf.Gateway = subnet.GatewayAddress - v4Conf = append(v4Conf, &ipamV4Conf) - } - - name := v.Name - // There is only one nat network supported in windows. 
- // If it exists with a different name add it as the default name - if runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) { - name = runconfig.DefaultDaemonNetworkMode().NetworkName() - } - - v6Conf := []*libnetwork.IpamConf{} - _, err := controller.NewNetwork(strings.ToLower(v.Type), name, "", - libnetwork.NetworkOptionGeneric(options.Generic{ - netlabel.GenericData: netOption, - }), - libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), - ) - - if err != nil { - logrus.Errorf("Error occurred when creating network %v", err) - } - } - - if !config.DisableBridge { - // Initialize default driver "bridge" - if err := initBridgeDriver(controller, config); err != nil { - return nil, err - } - } - - return controller, nil -} - -func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { - if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { - return nil - } - - netOption := map[string]string{ - winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), - } - - ipamV4Conf := libnetwork.IpamConf{} - if config.bridgeConfig.FixedCIDR == "" { - ipamV4Conf.PreferredPool = defaultNetworkSpace - } else { - ipamV4Conf.PreferredPool = config.bridgeConfig.FixedCIDR - } - - v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} - v6Conf := []*libnetwork.IpamConf{} - - _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", - libnetwork.NetworkOptionGeneric(options.Generic{ - netlabel.GenericData: netOption, - }), - libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), - ) - - if err != nil { - return fmt.Errorf("Error creating default network: %v", err) - } - return nil -} - -// registerLinks sets up links between containers and writes the -// configuration out for persistence. As of Windows TP4, links are not supported. -func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { - return nil -} - -func (daemon *Daemon) cleanupMountsByID(in string) error { - return nil -} - -func (daemon *Daemon) cleanupMounts() error { - return nil -} - -func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { - return nil, nil, nil -} - -func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { - config.Root = rootDir - // Create the root directory if it doesn't exists - if err := system.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) { - return err - } - return nil -} - -// runasHyperVContainer returns true if we are going to run as a Hyper-V container -func (daemon *Daemon) runAsHyperVContainer(container *container.Container) bool { - if container.HostConfig.Isolation.IsDefault() { - // Container is set to use the default, so take the default from the daemon configuration - return daemon.defaultIsolation.IsHyperV() - } - - // Container is requesting an isolation mode. Honour it. - return container.HostConfig.Isolation.IsHyperV() - -} - -// conditionalMountOnStart is a platform specific helper function during the -// container start to call mount. 
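The HNS loop above is a reconciliation pass: any network the controller remembers that HNS no longer reports gets deleted. Reduced to its set-difference core (all names hypothetical):

    package main

    import "fmt"

    // staleNetworks returns the IDs the controller knows about that the
    // HNS listing no longer contains; the caller would delete these.
    func staleNetworks(known map[string]bool, hns []string) []string {
        present := map[string]bool{}
        for _, id := range hns {
            present[id] = true
        }
        var stale []string
        for id := range known {
            if !present[id] {
                stale = append(stale, id)
            }
        }
        return stale
    }

    func main() {
        fmt.Println(staleNetworks(map[string]bool{"a": true, "b": true}, []string{"b"}))
    }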
-func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { - // We do not mount if a Hyper-V container - if !daemon.runAsHyperVContainer(container) { - return daemon.Mount(container) - } - return nil -} - -// conditionalUnmountOnCleanup is a platform specific helper function called -// during the cleanup of a container to unmount. -func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { - // We do not unmount if a Hyper-V container - if !daemon.runAsHyperVContainer(container) { - return daemon.Unmount(container) - } - return nil -} - -func driverOptions(config *Config) []nwconfig.Option { - return []nwconfig.Option{} -} - -func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { - return nil, nil -} - -// setDefaultIsolation determine the default isolation mode for the -// daemon to run in. This is only applicable on Windows -func (daemon *Daemon) setDefaultIsolation() error { - daemon.defaultIsolation = containertypes.Isolation("process") - // On client SKUs, default to Hyper-V - if system.IsWindowsClient() { - daemon.defaultIsolation = containertypes.Isolation("hyperv") - } - for _, option := range daemon.configStore.ExecOptions { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return err - } - key = strings.ToLower(key) - switch key { - - case "isolation": - if !containertypes.Isolation(val).IsValid() { - return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val) - } - if containertypes.Isolation(val).IsHyperV() { - daemon.defaultIsolation = containertypes.Isolation("hyperv") - } - if containertypes.Isolation(val).IsProcess() { - if system.IsWindowsClient() { - return fmt.Errorf("Windows client operating systems only support Hyper-V containers") - } - daemon.defaultIsolation = containertypes.Isolation("process") - } - default: - return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) - } - } - - logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) - return nil -} - -func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { - var layers []string - for _, l := range rootfs.DiffIDs { - layers = append(layers, l.String()) - } - return types.RootFS{ - Type: rootfs.Type, - Layers: layers, - BaseLayer: rootfs.BaseLayer, - } -} - -func setupDaemonProcess(config *Config) error { - return nil -} diff --git a/daemon/debugtrap_unix.go b/daemon/debugtrap_unix.go deleted file mode 100644 index c4a11b07f..000000000 --- a/daemon/debugtrap_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !windows - -package daemon - -import ( - "os" - "os/signal" - "syscall" - - psignal "github.com/docker/docker/pkg/signal" -) - -func setupDumpStackTrap() { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGUSR1) - go func() { - for range c { - psignal.DumpStacks() - } - }() -} diff --git a/daemon/debugtrap_unsupported.go b/daemon/debugtrap_unsupported.go deleted file mode 100644 index eed8222f7..000000000 --- a/daemon/debugtrap_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux,!darwin,!freebsd,!windows,!solaris - -package daemon - -func setupDumpStackTrap() { - return -} diff --git a/daemon/debugtrap_windows.go b/daemon/debugtrap_windows.go deleted file mode 100644 index 0eebc46ed..000000000 --- a/daemon/debugtrap_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package daemon - -import ( - "fmt" - "os" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/system" -) - -func setupDumpStackTrap() { - 
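The unix debug trap above wires SIGUSR1 to pkg/signal.DumpStacks. A self-contained equivalent using only the standard library; unlike the real helper, this sketch uses a fixed 1 MiB buffer rather than growing one until the dump fits (unix-only):

    package main

    import (
        "fmt"
        "os"
        "os/signal"
        "runtime"
        "syscall"
        "time"
    )

    // dumpStacksOnUSR1 dumps all goroutine stacks to stderr whenever the
    // process receives SIGUSR1.
    func dumpStacksOnUSR1() {
        c := make(chan os.Signal, 1)
        signal.Notify(c, syscall.SIGUSR1)
        go func() {
            for range c {
                buf := make([]byte, 1<<20) // fixed-size buffer; truncates huge dumps
                n := runtime.Stack(buf, true)
                fmt.Fprintf(os.Stderr, "=== stack trace ===\n%s\n", buf[:n])
            }
        }()
    }

    func main() {
        dumpStacksOnUSR1()
        fmt.Println("try: kill -USR1", os.Getpid())
        time.Sleep(time.Minute)
    }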
// Windows does not support signals like *nix systems. So instead of - // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be - // signaled. - go func() { - sa := syscall.SecurityAttributes{ - Length: 0, - } - ev := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid()) - if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 { - logrus.Debugf("Stackdump - waiting signal at %s", ev) - for { - syscall.WaitForSingleObject(h, syscall.INFINITE) - signal.DumpStacks() - } - } - }() -} diff --git a/daemon/delete.go b/daemon/delete.go deleted file mode 100644 index ec9d5c5f1..000000000 --- a/daemon/delete.go +++ /dev/null @@ -1,157 +0,0 @@ -package daemon - -import ( - "fmt" - "os" - "path" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/errors" - "github.com/docker/docker/layer" - volumestore "github.com/docker/docker/volume/store" - "github.com/docker/engine-api/types" -) - -// ContainerRm removes the container id from the filesystem. An error -// is returned if the container is not found, or if the remove -// fails. If the remove succeeds, the container name is released, and -// network links are removed. -func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - // Container state RemovalInProgress should be used to avoid races. - if inProgress := container.SetRemovalInProgress(); inProgress { - return nil - } - defer container.ResetRemovalInProgress() - - // check if container wasn't deregistered by previous rm since Get - if c := daemon.containers.Get(container.ID); c == nil { - return nil - } - - if config.RemoveLink { - return daemon.rmLink(container, name) - } - - err = daemon.cleanupContainer(container, config.ForceRemove) - if err == nil || config.ForceRemove { - if e := daemon.removeMountPoints(container, config.RemoveVolume); e != nil { - logrus.Error(e) - } - } - - return err -} - -func (daemon *Daemon) rmLink(container *container.Container, name string) error { - if name[0] != '/' { - name = "/" + name - } - parent, n := path.Split(name) - if parent == "/" { - return fmt.Errorf("Conflict, cannot remove the default name of the container") - } - - parent = strings.TrimSuffix(parent, "/") - pe, err := daemon.nameIndex.Get(parent) - if err != nil { - return fmt.Errorf("Cannot get parent %s for name %s", parent, name) - } - - daemon.releaseName(name) - parentContainer, _ := daemon.GetContainer(pe) - if parentContainer != nil { - daemon.linkIndex.unlink(name, container, parentContainer) - if err := daemon.updateNetwork(parentContainer); err != nil { - logrus.Debugf("Could not update network to remove link %s: %v", n, err) - } - } - return nil -} - -// cleanupContainer unregisters a container from the daemon, stops stats -// collection and cleanly removes contents and metadata from the filesystem. -func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) { - if container.IsRunning() { - if !forceRemove { - err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID) - return errors.NewRequestConflictError(err) - } - if err := daemon.Kill(container); err != nil { - return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err) - } - } - - // stop collection of stats for the container regardless - // if stats are currently getting collected. 
- daemon.statsCollector.stopCollection(container) - - if err = daemon.containerStop(container, 3); err != nil { - return err - } - - // Mark container dead. We don't want anybody to be restarting it. - container.SetDead() - - // Save container state to disk, so that if an error happens before the - // container meta file is removed from disk, a restart of - // docker will not bring a dead container back to life. - if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) { - logrus.Errorf("Error saving dying container to disk: %v", err) - } - - // If force removal is required, delete container from various - // indexes even if removal failed. - defer func() { - if err == nil || forceRemove { - daemon.nameIndex.Delete(container.ID) - daemon.linkIndex.delete(container) - selinuxFreeLxcContexts(container.ProcessLabel) - daemon.idIndex.Delete(container.ID) - daemon.containers.Delete(container.ID) - daemon.LogContainerEvent(container, "destroy") - } - }() - - if err = os.RemoveAll(container.Root); err != nil { - return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) - } - - // When container creation fails and `RWLayer` has not been created yet, we - // do not call `ReleaseRWLayer` - if container.RWLayer != nil { - metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer) - layer.LogReleaseMetadata(metadata) - if err != nil && err != layer.ErrMountDoesNotExist { - return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err) - } - } - - return nil -} - -// VolumeRm removes the volume with the given name. -// If the volume is referenced by a container it is not removed. -// This is called directly from the remote API. -func (daemon *Daemon) VolumeRm(name string) error { - v, err := daemon.volumes.Get(name) - if err != nil { - return err - } - - if err := daemon.volumes.Remove(v); err != nil { - if volumestore.IsInUse(err) { - err := fmt.Errorf("Unable to remove volume, volume still in use: %v", err) - return errors.NewRequestConflictError(err) - } - return fmt.Errorf("Error while removing volume %s: %v", name, err) - } - daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) - return nil -} diff --git a/daemon/delete_test.go b/daemon/delete_test.go deleted file mode 100644 index adce2eb8c..000000000 --- a/daemon/delete_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package daemon - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/docker/docker/container" - "github.com/docker/engine-api/types" - containertypes "github.com/docker/engine-api/types/container" -) - -func TestContainerDoubleDelete(t *testing.T) { - tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - daemon := &Daemon{ - repository: tmp, - root: tmp, - } - daemon.containers = container.NewMemoryStore() - - container := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "test", - State: container.NewState(), - Config: &containertypes.Config{}, - }, - } - daemon.containers.Add(container.ID, container) - - // Mark the container as having a delete in progress - container.SetRemovalInProgress() - - // Try to remove the container when its state is RemovalInProgress. - // It should ignore the container and not return an error. 
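The RemovalInProgress flag set in ContainerRm (previous hunk) is what makes the double-delete test in this hunk pass: a second, concurrent removal becomes a no-op instead of a race. The pattern in isolation, using an atomic compare-and-swap in place of the container's state lock (names hypothetical):

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    // removalGuard makes concurrent removals of the same resource
    // idempotent: only the first caller past the CAS does the work.
    type removalGuard struct {
        removing int32
    }

    func (g *removalGuard) Remove(id string) {
        if !atomic.CompareAndSwapInt32(&g.removing, 0, 1) {
            return // a removal is already in progress; ignore, do not error
        }
        defer atomic.StoreInt32(&g.removing, 0)
        fmt.Println("removing", id)
    }

    func main() {
        g := &removalGuard{}
        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                g.Remove("test")
            }()
        }
        wg.Wait()
    }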
- if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err != nil { - t.Fatal(err) - } -} diff --git a/daemon/discovery.go b/daemon/discovery.go deleted file mode 100644 index 30d2e02a7..000000000 --- a/daemon/discovery.go +++ /dev/null @@ -1,203 +0,0 @@ -package daemon - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/discovery" - - // Register the libkv backends for discovery. - _ "github.com/docker/docker/pkg/discovery/kv" -) - -const ( - // defaultDiscoveryHeartbeat is the default value for discovery heartbeat interval. - defaultDiscoveryHeartbeat = 20 * time.Second - // defaultDiscoveryTTLFactor is the default TTL factor for discovery - defaultDiscoveryTTLFactor = 3 -) - -var errDiscoveryDisabled = errors.New("discovery is disabled") - -type discoveryReloader interface { - discovery.Watcher - Stop() - Reload(backend, address string, clusterOpts map[string]string) error - ReadyCh() <-chan struct{} -} - -type daemonDiscoveryReloader struct { - backend discovery.Backend - ticker *time.Ticker - term chan bool - readyCh chan struct{} -} - -func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - return d.backend.Watch(stopCh) -} - -func (d *daemonDiscoveryReloader) ReadyCh() <-chan struct{} { - return d.readyCh -} - -func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) { - var ( - heartbeat = defaultDiscoveryHeartbeat - ttl = defaultDiscoveryTTLFactor * defaultDiscoveryHeartbeat - ) - - if hb, ok := clusterOpts["discovery.heartbeat"]; ok { - h, err := strconv.Atoi(hb) - if err != nil { - return time.Duration(0), time.Duration(0), err - } - heartbeat = time.Duration(h) * time.Second - ttl = defaultDiscoveryTTLFactor * heartbeat - } - - if tstr, ok := clusterOpts["discovery.ttl"]; ok { - t, err := strconv.Atoi(tstr) - if err != nil { - return time.Duration(0), time.Duration(0), err - } - ttl = time.Duration(t) * time.Second - - if _, ok := clusterOpts["discovery.heartbeat"]; !ok { - h := int(t / defaultDiscoveryTTLFactor) - heartbeat = time.Duration(h) * time.Second - } - - if ttl <= heartbeat { - return time.Duration(0), time.Duration(0), - fmt.Errorf("discovery.ttl timer must be greater than discovery.heartbeat") - } - } - - return heartbeat, ttl, nil -} - -// initDiscovery initializes the nodes discovery subsystem by connecting to the specified backend -// and starts a registration loop to advertise the current node under the specified address. -func initDiscovery(backendAddress, advertiseAddress string, clusterOpts map[string]string) (discoveryReloader, error) { - heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) - if err != nil { - return nil, err - } - - reloader := &daemonDiscoveryReloader{ - backend: backend, - ticker: time.NewTicker(heartbeat), - term: make(chan bool), - readyCh: make(chan struct{}), - } - // We call Register() on the discovery backend in a loop for the whole lifetime of the daemon, - // but we never actually Watch() for nodes appearing and disappearing for the moment. - go reloader.advertiseHeartbeat(advertiseAddress) - return reloader, nil -} - -// advertiseHeartbeat registers the current node against the discovery backend using the specified -// address. The function never returns, as registration against the backend comes with a TTL and -// requires regular heartbeats. 
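discoveryOpts above encodes three rules: ttl defaults to three times the heartbeat, a ttl given alone implies heartbeat = ttl/3, and ttl must stay strictly greater than the heartbeat. A standalone re-derivation of those rules (helper name hypothetical):

    package main

    import (
        "fmt"
        "strconv"
        "time"
    )

    const (
        defaultHeartbeat = 20 * time.Second
        ttlFactor        = 3
    )

    // discoveryTimers re-derives the heartbeat/ttl rules: ttl defaults to
    // ttlFactor*heartbeat, a ttl given alone implies heartbeat = ttl/3,
    // and ttl must stay strictly greater than the heartbeat.
    func discoveryTimers(opts map[string]string) (time.Duration, time.Duration, error) {
        hb, ttl := defaultHeartbeat, ttlFactor*defaultHeartbeat
        if v, ok := opts["discovery.heartbeat"]; ok {
            n, err := strconv.Atoi(v)
            if err != nil {
                return 0, 0, err
            }
            hb = time.Duration(n) * time.Second
            ttl = ttlFactor * hb
        }
        if v, ok := opts["discovery.ttl"]; ok {
            n, err := strconv.Atoi(v)
            if err != nil {
                return 0, 0, err
            }
            ttl = time.Duration(n) * time.Second
            if _, set := opts["discovery.heartbeat"]; !set {
                hb = time.Duration(n/ttlFactor) * time.Second
            }
            if ttl <= hb {
                return 0, 0, fmt.Errorf("discovery.ttl must be greater than discovery.heartbeat")
            }
        }
        return hb, ttl, nil
    }

    func main() {
        fmt.Println(discoveryTimers(map[string]string{"discovery.ttl": "30"}))
        fmt.Println(discoveryTimers(map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"}))
    }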
-func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) { - var ready bool - if err := d.initHeartbeat(address); err == nil { - ready = true - close(d.readyCh) - } - - for { - select { - case <-d.ticker.C: - if err := d.backend.Register(address); err != nil { - log.Warnf("Registering as %q in discovery failed: %v", address, err) - } else { - if !ready { - close(d.readyCh) - ready = true - } - } - case <-d.term: - return - } - } -} - -// initHeartbeat is used to do the first heartbeat. It uses a tight loop until -// either the timeout period is reached or the heartbeat is successful and returns. -func (d *daemonDiscoveryReloader) initHeartbeat(address string) error { - // Setup a short ticker until the first heartbeat has succeeded - t := time.NewTicker(500 * time.Millisecond) - defer t.Stop() - // timeout makes sure that after a period of time we stop being so aggressive trying to reach the discovery service - timeout := time.After(60 * time.Second) - - for { - select { - case <-timeout: - return errors.New("timeout waiting for initial discovery") - case <-d.term: - return errors.New("terminated") - case <-t.C: - if err := d.backend.Register(address); err == nil { - return nil - } - } - } -} - -// Reload makes the watcher to stop advertising and reconfigures it to advertise in a new address. -func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error { - d.Stop() - - heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) - if err != nil { - return err - } - - d.backend = backend - d.ticker = time.NewTicker(heartbeat) - d.readyCh = make(chan struct{}) - - go d.advertiseHeartbeat(advertiseAddress) - return nil -} - -// Stop terminates the discovery advertising. -func (d *daemonDiscoveryReloader) Stop() { - d.ticker.Stop() - d.term <- true -} - -func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) { - heartbeat, ttl, err := discoveryOpts(clusterOpts) - if err != nil { - return 0, nil, err - } - - backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts) - if err != nil { - return 0, nil, err - } - return heartbeat, backend, nil -} - -// modifiedDiscoverySettings returns whether the discovery configuration has been modified or not. 
-func modifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool { - if config.ClusterStore != backendType || config.ClusterAdvertise != advertise { - return true - } - - if (config.ClusterOpts == nil && clusterOpts == nil) || - (config.ClusterOpts == nil && len(clusterOpts) == 0) || - (len(config.ClusterOpts) == 0 && clusterOpts == nil) { - return false - } - - return !reflect.DeepEqual(config.ClusterOpts, clusterOpts) -} diff --git a/daemon/discovery_test.go b/daemon/discovery_test.go deleted file mode 100644 index 1764af1e9..000000000 --- a/daemon/discovery_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package daemon - -import ( - "testing" - "time" -) - -func TestDiscoveryOpts(t *testing.T) { - clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"} - heartbeat, ttl, err := discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("discovery.ttl < discovery.heartbeat must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("discovery.ttl == discovery.heartbeat must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "invalid"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("invalid discovery.heartbeat must fail") - } - - clusterOpts = map[string]string{"discovery.ttl": "invalid"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("invalid discovery.ttl must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if heartbeat != 10*time.Second { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) - } - - if ttl != 20*time.Second { - t.Fatalf("TTL - Expected : %v, Actual : %v", 20*time.Second, ttl) - } - - clusterOpts = map[string]string{"discovery.heartbeat": "10"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if heartbeat != 10*time.Second { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) - } - - expected := 10 * defaultDiscoveryTTLFactor * time.Second - if ttl != expected { - t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) - } - - clusterOpts = map[string]string{"discovery.ttl": "30"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if ttl != 30*time.Second { - t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl) - } - - expected = 30 * time.Second / defaultDiscoveryTTLFactor - if heartbeat != expected { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat) - } - - clusterOpts = map[string]string{} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if heartbeat != defaultDiscoveryHeartbeat { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", defaultDiscoveryHeartbeat, heartbeat) - } - - expected = defaultDiscoveryHeartbeat * defaultDiscoveryTTLFactor - if ttl != expected { - t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) - } -} - -func TestModifiedDiscoverySettings(t *testing.T) { - cases := []struct { - current *Config - modified *Config - expected bool - }{ - { - current: discoveryConfig("foo", "bar", map[string]string{}), - modified: discoveryConfig("foo", "bar", map[string]string{}), - expected: false, - }, - { - current: 
discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), - modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), - expected: false, - }, - { - current: discoveryConfig("foo", "bar", map[string]string{}), - modified: discoveryConfig("foo", "bar", nil), - expected: false, - }, - { - current: discoveryConfig("foo", "bar", nil), - modified: discoveryConfig("foo", "bar", map[string]string{}), - expected: false, - }, - { - current: discoveryConfig("foo", "bar", nil), - modified: discoveryConfig("baz", "bar", nil), - expected: true, - }, - { - current: discoveryConfig("foo", "bar", nil), - modified: discoveryConfig("foo", "baz", nil), - expected: true, - }, - { - current: discoveryConfig("foo", "bar", nil), - modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), - expected: true, - }, - } - - for _, c := range cases { - got := modifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts) - if c.expected != got { - t.Fatalf("expected %v, got %v: current config %v, new config %v", c.expected, got, c.current, c.modified) - } - } -} - -func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config { - return &Config{ - CommonConfig: CommonConfig{ - ClusterStore: backendAddr, - ClusterAdvertise: advertiseAddr, - ClusterOpts: opts, - }, - } -} diff --git a/daemon/errors.go b/daemon/errors.go deleted file mode 100644 index 131c9a1e2..000000000 --- a/daemon/errors.go +++ /dev/null @@ -1,57 +0,0 @@ -package daemon - -import ( - "fmt" - "strings" - - "github.com/docker/docker/errors" - "github.com/docker/docker/reference" -) - -func (d *Daemon) imageNotExistToErrcode(err error) error { - if dne, isDNE := err.(ErrImageDoesNotExist); isDNE { - if strings.Contains(dne.RefOrID, "@") { - e := fmt.Errorf("No such image: %s", dne.RefOrID) - return errors.NewRequestNotFoundError(e) - } - tag := reference.DefaultTag - ref, err := reference.ParseNamed(dne.RefOrID) - if err != nil { - e := fmt.Errorf("No such image: %s:%s", dne.RefOrID, tag) - return errors.NewRequestNotFoundError(e) - } - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - tag = tagged.Tag() - } - e := fmt.Errorf("No such image: %s:%s", ref.Name(), tag) - return errors.NewRequestNotFoundError(e) - } - return err -} - -type errNotRunning struct { - containerID string -} - -func (e errNotRunning) Error() string { - return fmt.Sprintf("Container %s is not running", e.containerID) -} - -func (e errNotRunning) ContainerIsRunning() bool { - return false -} - -func errContainerIsRestarting(containerID string) error { - err := fmt.Errorf("Container %s is restarting, wait until the container is running", containerID) - return errors.NewRequestConflictError(err) -} - -func errExecNotFound(id string) error { - err := fmt.Errorf("No such exec instance '%s' found in daemon", id) - return errors.NewRequestNotFoundError(err) -} - -func errExecPaused(id string) error { - err := fmt.Errorf("Container %s is paused, unpause the container before exec", id) - return errors.NewRequestConflictError(err) -} diff --git a/daemon/events.go b/daemon/events.go deleted file mode 100644 index 7fb8cd29c..000000000 --- a/daemon/events.go +++ /dev/null @@ -1,117 +0,0 @@ -package daemon - -import ( - "strings" - "time" - - "github.com/docker/docker/container" - daemonevents "github.com/docker/docker/daemon/events" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - "github.com/docker/libnetwork" -) - -// 
LogContainerEvent generates an event related to a container with only the default attributes. -func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) { - daemon.LogContainerEventWithAttributes(container, action, map[string]string{}) -} - -// LogContainerEventWithAttributes generates an event related to a container with specific given attributes. -func (daemon *Daemon) LogContainerEventWithAttributes(container *container.Container, action string, attributes map[string]string) { - copyAttributes(attributes, container.Config.Labels) - if container.Config.Image != "" { - attributes["image"] = container.Config.Image - } - attributes["name"] = strings.TrimLeft(container.Name, "/") - - actor := events.Actor{ - ID: container.ID, - Attributes: attributes, - } - daemon.EventsService.Log(action, events.ContainerEventType, actor) -} - -// LogImageEvent generates an event related to an image with only the default attributes. -func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { - daemon.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) -} - -// LogImageEventWithAttributes generates an event related to an image with specific given attributes. -func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { - img, err := daemon.GetImage(imageID) - if err == nil && img.Config != nil { - // image has not been removed yet. - // it could be missing if the event is `delete`. - copyAttributes(attributes, img.Config.Labels) - } - if refName != "" { - attributes["name"] = refName - } - actor := events.Actor{ - ID: imageID, - Attributes: attributes, - } - - daemon.EventsService.Log(action, events.ImageEventType, actor) -} - -// LogVolumeEvent generates an event related to a volume. -func (daemon *Daemon) LogVolumeEvent(volumeID, action string, attributes map[string]string) { - actor := events.Actor{ - ID: volumeID, - Attributes: attributes, - } - daemon.EventsService.Log(action, events.VolumeEventType, actor) -} - -// LogNetworkEvent generates an event related to a network with only the default attributes. -func (daemon *Daemon) LogNetworkEvent(nw libnetwork.Network, action string) { - daemon.LogNetworkEventWithAttributes(nw, action, map[string]string{}) -} - -// LogNetworkEventWithAttributes generates an event related to a network with specific given attributes. -func (daemon *Daemon) LogNetworkEventWithAttributes(nw libnetwork.Network, action string, attributes map[string]string) { - attributes["name"] = nw.Name() - attributes["type"] = nw.Type() - actor := events.Actor{ - ID: nw.ID(), - Attributes: attributes, - } - daemon.EventsService.Log(action, events.NetworkEventType, actor) -} - -// LogDaemonEventWithAttributes generates an event related to the daemon itself with specific given attributes. -func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) { - if daemon.EventsService != nil { - if info, err := daemon.SystemInfo(); err == nil && info.Name != "" { - attributes["name"] = info.Name - } - actor := events.Actor{ - ID: daemon.ID, - Attributes: attributes, - } - daemon.EventsService.Log(action, events.DaemonEventType, actor) - } -} - -// SubscribeToEvents returns the currently record of events, a channel to stream new events from, and a function to cancel the stream of events. 
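
Editor's note: the Log*Event helpers above copy container labels into each event's attribute map by value. As a standalone illustration of why that copy matters (the main function and its variable names are illustrative, not part of the removed API), this sketch reuses the copyAttributes helper that appears at the end of this file:

package main

import "fmt"

// copyAttributes mirrors the helper at the end of the removed
// daemon/events.go: labels are copied *into* the event's attribute map,
// so annotating the event can never write back into the container's
// own Labels map.
func copyAttributes(attributes, labels map[string]string) {
	for k, v := range labels {
		attributes[k] = v
	}
}

func main() {
	labels := map[string]string{"node": "1", "os": "alpine"}
	attributes := map[string]string{"node": "2", "foo": "bar"}

	copyAttributes(attributes, labels)
	attributes["name"] = "container_name" // event-only annotation

	fmt.Println(attributes) // map[foo:bar name:container_name node:1 os:alpine] - labels win on conflicts
	fmt.Println(labels)     // map[node:1 os:alpine] - untouched, no "name" key leaked in
}
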
-func (daemon *Daemon) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) { - ef := daemonevents.NewFilter(filter) - return daemon.EventsService.SubscribeTopic(since, until, ef) -} - -// UnsubscribeFromEvents stops the event subscription for a client by closing the -// channel where the daemon sends events to. -func (daemon *Daemon) UnsubscribeFromEvents(listener chan interface{}) { - daemon.EventsService.Evict(listener) -} - -// copyAttributes guarantees that labels are not mutated by event triggers. -func copyAttributes(attributes, labels map[string]string) { - if labels == nil { - return - } - for k, v := range labels { - attributes[k] = v - } -} diff --git a/daemon/events/events.go b/daemon/events/events.go deleted file mode 100644 index df2181fb0..000000000 --- a/daemon/events/events.go +++ /dev/null @@ -1,154 +0,0 @@ -package events - -import ( - "sync" - "time" - - "github.com/docker/docker/pkg/pubsub" - eventtypes "github.com/docker/engine-api/types/events" -) - -const ( - eventsLimit = 64 - bufferSize = 1024 -) - -// Events is pubsub channel for events generated by the engine. -type Events struct { - mu sync.Mutex - events []eventtypes.Message - pub *pubsub.Publisher -} - -// New returns new *Events instance -func New() *Events { - return &Events{ - events: make([]eventtypes.Message, 0, eventsLimit), - pub: pubsub.NewPublisher(100*time.Millisecond, bufferSize), - } -} - -// Subscribe adds new listener to events, returns slice of 64 stored -// last events, a channel in which you can expect new events (in form -// of interface{}, so you need type assertion), and a function to call -// to stop the stream of events. -func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) { - e.mu.Lock() - current := make([]eventtypes.Message, len(e.events)) - copy(current, e.events) - l := e.pub.Subscribe() - e.mu.Unlock() - - cancel := func() { - e.Evict(l) - } - return current, l, cancel -} - -// SubscribeTopic adds new listener to events, returns slice of 64 stored -// last events, a channel in which you can expect new events (in form -// of interface{}, so you need type assertion). -func (e *Events) SubscribeTopic(since, until time.Time, ef *Filter) ([]eventtypes.Message, chan interface{}) { - e.mu.Lock() - - var topic func(m interface{}) bool - if ef != nil && ef.filter.Len() > 0 { - topic = func(m interface{}) bool { return ef.Include(m.(eventtypes.Message)) } - } - - buffered := e.loadBufferedEvents(since, until, topic) - - var ch chan interface{} - if topic != nil { - ch = e.pub.SubscribeTopic(topic) - } else { - // Subscribe to all events if there are no filters - ch = e.pub.Subscribe() - } - - e.mu.Unlock() - return buffered, ch -} - -// Evict evicts listener from pubsub -func (e *Events) Evict(l chan interface{}) { - e.pub.Evict(l) -} - -// Log broadcasts event to listeners. Each listener has 100 millisecond for -// receiving event or it will be skipped. 
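
Editor's note: Events.Log, just below, caps the in-memory history at eventsLimit messages. A minimal sketch of the same discard-oldest idiom, with a small illustrative limit in place of the real 64:

package main

import "fmt"

// limit stands in for eventsLimit (64 in the removed code).
const limit = 3

// record appends to a fixed-capacity history, discarding the oldest
// entry once the buffer is full: the same copy-and-overwrite idiom
// Events.Log uses below.
func record(events []string, ev string) []string {
	if len(events) == cap(events) {
		copy(events, events[1:]) // shift left, dropping the oldest
		events[len(events)-1] = ev
		return events
	}
	return append(events, ev)
}

func main() {
	events := make([]string, 0, limit)
	for _, ev := range []string{"create", "start", "die", "destroy"} {
		events = record(events, ev)
	}
	fmt.Println(events) // [start die destroy]
}
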
-func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { - now := time.Now().UTC() - jm := eventtypes.Message{ - Action: action, - Type: eventType, - Actor: actor, - Time: now.Unix(), - TimeNano: now.UnixNano(), - } - - // fill deprecated fields for container and images - switch eventType { - case eventtypes.ContainerEventType: - jm.ID = actor.ID - jm.Status = action - jm.From = actor.Attributes["image"] - case eventtypes.ImageEventType: - jm.ID = actor.ID - jm.Status = action - } - - e.mu.Lock() - if len(e.events) == cap(e.events) { - // discard oldest event - copy(e.events, e.events[1:]) - e.events[len(e.events)-1] = jm - } else { - e.events = append(e.events, jm) - } - e.mu.Unlock() - e.pub.Publish(jm) -} - -// SubscribersCount returns number of event listeners -func (e *Events) SubscribersCount() int { - return e.pub.Len() -} - -// loadBufferedEvents iterates over the cached events in the buffer -// and returns those that were emitted between two specific dates. -// It uses `time.Unix(seconds, nanoseconds)` to generate valid dates with those arguments. -// It filters those buffered messages with a topic function if it's not nil, otherwise it adds all messages. -func (e *Events) loadBufferedEvents(since, until time.Time, topic func(interface{}) bool) []eventtypes.Message { - var buffered []eventtypes.Message - if since.IsZero() && until.IsZero() { - return buffered - } - - var sinceNanoUnix int64 - if !since.IsZero() { - sinceNanoUnix = since.UnixNano() - } - - var untilNanoUnix int64 - if !until.IsZero() { - untilNanoUnix = until.UnixNano() - } - - for i := len(e.events) - 1; i >= 0; i-- { - ev := e.events[i] - - if ev.TimeNano < sinceNanoUnix { - break - } - - if untilNanoUnix > 0 && ev.TimeNano > untilNanoUnix { - continue - } - - if topic == nil || topic(ev) { - buffered = append([]eventtypes.Message{ev}, buffered...) 
- } - } - return buffered -} diff --git a/daemon/events/events_test.go b/daemon/events/events_test.go deleted file mode 100644 index 0c8ee6b92..000000000 --- a/daemon/events/events_test.go +++ /dev/null @@ -1,275 +0,0 @@ -package events - -import ( - "fmt" - "testing" - "time" - - "github.com/docker/docker/daemon/events/testutils" - "github.com/docker/engine-api/types/events" - timetypes "github.com/docker/engine-api/types/time" -) - -func TestEventsLog(t *testing.T) { - e := New() - _, l1, _ := e.Subscribe() - _, l2, _ := e.Subscribe() - defer e.Evict(l1) - defer e.Evict(l2) - count := e.SubscribersCount() - if count != 2 { - t.Fatalf("Must be 2 subscribers, got %d", count) - } - actor := events.Actor{ - ID: "cont", - Attributes: map[string]string{"image": "image"}, - } - e.Log("test", events.ContainerEventType, actor) - select { - case msg := <-l1: - jmsg, ok := msg.(events.Message) - if !ok { - t.Fatalf("Unexpected type %T", msg) - } - if len(e.events) != 1 { - t.Fatalf("Must be only one event, got %d", len(e.events)) - } - if jmsg.Status != "test" { - t.Fatalf("Status should be test, got %s", jmsg.Status) - } - if jmsg.ID != "cont" { - t.Fatalf("ID should be cont, got %s", jmsg.ID) - } - if jmsg.From != "image" { - t.Fatalf("From should be image, got %s", jmsg.From) - } - case <-time.After(1 * time.Second): - t.Fatal("Timeout waiting for broadcasted message") - } - select { - case msg := <-l2: - jmsg, ok := msg.(events.Message) - if !ok { - t.Fatalf("Unexpected type %T", msg) - } - if len(e.events) != 1 { - t.Fatalf("Must be only one event, got %d", len(e.events)) - } - if jmsg.Status != "test" { - t.Fatalf("Status should be test, got %s", jmsg.Status) - } - if jmsg.ID != "cont" { - t.Fatalf("ID should be cont, got %s", jmsg.ID) - } - if jmsg.From != "image" { - t.Fatalf("From should be image, got %s", jmsg.From) - } - case <-time.After(1 * time.Second): - t.Fatal("Timeout waiting for broadcasted message") - } -} - -func TestEventsLogTimeout(t *testing.T) { - e := New() - _, l, _ := e.Subscribe() - defer e.Evict(l) - - c := make(chan struct{}) - go func() { - actor := events.Actor{ - ID: "image", - } - e.Log("test", events.ImageEventType, actor) - close(c) - }() - - select { - case <-c: - case <-time.After(time.Second): - t.Fatal("Timeout publishing message") - } -} - -func TestLogEvents(t *testing.T) { - e := New() - - for i := 0; i < eventsLimit+16; i++ { - action := fmt.Sprintf("action_%d", i) - id := fmt.Sprintf("cont_%d", i) - from := fmt.Sprintf("image_%d", i) - - actor := events.Actor{ - ID: id, - Attributes: map[string]string{"image": from}, - } - e.Log(action, events.ContainerEventType, actor) - } - time.Sleep(50 * time.Millisecond) - current, l, _ := e.Subscribe() - for i := 0; i < 10; i++ { - num := i + eventsLimit + 16 - action := fmt.Sprintf("action_%d", num) - id := fmt.Sprintf("cont_%d", num) - from := fmt.Sprintf("image_%d", num) - - actor := events.Actor{ - ID: id, - Attributes: map[string]string{"image": from}, - } - e.Log(action, events.ContainerEventType, actor) - } - if len(e.events) != eventsLimit { - t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events)) - } - - var msgs []events.Message - for len(msgs) < 10 { - m := <-l - jm, ok := (m).(events.Message) - if !ok { - t.Fatalf("Unexpected type %T", m) - } - msgs = append(msgs, jm) - } - if len(current) != eventsLimit { - t.Fatalf("Must be %d events, got %d", eventsLimit, len(current)) - } - first := current[0] - if first.Status != "action_16" { - t.Fatalf("First action is %s, must be action_16", 
first.Status) - } - last := current[len(current)-1] - if last.Status != "action_79" { - t.Fatalf("Last action is %s, must be action_79", last.Status) - } - - firstC := msgs[0] - if firstC.Status != "action_80" { - t.Fatalf("First action is %s, must be action_80", firstC.Status) - } - lastC := msgs[len(msgs)-1] - if lastC.Status != "action_89" { - t.Fatalf("Last action is %s, must be action_89", lastC.Status) - } -} - -// https://github.com/docker/docker/issues/20999 -// Fixtures: -// -//2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) -//2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge) -//2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) -func TestLoadBufferedEvents(t *testing.T) { - now := time.Now() - f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) - if err != nil { - t.Fatal(err) - } - s, sNano, err := timetypes.ParseTimestamps(f, -1) - if err != nil { - t.Fatal(err) - } - - m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") - if err != nil { - t.Fatal(err) - } - m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") - if err != nil { - t.Fatal(err) - } - m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") - if err != nil { - t.Fatal(err) - } - - events := &Events{ - events: []events.Message{*m1, *m2, *m3}, - } - - since := time.Unix(s, sNano) - until := time.Time{} - - out := events.loadBufferedEvents(since, until, nil) - if len(out) != 1 { - t.Fatalf("expected 1 message, got %d: %v", len(out), out) - } -} - -func TestLoadBufferedEventsOnlyFromPast(t *testing.T) { - now := time.Now() - f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.090000000+02:00", now) - if err != nil { - t.Fatal(err) - } - s, sNano, err := timetypes.ParseTimestamps(f, 0) - if err != nil { - t.Fatal(err) - } - - f, err = timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) - if err != nil { - t.Fatal(err) - } - u, uNano, err := timetypes.ParseTimestamps(f, 0) - if err != nil { - t.Fatal(err) - } - - m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") - if err != nil { - t.Fatal(err) - } - m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") - if err != nil { - t.Fatal(err) - } - m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") - if err != nil { - t.Fatal(err) - } - - 
events := &Events{
-		events: []events.Message{*m1, *m2, *m3},
-	}
-
-	since := time.Unix(s, sNano)
-	until := time.Unix(u, uNano)
-
-	out := events.loadBufferedEvents(since, until, nil)
-	if len(out) != 1 {
-		t.Fatalf("expected 1 message, got %d: %v", len(out), out)
-	}
-
-	if out[0].Type != "network" {
-		t.Fatalf("expected network event, got %s", out[0].Type)
-	}
-}
-
-// #13753
-func TestIgnoreBufferedWhenNoTimes(t *testing.T) {
-	m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)")
-	if err != nil {
-		t.Fatal(err)
-	}
-	m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)")
-	if err != nil {
-		t.Fatal(err)
-	}
-	m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	events := &Events{
-		events: []events.Message{*m1, *m2, *m3},
-	}
-
-	since := time.Time{}
-	until := time.Time{}
-
-	out := events.loadBufferedEvents(since, until, nil)
-	if len(out) != 0 {
-		t.Fatalf("expected 0 buffered events, got %q", out)
-	}
-}
diff --git a/daemon/events/filter.go b/daemon/events/filter.go
deleted file mode 100644
index b38c034c9..000000000
--- a/daemon/events/filter.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package events
-
-import (
-	"github.com/docker/docker/reference"
-	"github.com/docker/engine-api/types/events"
-	"github.com/docker/engine-api/types/filters"
-)
-
-// Filter can filter out docker events from a stream
-type Filter struct {
-	filter filters.Args
-}
-
-// NewFilter creates a new Filter
-func NewFilter(filter filters.Args) *Filter {
-	return &Filter{filter: filter}
-}
-
-// Include returns true when the event ev is included by the filters
-func (ef *Filter) Include(ev events.Message) bool {
-	return ef.filter.ExactMatch("event", ev.Action) &&
-		ef.filter.ExactMatch("type", ev.Type) &&
-		ef.matchDaemon(ev) &&
-		ef.matchContainer(ev) &&
-		ef.matchVolume(ev) &&
-		ef.matchNetwork(ev) &&
-		ef.matchImage(ev) &&
-		ef.matchLabels(ev.Actor.Attributes)
-}
-
-func (ef *Filter) matchLabels(attributes map[string]string) bool {
-	if !ef.filter.Include("label") {
-		return true
-	}
-	return ef.filter.MatchKVList("label", attributes)
-}
-
-func (ef *Filter) matchDaemon(ev events.Message) bool {
-	return ef.fuzzyMatchName(ev, events.DaemonEventType)
-}
-
-func (ef *Filter) matchContainer(ev events.Message) bool {
-	return ef.fuzzyMatchName(ev, events.ContainerEventType)
-}
-
-func (ef *Filter) matchVolume(ev events.Message) bool {
-	return ef.fuzzyMatchName(ev, events.VolumeEventType)
-}
-
-func (ef *Filter) matchNetwork(ev events.Message) bool {
-	return ef.fuzzyMatchName(ev, events.NetworkEventType)
-}
-
-func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool {
-	return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) ||
-		ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"])
-}
-
-// matchImage matches against both event.Actor.ID (for image events)
-// and event.Actor.Attributes["image"] (for container events), so that any container that was created
-// from an image will be included in the image events. Also compare both
-// against the stripped repo name without any tags.
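
Editor's note: Include above is simply a conjunction of per-field matchers, which is what makes new filterable fields cheap to add. A reduced sketch of that shape (the event struct and matcher type are illustrative stand-ins for the engine-api types):

package main

import (
	"fmt"
	"strings"
)

// event and matcher are illustrative stand-ins; the removed code works
// on events.Message and filters.Args.
type event struct {
	Action, Type, Name string
}

type matcher func(event) bool

// include has the same shape as Filter.Include above: a logical AND
// over independent per-field matchers, so each filterable field is one
// self-contained predicate.
func include(ev event, ms ...matcher) bool {
	for _, m := range ms {
		if !m(ev) {
			return false
		}
	}
	return true
}

func main() {
	ev := event{Action: "die", Type: "container", Name: "small_hoover"}
	ok := include(ev,
		func(e event) bool { return e.Type == "container" },              // exact match
		func(e event) bool { return strings.HasPrefix(e.Name, "small") }, // fuzzy name match
	)
	fmt.Println(ok) // true
}
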
-func (ef *Filter) matchImage(ev events.Message) bool { - id := ev.Actor.ID - nameAttr := "image" - var imageName string - - if ev.Type == events.ImageEventType { - nameAttr = "name" - } - - if n, ok := ev.Actor.Attributes[nameAttr]; ok { - imageName = n - } - return ef.filter.ExactMatch("image", id) || - ef.filter.ExactMatch("image", imageName) || - ef.filter.ExactMatch("image", stripTag(id)) || - ef.filter.ExactMatch("image", stripTag(imageName)) -} - -func stripTag(image string) string { - ref, err := reference.ParseNamed(image) - if err != nil { - return image - } - return ref.Name() -} diff --git a/daemon/events/testutils/testutils.go b/daemon/events/testutils/testutils.go deleted file mode 100644 index c84418a9e..000000000 --- a/daemon/events/testutils/testutils.go +++ /dev/null @@ -1,76 +0,0 @@ -package eventstestutils - -import ( - "fmt" - "regexp" - "strings" - "time" - - "github.com/docker/engine-api/types/events" - timetypes "github.com/docker/engine-api/types/time" -) - -var ( - reTimestamp = `(?P\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{9}(:?(:?(:?-|\+)\d{2}:\d{2})|Z))` - reEventType = `(?P\w+)` - reAction = `(?P\w+)` - reID = `(?P[^\s]+)` - reAttributes = `(\s\((?P[^\)]+)\))?` - reString = fmt.Sprintf(`\A%s\s%s\s%s\s%s%s\z`, reTimestamp, reEventType, reAction, reID, reAttributes) - - // eventCliRegexp is a regular expression that matches all possible event outputs in the cli - eventCliRegexp = regexp.MustCompile(reString) -) - -// ScanMap turns an event string like the default ones formatted in the cli output -// and turns it into map. -func ScanMap(text string) map[string]string { - matches := eventCliRegexp.FindAllStringSubmatch(text, -1) - md := map[string]string{} - if len(matches) == 0 { - return md - } - - names := eventCliRegexp.SubexpNames() - for i, n := range matches[0] { - md[names[i]] = n - } - return md -} - -// Scan turns an event string like the default ones formatted in the cli output -// and turns it into an event message. 
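
Editor's note: ScanMap above leans on Go's named capture groups to turn a CLI-formatted event line into a map. A trimmed, self-contained version of the same technique (the pattern below keeps only four of the original groups and drops attributes):

package main

import (
	"fmt"
	"regexp"
)

// A cut-down version of the pattern ScanMap uses above.
var eventRe = regexp.MustCompile(
	`\A(?P<timestamp>\S+)\s(?P<eventType>\w+)\s(?P<action>\w+)\s(?P<id>\S+)\z`)

// scanMap pairs each named capture group with its submatch, the same
// technique the removed testutils package relies on.
func scanMap(text string) map[string]string {
	md := map[string]string{}
	m := eventRe.FindStringSubmatch(text)
	if m == nil {
		return md
	}
	for i, name := range eventRe.SubexpNames() {
		if name != "" { // index 0 is the whole match and has no name
			md[name] = m[i]
		}
	}
	return md
}

func main() {
	fmt.Println(scanMap("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a"))
	// map[action:die eventType:container id:0b863f2a timestamp:2016-03-07T17:28:03.022433271+02:00]
}
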
-func Scan(text string) (*events.Message, error) { - md := ScanMap(text) - if len(md) == 0 { - return nil, fmt.Errorf("text is not an event: %s", text) - } - - f, err := timetypes.GetTimestamp(md["timestamp"], time.Now()) - if err != nil { - return nil, err - } - - t, tn, err := timetypes.ParseTimestamps(f, -1) - if err != nil { - return nil, err - } - - attrs := make(map[string]string) - for _, a := range strings.SplitN(md["attributes"], ", ", -1) { - kv := strings.SplitN(a, "=", 2) - attrs[kv[0]] = kv[1] - } - - tu := time.Unix(t, tn) - return &events.Message{ - Time: t, - TimeNano: tu.UnixNano(), - Type: md["eventType"], - Action: md["action"], - Actor: events.Actor{ - ID: md["id"], - Attributes: attrs, - }, - }, nil -} diff --git a/daemon/events_test.go b/daemon/events_test.go deleted file mode 100644 index 8ee14a314..000000000 --- a/daemon/events_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package daemon - -import ( - "testing" - "time" - - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/events" - containertypes "github.com/docker/engine-api/types/container" - eventtypes "github.com/docker/engine-api/types/events" -) - -func TestLogContainerEventCopyLabels(t *testing.T) { - e := events.New() - _, l, _ := e.Subscribe() - defer e.Evict(l) - - container := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Image: "image_name", - Labels: map[string]string{ - "node": "1", - "os": "alpine", - }, - }, - }, - } - daemon := &Daemon{ - EventsService: e, - } - daemon.LogContainerEvent(container, "create") - - if _, mutated := container.Config.Labels["image"]; mutated { - t.Fatalf("Expected to not mutate the container labels, got %q", container.Config.Labels) - } - - validateTestAttributes(t, l, map[string]string{ - "node": "1", - "os": "alpine", - }) -} - -func TestLogContainerEventWithAttributes(t *testing.T) { - e := events.New() - _, l, _ := e.Subscribe() - defer e.Evict(l) - - container := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Labels: map[string]string{ - "node": "1", - "os": "alpine", - }, - }, - }, - } - daemon := &Daemon{ - EventsService: e, - } - attributes := map[string]string{ - "node": "2", - "foo": "bar", - } - daemon.LogContainerEventWithAttributes(container, "create", attributes) - - validateTestAttributes(t, l, map[string]string{ - "node": "1", - "foo": "bar", - }) -} - -func validateTestAttributes(t *testing.T, l chan interface{}, expectedAttributesToTest map[string]string) { - select { - case ev := <-l: - event, ok := ev.(eventtypes.Message) - if !ok { - t.Fatalf("Unexpected event message: %q", ev) - } - for key, expected := range expectedAttributesToTest { - actual, ok := event.Actor.Attributes[key] - if !ok || actual != expected { - t.Fatalf("Expected value for key %s to be %s, but was %s (event:%v)", key, expected, actual, event) - } - } - case <-time.After(10 * time.Second): - t.Fatalf("LogEvent test timed out") - } -} diff --git a/daemon/exec.go b/daemon/exec.go deleted file mode 100644 index b32322291..000000000 --- a/daemon/exec.go +++ /dev/null @@ -1,268 +0,0 @@ -package daemon - -import ( - "fmt" - "io" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/errors" - 
"github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/term" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/strslice" -) - -// Seconds to wait after sending TERM before trying KILL -const termProcessTimeout = 10 - -func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) { - // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. - container.ExecCommands.Add(config.ID, config) - // Storing execs in daemon for easy access via remote API. - d.execCommands.Add(config.ID, config) -} - -// ExecExists looks up the exec instance and returns a bool if it exists or not. -// It will also return the error produced by `getConfig` -func (d *Daemon) ExecExists(name string) (bool, error) { - if _, err := d.getExecConfig(name); err != nil { - return false, err - } - return true, nil -} - -// getExecConfig looks up the exec instance by name. If the container associated -// with the exec instance is stopped or paused, it will return an error. -func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { - ec := d.execCommands.Get(name) - - // If the exec is found but its container is not in the daemon's list of - // containers then it must have been deleted, in which case instead of - // saying the container isn't running, we should return a 404 so that - // the user sees the same error now that they will after the - // 5 minute clean-up loop is run which erases old/dead execs. - - if ec != nil { - if container := d.containers.Get(ec.ContainerID); container != nil { - if !container.IsRunning() { - return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) - } - if container.IsPaused() { - return nil, errExecPaused(container.ID) - } - if container.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - return ec, nil - } - } - - return nil, errExecNotFound(name) -} - -func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { - container.ExecCommands.Delete(execConfig.ID) - d.execCommands.Delete(execConfig.ID) -} - -func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { - container, err := d.GetContainer(name) - if err != nil { - return nil, err - } - - if !container.IsRunning() { - return nil, errNotRunning{container.ID} - } - if container.IsPaused() { - return nil, errExecPaused(name) - } - if container.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - return container, nil -} - -// ContainerExecCreate sets up an exec in a running container. 
-func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { - container, err := d.getActiveContainer(name) - if err != nil { - return "", err - } - - cmd := strslice.StrSlice(config.Cmd) - entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd) - - keys := []byte{} - if config.DetachKeys != "" { - keys, err = term.ToBytes(config.DetachKeys) - if err != nil { - err = fmt.Errorf("Invalid escape keys (%s) provided", config.DetachKeys) - return "", err - } - } - - execConfig := exec.NewConfig() - execConfig.OpenStdin = config.AttachStdin - execConfig.OpenStdout = config.AttachStdout - execConfig.OpenStderr = config.AttachStderr - execConfig.ContainerID = container.ID - execConfig.DetachKeys = keys - execConfig.Entrypoint = entrypoint - execConfig.Args = args - execConfig.Tty = config.Tty - execConfig.Privileged = config.Privileged - execConfig.User = config.User - if len(execConfig.User) == 0 { - execConfig.User = container.Config.User - } - - d.registerExecCommand(container, execConfig) - - d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) - - return execConfig.ID, nil -} - -// ContainerExecStart starts a previously set up exec instance. The -// std streams are set up. -// If ctx is cancelled, the process is terminated. -func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) (err error) { - var ( - cStdin io.ReadCloser - cStdout, cStderr io.Writer - ) - - ec, err := d.getExecConfig(name) - if err != nil { - return errExecNotFound(name) - } - - ec.Lock() - if ec.ExitCode != nil { - ec.Unlock() - err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) - return errors.NewRequestConflictError(err) - } - - if ec.Running { - ec.Unlock() - return fmt.Errorf("Error: Exec command %s is already running", ec.ID) - } - ec.Running = true - defer func() { - if err != nil { - ec.Running = false - exitCode := 126 - ec.ExitCode = &exitCode - } - }() - ec.Unlock() - - c := d.containers.Get(ec.ContainerID) - logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) - d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")) - - if ec.OpenStdin && stdin != nil { - r, w := io.Pipe() - go func() { - defer w.Close() - defer logrus.Debug("Closing buffered stdin pipe") - pools.Copy(w, stdin) - }() - cStdin = r - } - if ec.OpenStdout { - cStdout = stdout - } - if ec.OpenStderr { - cStderr = stderr - } - - if ec.OpenStdin { - ec.NewInputPipes() - } else { - ec.NewNopInputPipe() - } - - p := libcontainerd.Process{ - Args: append([]string{ec.Entrypoint}, ec.Args...), - Terminal: ec.Tty, - } - - if err := execSetPlatformOpt(c, ec, &p); err != nil { - return nil - } - - attachErr := container.AttachStreams(ctx, ec.StreamConfig, ec.OpenStdin, true, ec.Tty, cStdin, cStdout, cStderr, ec.DetachKeys) - - if err := d.containerd.AddProcess(c.ID, name, p); err != nil { - return err - } - - select { - case <-ctx.Done(): - logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID) - d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["TERM"])) - select { - case <-time.After(termProcessTimeout * time.Second): - logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout) - d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["KILL"])) - case <-attachErr: - // TERM signal worked - } - return 
fmt.Errorf("context cancelled") - case err := <-attachErr: - if err != nil { - if _, ok := err.(container.DetachError); !ok { - return fmt.Errorf("exec attach failed with error: %v", err) - } - d.LogContainerEvent(c, "exec_detach") - } - } - return nil -} - -// execCommandGC runs a ticker to clean up the daemon references -// of exec configs that are no longer part of the container. -func (d *Daemon) execCommandGC() { - for range time.Tick(5 * time.Minute) { - var ( - cleaned int - liveExecCommands = d.containerExecIds() - ) - for id, config := range d.execCommands.Commands() { - if config.CanRemove { - cleaned++ - d.execCommands.Delete(id) - } else { - if _, exists := liveExecCommands[id]; !exists { - config.CanRemove = true - } - } - } - if cleaned > 0 { - logrus.Debugf("clean %d unused exec commands", cleaned) - } - } -} - -// containerExecIds returns a list of all the current exec ids that are in use -// and running inside a container. -func (d *Daemon) containerExecIds() map[string]struct{} { - ids := map[string]struct{}{} - for _, c := range d.containers.List() { - for _, id := range c.ExecCommands.List() { - ids[id] = struct{}{} - } - } - return ids -} diff --git a/daemon/exec/exec.go b/daemon/exec/exec.go deleted file mode 100644 index bbeb1c16a..000000000 --- a/daemon/exec/exec.go +++ /dev/null @@ -1,93 +0,0 @@ -package exec - -import ( - "sync" - - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/runconfig" -) - -// Config holds the configurations for execs. The Daemon keeps -// track of both running and finished execs so that they can be -// examined both during and after completion. -type Config struct { - sync.Mutex - *runconfig.StreamConfig - ID string - Running bool - ExitCode *int - OpenStdin bool - OpenStderr bool - OpenStdout bool - CanRemove bool - ContainerID string - DetachKeys []byte - Entrypoint string - Args []string - Tty bool - Privileged bool - User string -} - -// NewConfig initializes the a new exec configuration -func NewConfig() *Config { - return &Config{ - ID: stringid.GenerateNonCryptoID(), - StreamConfig: runconfig.NewStreamConfig(), - } -} - -// Store keeps track of the exec configurations. -type Store struct { - commands map[string]*Config - sync.RWMutex -} - -// NewStore initializes a new exec store. -func NewStore() *Store { - return &Store{commands: make(map[string]*Config, 0)} -} - -// Commands returns the exec configurations in the store. -func (e *Store) Commands() map[string]*Config { - e.RLock() - commands := make(map[string]*Config, len(e.commands)) - for id, config := range e.commands { - commands[id] = config - } - e.RUnlock() - return commands -} - -// Add adds a new exec configuration to the store. -func (e *Store) Add(id string, Config *Config) { - e.Lock() - e.commands[id] = Config - e.Unlock() -} - -// Get returns an exec configuration by its id. -func (e *Store) Get(id string) *Config { - e.RLock() - res := e.commands[id] - e.RUnlock() - return res -} - -// Delete removes an exec configuration from the store. -func (e *Store) Delete(id string) { - e.Lock() - delete(e.commands, id) - e.Unlock() -} - -// List returns the list of exec ids in the store. 
-func (e *Store) List() []string { - var IDs []string - e.RLock() - for id := range e.commands { - IDs = append(IDs, id) - } - e.RUnlock() - return IDs -} diff --git a/daemon/exec_linux.go b/daemon/exec_linux.go deleted file mode 100644 index a2c86b286..000000000 --- a/daemon/exec_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/caps" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/libcontainerd" -) - -func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { - if len(ec.User) > 0 { - uid, gid, additionalGids, err := getUser(c, ec.User) - if err != nil { - return err - } - p.User = &libcontainerd.User{ - UID: uid, - GID: gid, - AdditionalGids: additionalGids, - } - } - if ec.Privileged { - p.Capabilities = caps.GetAllCapabilities() - } - return nil -} diff --git a/daemon/exec_solaris.go b/daemon/exec_solaris.go deleted file mode 100644 index 7003355d9..000000000 --- a/daemon/exec_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/libcontainerd" -) - -func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { - return nil -} diff --git a/daemon/exec_windows.go b/daemon/exec_windows.go deleted file mode 100644 index a6ac1db42..000000000 --- a/daemon/exec_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/libcontainerd" -) - -func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { - // Process arguments need to be escaped before sending to OCI. - p.Args = escapeArgs(p.Args) - return nil -} diff --git a/daemon/export.go b/daemon/export.go deleted file mode 100644 index 80d7dbb2e..000000000 --- a/daemon/export.go +++ /dev/null @@ -1,55 +0,0 @@ -package daemon - -import ( - "fmt" - "io" - - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/ioutils" -) - -// ContainerExport writes the contents of the container to the given -// writer. An error is returned if the container cannot be found. 
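
Editor's note: containerExport, just below, keeps the container mounted until the caller finishes reading by wrapping the tar stream with ioutils.NewReadCloserWrapper. A stdlib-only sketch of that wrapper shape (readCloserWrapper here is illustrative, not the ioutils implementation):

package main

import (
	"fmt"
	"io"
	"strings"
)

// readCloserWrapper pairs a reader with a cleanup callback that runs
// on Close.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

func main() {
	var r io.ReadCloser = &readCloserWrapper{
		Reader: strings.NewReader("tar bytes..."),
		closer: func() error {
			// In containerExport this is where the archive is closed
			// and the container filesystem unmounted.
			fmt.Println("cleanup: unmount container filesystem")
			return nil
		},
	}
	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}
	r.Close() // cleanup runs only once the consumer is done
}
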
-func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - data, err := daemon.containerExport(container) - if err != nil { - return fmt.Errorf("Error exporting container %s: %v", name, err) - } - defer data.Close() - - // Stream the entire contents of the container (basically a volatile snapshot) - if _, err := io.Copy(out, data); err != nil { - return fmt.Errorf("Error exporting container %s: %v", name, err) - } - return nil -} - -func (daemon *Daemon) containerExport(container *container.Container) (archive.Archive, error) { - if err := daemon.Mount(container); err != nil { - return nil, err - } - - uidMaps, gidMaps := daemon.GetUIDGIDMaps() - archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ - Compression: archive.Uncompressed, - UIDMaps: uidMaps, - GIDMaps: gidMaps, - }) - if err != nil { - daemon.Unmount(container) - return nil, err - } - arch := ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - daemon.Unmount(container) - return err - }) - daemon.LogContainerEvent(container, "export") - return arch, err -} diff --git a/daemon/health.go b/daemon/health.go deleted file mode 100644 index 2f2d79ebc..000000000 --- a/daemon/health.go +++ /dev/null @@ -1,315 +0,0 @@ -package daemon - -import ( - "bytes" - "fmt" - "runtime" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/strslice" -) - -const ( - // Longest healthcheck probe output message to store. Longer messages will be truncated. - maxOutputLen = 4096 - - // Default interval between probe runs (from the end of the first to the start of the second). - // Also the time before the first probe. - defaultProbeInterval = 30 * time.Second - - // The maximum length of time a single probe run should take. If the probe takes longer - // than this, the check is considered to have failed. - defaultProbeTimeout = 30 * time.Second - - // Default number of consecutive failures of the health check - // for the container to be considered unhealthy. - defaultProbeRetries = 3 - - // Maximum number of entries to record - maxLogEntries = 5 -) - -const ( - // Exit status codes that can be returned by the probe command. - - exitStatusHealthy = 0 // Container is healthy - exitStatusUnhealthy = 1 // Container is unhealthy - exitStatusStarting = 2 // Container needs more time to start -) - -// probe implementations know how to run a particular type of probe. -type probe interface { - // Perform one run of the check. Returns the exit code and an optional - // short diagnostic string. - run(context.Context, *Daemon, *container.Container) (*types.HealthcheckResult, error) -} - -// cmdProbe implements the "CMD" probe type. -type cmdProbe struct { - // Run the command with the system's default shell instead of execing it directly. - shell bool -} - -// exec the healthcheck command in the container. -// Returns the exit code and probe output (if any) -func (p *cmdProbe) run(ctx context.Context, d *Daemon, container *container.Container) (*types.HealthcheckResult, error) { - cmdSlice := strslice.StrSlice(container.Config.Healthcheck.Test)[1:] - if p.shell { - if runtime.GOOS != "windows" { - cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...) - } else { - cmdSlice = append([]string{"cmd", "/S", "/C"}, cmdSlice...) 
- } - } - entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmdSlice) - execConfig := exec.NewConfig() - execConfig.OpenStdin = false - execConfig.OpenStdout = true - execConfig.OpenStderr = true - execConfig.ContainerID = container.ID - execConfig.DetachKeys = []byte{} - execConfig.Entrypoint = entrypoint - execConfig.Args = args - execConfig.Tty = false - execConfig.Privileged = false - execConfig.User = container.Config.User - - d.registerExecCommand(container, execConfig) - d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) - - output := &limitedBuffer{} - err := d.ContainerExecStart(ctx, execConfig.ID, nil, output, output) - if err != nil { - return nil, err - } - info, err := d.getExecConfig(execConfig.ID) - if err != nil { - return nil, err - } - if info.ExitCode == nil { - return nil, fmt.Errorf("Healthcheck has no exit code!") - } - // Note: Go's json package will handle invalid UTF-8 for us - out := output.String() - return &types.HealthcheckResult{ - End: time.Now(), - ExitCode: *info.ExitCode, - Output: out, - }, nil -} - -// Update the container's Status.Health struct based on the latest probe's result. -func handleProbeResult(d *Daemon, c *container.Container, result *types.HealthcheckResult) { - c.Lock() - defer c.Unlock() - - retries := c.Config.Healthcheck.Retries - if retries <= 0 { - retries = defaultProbeRetries - } - - h := c.State.Health - oldStatus := h.Status - - if len(h.Log) >= maxLogEntries { - h.Log = append(h.Log[len(h.Log)+1-maxLogEntries:], result) - } else { - h.Log = append(h.Log, result) - } - - if result.ExitCode == exitStatusHealthy { - h.FailingStreak = 0 - h.Status = types.Healthy - } else if result.ExitCode == exitStatusStarting && c.State.Health.Status == types.Starting { - // The container is not ready yet. Remain in the starting state. - } else { - // Failure (including invalid exit code) - h.FailingStreak++ - if c.State.Health.FailingStreak >= retries { - h.Status = types.Unhealthy - } - // Else we're starting or healthy. Stay in that state. - } - - if oldStatus != h.Status { - d.LogContainerEvent(c, "health_status: "+h.Status) - } -} - -// Run the container's monitoring thread until notified via "stop". -// There is never more than one monitor thread running per container at a time. -func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) { - probeTimeout := timeoutWithDefault(c.Config.Healthcheck.Timeout, defaultProbeTimeout) - probeInterval := timeoutWithDefault(c.Config.Healthcheck.Interval, defaultProbeInterval) - for { - select { - case <-stop: - logrus.Debug("Stop healthcheck monitoring (received while idle)") - return - case <-time.After(probeInterval): - logrus.Debug("Running health check...") - startTime := time.Now() - ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout) - results := make(chan *types.HealthcheckResult) - go func() { - result, err := probe.run(ctx, d, c) - if err != nil { - logrus.Warnf("Health check error: %v", err) - results <- &types.HealthcheckResult{ - ExitCode: -1, - Output: err.Error(), - Start: startTime, - End: time.Now(), - } - } else { - result.Start = startTime - logrus.Debugf("Health check done (exitCode=%d)", result.ExitCode) - results <- result - } - close(results) - }() - select { - case <-stop: - logrus.Debug("Stop healthcheck monitoring (received while probing)") - // Stop timeout and kill probe, but don't wait for probe to exit. 
- cancelProbe() - return - case result := <-results: - handleProbeResult(d, c, result) - // Stop timeout - cancelProbe() - case <-ctx.Done(): - logrus.Debug("Health check taking too long") - handleProbeResult(d, c, &types.HealthcheckResult{ - ExitCode: -1, - Output: fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout), - Start: startTime, - End: time.Now(), - }) - cancelProbe() - // Wait for probe to exit (it might take a while to respond to the TERM - // signal and we don't want dying probes to pile up). - <-results - } - } - } -} - -// Get a suitable probe implementation for the container's healthcheck configuration. -func getProbe(c *container.Container) probe { - config := c.Config.Healthcheck - if config == nil || len(config.Test) == 0 { - return nil - } - switch config.Test[0] { - case "CMD": - return &cmdProbe{shell: false} - case "CMD-SHELL": - return &cmdProbe{shell: true} - default: - logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD')", config.Test[0]) - return nil - } -} - -// Ensure the health-check monitor is running or not, depending on the current -// state of the container. -// Called from monitor.go, with c locked. -func (d *Daemon) updateHealthMonitor(c *container.Container) { - h := c.State.Health - if h == nil { - return // No healthcheck configured - } - - probe := getProbe(c) - wantRunning := c.Running && !c.Paused && probe != nil - if wantRunning { - if stop := h.OpenMonitorChannel(); stop != nil { - go monitor(d, c, stop, probe) - } - } else { - h.CloseMonitorChannel() - } -} - -// Reset the health state for a newly-started, restarted or restored container. -// initHealthMonitor is called from monitor.go and we should never be running -// two instances at once. -// Called with c locked. -func (d *Daemon) initHealthMonitor(c *container.Container) { - if c.Config.Healthcheck == nil { - return - } - - // This is needed in case we're auto-restarting - d.stopHealthchecks(c) - - if c.State.Health == nil { - h := &container.Health{} - h.Status = types.Starting - h.FailingStreak = 0 - c.State.Health = h - } - - d.updateHealthMonitor(c) -} - -// Called when the container is being stopped (whether because the health check is -// failing or for any other reason). -func (d *Daemon) stopHealthchecks(c *container.Container) { - h := c.State.Health - if h != nil { - h.CloseMonitorChannel() - } -} - -// Buffer up to maxOutputLen bytes. Further data is discarded. -type limitedBuffer struct { - buf bytes.Buffer - truncated bool // indicates that data has been lost -} - -// Append to limitedBuffer while there is room. -func (b *limitedBuffer) Write(data []byte) (int, error) { - bufLen := b.buf.Len() - dataLen := len(data) - keep := min(maxOutputLen-bufLen, dataLen) - if keep > 0 { - b.buf.Write(data[:keep]) - } - if keep < dataLen { - b.truncated = true - } - return dataLen, nil -} - -// The contents of the buffer, with "..." appended if it overflowed. -func (b *limitedBuffer) String() string { - out := b.buf.String() - if b.truncated { - out = out + "..." - } - return out -} - -// If configuredValue is zero, use defaultValue instead. 
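
Editor's note: handleProbeResult above is a small state machine over probe exit codes and a failing streak. A compact, runnable sketch of those transitions (the health struct and string states stand in for the engine-api types):

package main

import "fmt"

const (
	exitStatusHealthy  = 0 // container is healthy
	exitStatusStarting = 2 // container needs more time to start
)

// health models just the fields handleProbeResult updates above.
type health struct {
	Status        string // "starting", "healthy", or "unhealthy"
	FailingStreak int
}

// apply reproduces the transition rule: exit 0 resets the streak and
// marks healthy; exit 2 only matters while still starting; any other
// code grows the failing streak, flipping to unhealthy once the retry
// budget is spent.
func (h *health) apply(exitCode, retries int) {
	switch {
	case exitCode == exitStatusHealthy:
		h.FailingStreak = 0
		h.Status = "healthy"
	case exitCode == exitStatusStarting && h.Status == "starting":
		// not ready yet; remain in the starting state
	default:
		h.FailingStreak++
		if h.FailingStreak >= retries {
			h.Status = "unhealthy"
		}
	}
}

func main() {
	h := &health{Status: "starting"}
	for _, code := range []int{2, 1, 1, 1} {
		h.apply(code, 3)
		fmt.Printf("exit=%d -> %+v\n", code, *h)
	}
	// ends with: exit=1 -> {Status:unhealthy FailingStreak:3}
}
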
-func timeoutWithDefault(configuredValue time.Duration, defaultValue time.Duration) time.Duration { - if configuredValue == 0 { - return defaultValue - } - return configuredValue -} - -func min(x, y int) int { - if x < y { - return x - } - return y -} diff --git a/daemon/health_test.go b/daemon/health_test.go deleted file mode 100644 index f53c32f4f..000000000 --- a/daemon/health_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package daemon - -import ( - "testing" - "time" - - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/events" - "github.com/docker/engine-api/types" - containertypes "github.com/docker/engine-api/types/container" - eventtypes "github.com/docker/engine-api/types/events" -) - -func reset(c *container.Container) { - c.State = &container.State{} - c.State.Health = &container.Health{} - c.State.Health.Status = types.Starting -} - -func TestHealthStates(t *testing.T) { - e := events.New() - _, l, _ := e.Subscribe() - defer e.Evict(l) - - expect := func(expected string) { - select { - case event := <-l: - ev := event.(eventtypes.Message) - if ev.Status != expected { - t.Errorf("Expecting event %#v, but got %#v\n", expected, ev.Status) - } - case <-time.After(1 * time.Second): - t.Errorf("Expecting event %#v, but got nothing\n", expected) - } - } - - c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Image: "image_name", - }, - }, - } - daemon := &Daemon{ - EventsService: e, - } - - c.Config.Healthcheck = &containertypes.HealthConfig{ - Retries: 1, - } - - reset(c) - - handleResult := func(startTime time.Time, exitCode int) { - handleProbeResult(daemon, c, &types.HealthcheckResult{ - Start: startTime, - End: startTime, - ExitCode: exitCode, - }) - } - - // starting -> failed -> success -> failed - - handleResult(c.State.StartedAt.Add(1*time.Second), 1) - expect("health_status: unhealthy") - - handleResult(c.State.StartedAt.Add(2*time.Second), 0) - expect("health_status: healthy") - - handleResult(c.State.StartedAt.Add(3*time.Second), 1) - expect("health_status: unhealthy") - - // starting -> starting -> starting -> - // healthy -> starting (invalid transition) - - reset(c) - - handleResult(c.State.StartedAt.Add(20*time.Second), 2) - handleResult(c.State.StartedAt.Add(40*time.Second), 2) - if c.State.Health.Status != types.Starting { - t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) - } - - handleResult(c.State.StartedAt.Add(50*time.Second), 0) - expect("health_status: healthy") - handleResult(c.State.StartedAt.Add(60*time.Second), 2) - expect("health_status: unhealthy") - - // Test retries - - reset(c) - c.Config.Healthcheck.Retries = 3 - - handleResult(c.State.StartedAt.Add(20*time.Second), 1) - handleResult(c.State.StartedAt.Add(40*time.Second), 1) - if c.State.Health.Status != types.Starting { - t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) - } - if c.State.Health.FailingStreak != 2 { - t.Errorf("Expecting FailingStreak=2, but got %d\n", c.State.Health.FailingStreak) - } - handleResult(c.State.StartedAt.Add(60*time.Second), 1) - expect("health_status: unhealthy") - - handleResult(c.State.StartedAt.Add(80*time.Second), 0) - expect("health_status: healthy") - if c.State.Health.FailingStreak != 0 { - t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) - } -} diff --git a/daemon/image.go b/daemon/image.go deleted file mode 100644 index 9a3fa1aea..000000000 --- a/daemon/image.go +++ 
/dev/null @@ -1,124 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/builder" - "github.com/docker/docker/image" - "github.com/docker/docker/reference" - "github.com/docker/docker/runconfig" - containertypes "github.com/docker/engine-api/types/container" -) - -// ErrImageDoesNotExist is error returned when no image can be found for a reference. -type ErrImageDoesNotExist struct { - RefOrID string -} - -func (e ErrImageDoesNotExist) Error() string { - return fmt.Sprintf("no such id: %s", e.RefOrID) -} - -// GetImageID returns an image ID corresponding to the image referred to by -// refOrID. -func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { - id, ref, err := reference.ParseIDOrReference(refOrID) - if err != nil { - return "", err - } - if id != "" { - if _, err := daemon.imageStore.Get(image.ID(id)); err != nil { - return "", ErrImageDoesNotExist{refOrID} - } - return image.ID(id), nil - } - - if id, err := daemon.referenceStore.Get(ref); err == nil { - return id, nil - } - if tagged, ok := ref.(reference.NamedTagged); ok { - if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil { - for _, namedRef := range daemon.referenceStore.References(id) { - if namedRef.Name() == ref.Name() { - return id, nil - } - } - } - } - - // Search based on ID - if id, err := daemon.imageStore.Search(refOrID); err == nil { - return id, nil - } - - return "", ErrImageDoesNotExist{refOrID} -} - -// GetImage returns an image corresponding to the image referred to by refOrID. -func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { - imgID, err := daemon.GetImageID(refOrID) - if err != nil { - return nil, err - } - return daemon.imageStore.Get(imgID) -} - -// GetImageOnBuild looks up a Docker image referenced by `name`. -func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) { - img, err := daemon.GetImage(name) - if err != nil { - return nil, err - } - return img, nil -} - -// GetCachedImage returns the most recent created image that is a child -// of the image with imgID, that had the same config when it was -// created. nil is returned if a child cannot be found. An error is -// returned if the parent image cannot be found. -func (daemon *Daemon) GetCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) { - // Loop on the children of the given image and check the config - getMatch := func(siblings []image.ID) (*image.Image, error) { - var match *image.Image - for _, id := range siblings { - img, err := daemon.imageStore.Get(id) - if err != nil { - return nil, fmt.Errorf("unable to find image %q", id) - } - - if runconfig.Compare(&img.ContainerConfig, config) { - // check for the most up to date match - if match == nil || match.Created.Before(img.Created) { - match = img - } - } - } - return match, nil - } - - // In this case, this is `FROM scratch`, which isn't an actual image. - if imgID == "" { - images := daemon.imageStore.Map() - var siblings []image.ID - for id, img := range images { - if img.Parent == imgID { - siblings = append(siblings, id) - } - } - return getMatch(siblings) - } - - // find match from child images - siblings := daemon.imageStore.Children(imgID) - return getMatch(siblings) -} - -// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` -// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. 
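
Editor's note: GetCachedImage above picks, among config-equal children, the most recently created one. The selection rule in isolation (candidate is an illustrative record type; its Match field stands in for the runconfig.Compare call):

package main

import (
	"fmt"
	"time"
)

type candidate struct {
	ID      string
	Created time.Time
	Match   bool
}

// newestMatch applies GetCachedImage's rule: among all config-equal
// children, keep the one with the latest creation time.
func newestMatch(siblings []candidate) *candidate {
	var match *candidate
	for i := range siblings {
		img := &siblings[i]
		if !img.Match {
			continue
		}
		if match == nil || match.Created.Before(img.Created) {
			match = img
		}
	}
	return match
}

func main() {
	now := time.Now()
	fmt.Println(newestMatch([]candidate{
		{"a", now.Add(-2 * time.Hour), true},
		{"b", now.Add(-1 * time.Hour), true},
		{"c", now, false},
	}).ID) // b: newest among the matches
}
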
-func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) { - cache, err := daemon.GetCachedImage(image.ID(imgID), cfg) - if cache == nil || err != nil { - return "", err - } - return cache.ID().String(), nil -} diff --git a/daemon/image_delete.go b/daemon/image_delete.go deleted file mode 100644 index c1af38bf7..000000000 --- a/daemon/image_delete.go +++ /dev/null @@ -1,404 +0,0 @@ -package daemon - -import ( - "fmt" - "strings" - - "github.com/docker/docker/container" - "github.com/docker/docker/errors" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" -) - -type conflictType int - -const ( - conflictDependentChild conflictType = (1 << iota) - conflictRunningContainer - conflictActiveReference - conflictStoppedContainer - conflictHard = conflictDependentChild | conflictRunningContainer - conflictSoft = conflictActiveReference | conflictStoppedContainer -) - -// ImageDelete deletes the image referenced by the given imageRef from this -// daemon. The given imageRef can be an image ID, ID prefix, or a repository -// reference (with an optional tag or digest, defaulting to the tag name -// "latest"). There is differing behavior depending on whether the given -// imageRef is a repository reference or not. -// -// If the given imageRef is a repository reference then that repository -// reference will be removed. However, if there exists any containers which -// were created using the same image reference then the repository reference -// cannot be removed unless either there are other repository references to the -// same image or force is true. Following removal of the repository reference, -// the referenced image itself will attempt to be deleted as described below -// but quietly, meaning any image delete conflicts will cause the image to not -// be deleted and the conflict will not be reported. -// -// There may be conflicts preventing deletion of an image and these conflicts -// are divided into two categories grouped by their severity: -// -// Hard Conflict: -// - a pull or build using the image. -// - any descendant image. -// - any running container using the image. -// -// Soft Conflict: -// - any stopped container using the image. -// - any repository tag or digest references to the image. -// -// The image cannot be removed if there are any hard conflicts and can be -// removed if there are soft conflicts only if force is true. -// -// If prune is true, ancestor images will each attempt to be deleted quietly, -// meaning any delete conflicts will cause the image to not be deleted and the -// conflict will not be reported. -// -// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph -// package. This would require that we no longer need the daemon to determine -// whether images are being used by a stopped or running container. -func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { - records := []types.ImageDelete{} - - imgID, err := daemon.GetImageID(imageRef) - if err != nil { - return nil, daemon.imageNotExistToErrcode(err) - } - - repoRefs := daemon.referenceStore.References(imgID) - - var removedRepositoryRef bool - if !isImageIDPrefix(imgID.String(), imageRef) { - // A repository reference was given and should be removed - // first. 
We can only remove this reference if either force is - // true, there are multiple repository references to this - // image, or there are no containers using the given reference. - if !(force || len(repoRefs) > 1) { - if container := daemon.getContainerUsingImage(imgID); container != nil { - // If we removed the repository reference then - // this image would remain "dangling" and since - // we really want to avoid that the client must - // explicitly force its removal. - err := fmt.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) - return nil, errors.NewRequestConflictError(err) - } - } - - parsedRef, err := reference.ParseNamed(imageRef) - if err != nil { - return nil, err - } - - parsedRef, err = daemon.removeImageRef(parsedRef) - if err != nil { - return nil, err - } - - untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} - - daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") - records = append(records, untaggedRecord) - - repoRefs = daemon.referenceStore.References(imgID) - - // If a tag reference was removed and the only remaining - // references to the same repository are digest references, - // then clean up those digest references. - if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { - foundRepoTagRef := false - for _, repoRef := range repoRefs { - if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { - foundRepoTagRef = true - break - } - } - if !foundRepoTagRef { - // Remove canonical references from same repository - remainingRefs := []reference.Named{} - for _, repoRef := range repoRefs { - if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { - if _, err := daemon.removeImageRef(repoRef); err != nil { - return records, err - } - - untaggedRecord := types.ImageDelete{Untagged: repoRef.String()} - records = append(records, untaggedRecord) - } else { - remainingRefs = append(remainingRefs, repoRef) - - } - } - repoRefs = remainingRefs - } - } - - // If it has remaining references then the untag finished the remove - if len(repoRefs) > 0 { - return records, nil - } - - removedRepositoryRef = true - } else { - // If an ID reference was given AND there is at most one tag - // reference to the image AND all references are within one - // repository, then remove all references. - if isSingleReference(repoRefs) { - c := conflictHard - if !force { - c |= conflictSoft &^ conflictActiveReference - } - if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { - return nil, conflict - } - - for _, repoRef := range repoRefs { - parsedRef, err := daemon.removeImageRef(repoRef) - if err != nil { - return nil, err - } - - untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} - - daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") - records = append(records, untaggedRecord) - } - } - } - - return records, daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef) -} - -// isSingleReference returns true when all references are from one repository -// and there is at most one tag. Returns false for empty input. 
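
Editor's note: isSingleReference, defined next, decides whether an ID-based delete may untag everything. Its rule in isolation (ref is an illustrative stand-in for reference.Named; Canonical marks digest references):

package main

import "fmt"

type ref struct {
	Name      string
	Canonical bool
}

// isSingleReference mirrors the rule defined below: all references must
// point at one repository, with at most one tag among them (digest
// references to that same repository are allowed).
func isSingleReference(refs []ref) bool {
	if len(refs) <= 1 {
		return len(refs) == 1
	}
	var tagName string
	canonical := map[string]bool{}
	for _, r := range refs {
		switch {
		case r.Canonical:
			canonical[r.Name] = true
		case tagName == "":
			tagName = r.Name
		default:
			return false // a second tag always disqualifies
		}
	}
	if tagName == "" {
		tagName = refs[0].Name // only digests; use the first repository
	}
	return len(canonical) == 1 && canonical[tagName]
}

func main() {
	fmt.Println(isSingleReference([]ref{
		{Name: "ubuntu", Canonical: false},
		{Name: "ubuntu", Canonical: true},
	})) // true: one tag plus a digest in the same repository
}
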
-func isSingleReference(repoRefs []reference.Named) bool { - if len(repoRefs) <= 1 { - return len(repoRefs) == 1 - } - var singleRef reference.Named - canonicalRefs := map[string]struct{}{} - for _, repoRef := range repoRefs { - if _, isCanonical := repoRef.(reference.Canonical); isCanonical { - canonicalRefs[repoRef.Name()] = struct{}{} - } else if singleRef == nil { - singleRef = repoRef - } else { - return false - } - } - if singleRef == nil { - // Just use first canonical ref - singleRef = repoRefs[0] - } - _, ok := canonicalRefs[singleRef.Name()] - return len(canonicalRefs) == 1 && ok -} - -// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the -// given imageID. -func isImageIDPrefix(imageID, possiblePrefix string) bool { - if strings.HasPrefix(imageID, possiblePrefix) { - return true - } - - if i := strings.IndexRune(imageID, ':'); i >= 0 { - return strings.HasPrefix(imageID[i+1:], possiblePrefix) - } - - return false -} - -// getContainerUsingImage returns a container that was created using the given -// imageID. Returns nil if there is no such container. -func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container { - return daemon.containers.First(func(c *container.Container) bool { - return c.ImageID == imageID - }) -} - -// removeImageRef attempts to parse and remove the given image reference from -// this daemon's store of repository tag/digest references. The given -// repositoryRef must not be an image ID but a repository name followed by an -// optional tag or digest reference. If tag or digest is omitted, the default -// tag is used. Returns the resolved image reference and an error. -func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) { - ref = reference.WithDefaultTag(ref) - // Ignore the boolean value returned, as far as we're concerned, this - // is an idempotent operation and it's okay if the reference didn't - // exist in the first place. - _, err := daemon.referenceStore.Delete(ref) - - return ref, err -} - -// removeAllReferencesToImageID attempts to remove every reference to the given -// imgID from this daemon's store of repository tag/digest references. Returns -// on the first encountered error. Removed references are logged to this -// daemon's event service. An "Untagged" types.ImageDelete is added to the -// given list of records. -func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDelete) error { - imageRefs := daemon.referenceStore.References(imgID) - - for _, imageRef := range imageRefs { - parsedRef, err := daemon.removeImageRef(imageRef) - if err != nil { - return err - } - - untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} - - daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") - *records = append(*records, untaggedRecord) - } - - return nil -} - -// ImageDeleteConflict holds a soft or hard conflict and an associated error. -// Implements the error interface. -type imageDeleteConflict struct { - hard bool - used bool - imgID image.ID - message string -} - -func (idc *imageDeleteConflict) Error() string { - var forceMsg string - if idc.hard { - forceMsg = "cannot be forced" - } else { - forceMsg = "must be forced" - } - - return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) -} - -// imageDeleteHelper attempts to delete the given image from this daemon. 
If -// the image has any hard delete conflicts (child images or running containers -// using the image) then it cannot be deleted. If the image has any soft delete -// conflicts (any tags/digests referencing the image or any stopped container -// using the image) then it can only be deleted if force is true. If the delete -// succeeds and prune is true, the parent images are also deleted if they do -// not have any soft or hard delete conflicts themselves. Any deleted images -// and untagged references are appended to the given records. If any error or -// conflict is encountered, it will be returned immediately without deleting -// the image. If quiet is true, any encountered conflicts will be ignored and -// the function will return nil immediately without deleting the image. -func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDelete, force, prune, quiet bool) error { - // First, determine if this image has any conflicts. Ignore soft conflicts - // if force is true. - c := conflictHard - if !force { - c |= conflictSoft - } - if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { - if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) { - // Ignore conflicts UNLESS the image is "dangling" or not being used in - // which case we want the user to know. - return nil - } - - // There was a conflict and it's either a hard conflict OR we are not - // forcing deletion on soft conflicts. - return conflict - } - - parent, err := daemon.imageStore.GetParent(imgID) - if err != nil { - // There may be no parent - parent = "" - } - - // Delete all repository tag/digest references to this image. - if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil { - return err - } - - removedLayers, err := daemon.imageStore.Delete(imgID) - if err != nil { - return err - } - - daemon.LogImageEvent(imgID.String(), imgID.String(), "delete") - *records = append(*records, types.ImageDelete{Deleted: imgID.String()}) - for _, removedLayer := range removedLayers { - *records = append(*records, types.ImageDelete{Deleted: removedLayer.ChainID.String()}) - } - - if !prune || parent == "" { - return nil - } - - // We need to prune the parent image. This means delete it if there are - // no tags/digests referencing it and there are no containers using it ( - // either running or stopped). - // Do not force prunings, but do so quietly (stopping on any encountered - // conflicts). - return daemon.imageDeleteHelper(parent, records, false, true, true) -} - -// checkImageDeleteConflict determines whether there are any conflicts -// preventing deletion of the given image from this daemon. A hard conflict is -// any image which has the given image as a parent or any running container -// using the image. A soft conflict is any tags/digest referencing the given -// image or any stopped container using the image. If ignoreSoftConflicts is -// true, this function will not check for soft conflict conditions. -func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { - // Check if the image has any descendant images. - if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 { - return &imageDeleteConflict{ - hard: true, - imgID: imgID, - message: "image has dependent child images", - } - } - - if mask&conflictRunningContainer != 0 { - // Check if any running container is using the image. 
- running := func(c *container.Container) bool { - return c.IsRunning() && c.ImageID == imgID - } - if container := daemon.containers.First(running); container != nil { - return &imageDeleteConflict{ - imgID: imgID, - hard: true, - used: true, - message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), - } - } - } - - // Check if any repository tags/digest reference this image. - if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID)) > 0 { - return &imageDeleteConflict{ - imgID: imgID, - message: "image is referenced in one or more repositories", - } - } - - if mask&conflictStoppedContainer != 0 { - // Check if any stopped containers reference this image. - stopped := func(c *container.Container) bool { - return !c.IsRunning() && c.ImageID == imgID - } - if container := daemon.containers.First(stopped); container != nil { - return &imageDeleteConflict{ - imgID: imgID, - used: true, - message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), - } - } - } - - return nil -} - -// imageIsDangling returns whether the given image is "dangling" which means -// that there are no repository references to the given image and it has no -// child images. -func (daemon *Daemon) imageIsDangling(imgID image.ID) bool { - return !(len(daemon.referenceStore.References(imgID)) > 0 || len(daemon.imageStore.Children(imgID)) > 0) -} diff --git a/daemon/image_exporter.go b/daemon/image_exporter.go deleted file mode 100644 index 95d1d3dcd..000000000 --- a/daemon/image_exporter.go +++ /dev/null @@ -1,25 +0,0 @@ -package daemon - -import ( - "io" - - "github.com/docker/docker/image/tarexport" -) - -// ExportImage exports a list of images to the given output stream. The -// exported images are archived into a tar when written to the output -// stream. All images with the given tag and all versions containing -// the same tag are exported. names is the set of tags to export, and -// outStream is the writer which the images are written to. -func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { - imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) - return imageExporter.Save(names, outStream) -} - -// LoadImage uploads a set of images into the repository. This is the -// complement of ImageExport. The input stream is an uncompressed tar -// ball containing images and metadata. -func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) - return imageExporter.Load(inTar, outStream, quiet) -} diff --git a/daemon/image_history.go b/daemon/image_history.go deleted file mode 100644 index 05140d368..000000000 --- a/daemon/image_history.go +++ /dev/null @@ -1,82 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/layer" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" -) - -// ImageHistory returns a slice of ImageHistory structures for the specified image -// name by walking the image lineage. 
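ImageHistory (below) returns entries newest-first even though the image config stores them oldest-first; it achieves this by prepending each entry as it walks the config. A minimal standalone sketch of just that ordering trick, with a local historyEntry type standing in for the real image/engine-api types:

```go
package main

import "fmt"

// Local stand-in for an image config history entry.
type historyEntry struct {
	CreatedBy  string
	EmptyLayer bool
}

func main() {
	// An image config stores history oldest-first; the API returns it
	// newest-first, which the loop in ImageHistory achieves by prepending.
	configHistory := []historyEntry{
		{CreatedBy: "ADD rootfs.tar /"},
		{CreatedBy: "ENV PATH=/usr/bin", EmptyLayer: true},
		{CreatedBy: "RUN apt-get update"},
	}

	apiHistory := []string{}
	for _, h := range configHistory {
		// Mirrors: history = append([]*types.ImageHistory{{...}}, history...)
		apiHistory = append([]string{h.CreatedBy}, apiHistory...)
	}

	for i, cmd := range apiHistory {
		fmt.Printf("%d: %s\n", i, cmd) // index 0 is the newest layer
	}
}
```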
-func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { - img, err := daemon.GetImage(name) - if err != nil { - return nil, err - } - - history := []*types.ImageHistory{} - - layerCounter := 0 - rootFS := *img.RootFS - rootFS.DiffIDs = nil - - for _, h := range img.History { - var layerSize int64 - - if !h.EmptyLayer { - if len(img.RootFS.DiffIDs) <= layerCounter { - return nil, fmt.Errorf("too many non-empty layers in History section") - } - - rootFS.Append(img.RootFS.DiffIDs[layerCounter]) - l, err := daemon.layerStore.Get(rootFS.ChainID()) - if err != nil { - return nil, err - } - layerSize, err = l.DiffSize() - layer.ReleaseAndLog(daemon.layerStore, l) - if err != nil { - return nil, err - } - - layerCounter++ - } - - history = append([]*types.ImageHistory{{ - ID: "", - Created: h.Created.Unix(), - CreatedBy: h.CreatedBy, - Comment: h.Comment, - Size: layerSize, - }}, history...) - } - - // Fill in image IDs and tags - histImg := img - id := img.ID() - for _, h := range history { - h.ID = id.String() - - var tags []string - for _, r := range daemon.referenceStore.References(id) { - if _, ok := r.(reference.NamedTagged); ok { - tags = append(tags, r.String()) - } - } - - h.Tags = tags - - id = histImg.Parent - if id == "" { - break - } - histImg, err = daemon.GetImage(id.String()) - if err != nil { - break - } - } - - return history, nil -} diff --git a/daemon/image_inspect.go b/daemon/image_inspect.go deleted file mode 100644 index 5b0022688..000000000 --- a/daemon/image_inspect.go +++ /dev/null @@ -1,81 +0,0 @@ -package daemon - -import ( - "fmt" - "time" - - "github.com/docker/docker/layer" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" -) - -// LookupImage looks up an image by name and returns it as an ImageInspect -// structure. 
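LookupImage (below) splits an image's references into RepoTags and RepoDigests with a type switch on the reference interfaces. A self-contained sketch of the same dispatch, using local stand-ins for reference.NamedTagged and reference.Canonical (the sample strings, including the digest, are made up):

```go
package main

import "fmt"

// Local stand-ins for the reference interfaces used by LookupImage; the
// real ones live in github.com/docker/docker/reference.
type namedRef interface{ String() string }

type taggedRef string    // models reference.NamedTagged
type canonicalRef string // models reference.Canonical

func (t taggedRef) String() string    { return string(t) }
func (c canonicalRef) String() string { return string(c) }

func main() {
	refs := []namedRef{
		taggedRef("busybox:latest"),
		canonicalRef("busybox@sha256:<digest>"), // placeholder digest
	}

	repoTags := []string{}
	repoDigests := []string{}
	for _, ref := range refs {
		// Mirrors the switch ref.(type) in LookupImage: tagged references
		// become RepoTags, canonical (digest) references become RepoDigests.
		switch r := ref.(type) {
		case taggedRef:
			repoTags = append(repoTags, r.String())
		case canonicalRef:
			repoDigests = append(repoDigests, r.String())
		}
	}
	fmt.Println("RepoTags:   ", repoTags)
	fmt.Println("RepoDigests:", repoDigests)
}
```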
-func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { - img, err := daemon.GetImage(name) - if err != nil { - return nil, fmt.Errorf("No such image: %s", name) - } - - refs := daemon.referenceStore.References(img.ID()) - repoTags := []string{} - repoDigests := []string{} - for _, ref := range refs { - switch ref.(type) { - case reference.NamedTagged: - repoTags = append(repoTags, ref.String()) - case reference.Canonical: - repoDigests = append(repoDigests, ref.String()) - } - } - - var size int64 - var layerMetadata map[string]string - layerID := img.RootFS.ChainID() - if layerID != "" { - l, err := daemon.layerStore.Get(layerID) - if err != nil { - return nil, err - } - defer layer.ReleaseAndLog(daemon.layerStore, l) - size, err = l.Size() - if err != nil { - return nil, err - } - - layerMetadata, err = l.Metadata() - if err != nil { - return nil, err - } - } - - comment := img.Comment - if len(comment) == 0 && len(img.History) > 0 { - comment = img.History[len(img.History)-1].Comment - } - - imageInspect := &types.ImageInspect{ - ID: img.ID().String(), - RepoTags: repoTags, - RepoDigests: repoDigests, - Parent: img.Parent.String(), - Comment: comment, - Created: img.Created.Format(time.RFC3339Nano), - Container: img.Container, - ContainerConfig: &img.ContainerConfig, - DockerVersion: img.DockerVersion, - Author: img.Author, - Config: img.Config, - Architecture: img.Architecture, - Os: img.OS, - Size: size, - VirtualSize: size, // TODO: field unused, deprecate - RootFS: rootFSToAPIType(img.RootFS), - } - - imageInspect.GraphDriver.Name = daemon.GraphDriverName() - - imageInspect.GraphDriver.Data = layerMetadata - - return imageInspect, nil -} diff --git a/daemon/image_pull.go b/daemon/image_pull.go deleted file mode 100644 index 06ffa06fb..000000000 --- a/daemon/image_pull.go +++ /dev/null @@ -1,106 +0,0 @@ -package daemon - -import ( - "io" - "strings" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/builder" - "github.com/docker/docker/distribution" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// PullImage initiates a pull operation. image is the repository name to pull, and -// tag may be either empty, or indicate a specific tag to pull. -func (daemon *Daemon) PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { - // Special case: "pull -a" may send an image name with a - // trailing :. This is ugly, but let's not break API - // compatibility. - image = strings.TrimSuffix(image, ":") - - ref, err := reference.ParseNamed(image) - if err != nil { - return err - } - - if tag != "" { - // The "tag" could actually be a digest. - var dgst digest.Digest - dgst, err = digest.ParseDigest(tag) - if err == nil { - ref, err = reference.WithDigest(ref, dgst) - } else { - ref, err = reference.WithTag(ref, tag) - } - if err != nil { - return err - } - } - - return daemon.pullImageWithReference(ctx, ref, metaHeaders, authConfig, outStream) -} - -// PullOnBuild tells Docker to pull image referenced by `name`. 
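One subtlety in PullImage above: the tag argument may really be a digest, so it is parsed as a digest first and only then treated as a tag. A rough standalone sketch of that dispatch; refFor is a hypothetical helper, and the sha256: prefix check stands in for digest.ParseDigest purely for brevity:

```go
package main

import (
	"fmt"
	"strings"
)

// refFor models the reference assembly in PullImage: strip the trailing
// ":" that "pull -a" may send, then decide whether the "tag" is actually
// a digest. The real code uses digest.ParseDigest and
// reference.WithDigest/WithTag; the prefix check below is a stand-in.
func refFor(image, tag string) string {
	image = strings.TrimSuffix(image, ":")
	if tag == "" {
		return image
	}
	if strings.HasPrefix(tag, "sha256:") { // treat as a digest
		return image + "@" + tag
	}
	return image + ":" + tag
}

func main() {
	fmt.Println(refFor("library/ubuntu", "16.04"))
	fmt.Println(refFor("library/ubuntu", "sha256:45bc58500fa3")) // sample digest
	fmt.Println(refFor("library/ubuntu:", ""))                   // "pull -a" case
}
```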
-func (daemon *Daemon) PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) { - ref, err := reference.ParseNamed(name) - if err != nil { - return nil, err - } - ref = reference.WithDefaultTag(ref) - - pullRegistryAuth := &types.AuthConfig{} - if len(authConfigs) > 0 { - // The request came with a full auth config file, we prefer to use that - repoInfo, err := daemon.RegistryService.ResolveRepository(ref) - if err != nil { - return nil, err - } - - resolvedConfig := registry.ResolveAuthConfig( - authConfigs, - repoInfo.Index, - ) - pullRegistryAuth = &resolvedConfig - } - - if err := daemon.pullImageWithReference(ctx, ref, nil, pullRegistryAuth, output); err != nil { - return nil, err - } - return daemon.GetImage(name) -} - -func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { - // Include a buffer so that slow client connections don't affect - // transfer performance. - progressChan := make(chan progress.Progress, 100) - - writesDone := make(chan struct{}) - - ctx, cancelFunc := context.WithCancel(ctx) - - go func() { - writeDistributionProgress(cancelFunc, outStream, progressChan) - close(writesDone) - }() - - imagePullConfig := &distribution.ImagePullConfig{ - MetaHeaders: metaHeaders, - AuthConfig: authConfig, - ProgressOutput: progress.ChanOutput(progressChan), - RegistryService: daemon.RegistryService, - ImageEventLogger: daemon.LogImageEvent, - MetadataStore: daemon.distributionMetadataStore, - ImageStore: daemon.imageStore, - ReferenceStore: daemon.referenceStore, - DownloadManager: daemon.downloadManager, - } - - err := distribution.Pull(ctx, ref, imagePullConfig) - close(progressChan) - <-writesDone - return err -} diff --git a/daemon/image_push.go b/daemon/image_push.go deleted file mode 100644 index 11c89709f..000000000 --- a/daemon/image_push.go +++ /dev/null @@ -1,58 +0,0 @@ -package daemon - -import ( - "io" - - "github.com/docker/docker/distribution" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// PushImage initiates a push operation on the repository named localName. -func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { - ref, err := reference.ParseNamed(image) - if err != nil { - return err - } - if tag != "" { - // Push by digest is not supported, so only tags are supported. - ref, err = reference.WithTag(ref, tag) - if err != nil { - return err - } - } - - // Include a buffer so that slow client connections don't affect - // transfer performance. 
- progressChan := make(chan progress.Progress, 100) - - writesDone := make(chan struct{}) - - ctx, cancelFunc := context.WithCancel(ctx) - - go func() { - writeDistributionProgress(cancelFunc, outStream, progressChan) - close(writesDone) - }() - - imagePushConfig := &distribution.ImagePushConfig{ - MetaHeaders: metaHeaders, - AuthConfig: authConfig, - ProgressOutput: progress.ChanOutput(progressChan), - RegistryService: daemon.RegistryService, - ImageEventLogger: daemon.LogImageEvent, - MetadataStore: daemon.distributionMetadataStore, - LayerStore: daemon.layerStore, - ImageStore: daemon.imageStore, - ReferenceStore: daemon.referenceStore, - TrustKey: daemon.trustKey, - UploadManager: daemon.uploadManager, - } - - err = distribution.Push(ctx, ref, imagePushConfig) - close(progressChan) - <-writesDone - return err -} diff --git a/daemon/image_tag.go b/daemon/image_tag.go deleted file mode 100644 index 01127d470..000000000 --- a/daemon/image_tag.go +++ /dev/null @@ -1,37 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/image" - "github.com/docker/docker/reference" -) - -// TagImage creates the tag specified by newTag, pointing to the image named -// imageName (alternatively, imageName can also be an image ID). -func (daemon *Daemon) TagImage(imageName, repository, tag string) error { - imageID, err := daemon.GetImageID(imageName) - if err != nil { - return err - } - - newTag, err := reference.WithName(repository) - if err != nil { - return err - } - if tag != "" { - if newTag, err = reference.WithTag(newTag, tag); err != nil { - return err - } - } - - return daemon.TagImageWithReference(imageID, newTag) -} - -// TagImageWithReference adds the given reference to the image ID provided. -func (daemon *Daemon) TagImageWithReference(imageID image.ID, newTag reference.Named) error { - if err := daemon.referenceStore.AddTag(newTag, imageID, true); err != nil { - return err - } - - daemon.LogImageEvent(imageID.String(), newTag.String(), "tag") - return nil -} diff --git a/daemon/images.go b/daemon/images.go deleted file mode 100644 index 0060cdafb..000000000 --- a/daemon/images.go +++ /dev/null @@ -1,193 +0,0 @@ -package daemon - -import ( - "fmt" - "path" - "sort" - - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" -) - -var acceptedImageFilterTags = map[string]bool{ - "dangling": true, - "label": true, - "before": true, - "since": true, -} - -// byCreated is a temporary type used to sort a list of images by creation -// time. -type byCreated []*types.Image - -func (r byCreated) Len() int { return len(r) } -func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } - -// Map returns a map of all images in the ImageStore -func (daemon *Daemon) Map() map[image.ID]*image.Image { - return daemon.imageStore.Map() -} - -// Images returns a filtered list of images. filterArgs is a JSON-encoded set -// of filter arguments which will be interpreted by api/types/filters. -// filter is a shell glob string applied to repository names. The argument -// named all controls whether all images in the graph are filtered, or just -// the heads. 
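For a sense of what reaches Images (below) in filterArgs, here is a sketch of the JSON a client might send. The exact wire format is owned by api/types/filters; the map[string][]string shape used here matches the legacy encoding accepted by FromParam, which is an assumption kept deliberately simple:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Filter for dangling images carrying a particular label.
	args := map[string][]string{
		"dangling": {"true"},
		"label":    {"maintainer=example"}, // sample label, made up
	}
	filterArgs, err := json.Marshal(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(filterArgs))
	// A string like this would then be passed as:
	//   daemon.Images(string(filterArgs), "", false)
}
```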
-func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Image, error) { - var ( - allImages map[image.ID]*image.Image - err error - danglingOnly = false - ) - - imageFilters, err := filters.FromParam(filterArgs) - if err != nil { - return nil, err - } - if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { - return nil, err - } - - if imageFilters.Include("dangling") { - if imageFilters.ExactMatch("dangling", "true") { - danglingOnly = true - } else if !imageFilters.ExactMatch("dangling", "false") { - return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling")) - } - } - if danglingOnly { - allImages = daemon.imageStore.Heads() - } else { - allImages = daemon.imageStore.Map() - } - - var beforeFilter, sinceFilter *image.Image - err = imageFilters.WalkValues("before", func(value string) error { - beforeFilter, err = daemon.GetImage(value) - return err - }) - if err != nil { - return nil, err - } - - err = imageFilters.WalkValues("since", func(value string) error { - sinceFilter, err = daemon.GetImage(value) - return err - }) - if err != nil { - return nil, err - } - - images := []*types.Image{} - - var filterTagged bool - if filter != "" { - filterRef, err := reference.ParseNamed(filter) - if err == nil { // parse error means wildcard repo - if _, ok := filterRef.(reference.NamedTagged); ok { - filterTagged = true - } - } - } - - for id, img := range allImages { - if beforeFilter != nil { - if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) { - continue - } - } - - if sinceFilter != nil { - if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) { - continue - } - } - - if imageFilters.Include("label") { - // Very old image that do not have image.Config (or even labels) - if img.Config == nil { - continue - } - // We are now sure image.Config is not nil - if !imageFilters.MatchKVList("label", img.Config.Labels) { - continue - } - } - - layerID := img.RootFS.ChainID() - var size int64 - if layerID != "" { - l, err := daemon.layerStore.Get(layerID) - if err != nil { - return nil, err - } - - size, err = l.Size() - layer.ReleaseAndLog(daemon.layerStore, l) - if err != nil { - return nil, err - } - } - - newImage := newImage(img, size) - - for _, ref := range daemon.referenceStore.References(id) { - if filter != "" { // filter by tag/repo name - if filterTagged { // filter by tag, require full ref match - if ref.String() != filter { - continue - } - } else if matched, err := path.Match(filter, ref.Name()); !matched || err != nil { // name only match, FIXME: docs say exact - continue - } - } - if _, ok := ref.(reference.Canonical); ok { - newImage.RepoDigests = append(newImage.RepoDigests, ref.String()) - } - if _, ok := ref.(reference.NamedTagged); ok { - newImage.RepoTags = append(newImage.RepoTags, ref.String()) - } - } - if newImage.RepoDigests == nil && newImage.RepoTags == nil { - if all || len(daemon.imageStore.Children(id)) == 0 { - - if imageFilters.Include("dangling") && !danglingOnly { - //dangling=false case, so dangling image is not needed - continue - } - if filter != "" { // skip images with no references if filtering by tag - continue - } - newImage.RepoDigests = []string{"@"} - newImage.RepoTags = []string{":"} - } else { - continue - } - } else if danglingOnly && len(newImage.RepoTags) > 0 { - continue - } - - images = append(images, newImage) - } - - sort.Sort(sort.Reverse(byCreated(images))) - - return images, nil -} - -func newImage(image 
*image.Image, size int64) *types.Image { - newImage := new(types.Image) - newImage.ParentID = image.Parent.String() - newImage.ID = image.ID().String() - newImage.Created = image.Created.Unix() - newImage.Size = size - newImage.VirtualSize = size - if image.Config != nil { - newImage.Labels = image.Config.Labels - } - return newImage -} diff --git a/daemon/import.go b/daemon/import.go deleted file mode 100644 index b980f210f..000000000 --- a/daemon/import.go +++ /dev/null @@ -1,135 +0,0 @@ -package daemon - -import ( - "encoding/json" - "errors" - "io" - "net/http" - "net/url" - "runtime" - "time" - - "github.com/docker/docker/builder/dockerfile" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types/container" -) - -// ImportImage imports an image, getting the archived layer data either from -// inConfig (if src is "-"), or from a URI specified in src. Progress output is -// written to outStream. Repository and tag names can optionally be given in -// the repo and tag arguments, respectively. -func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { - var ( - sf = streamformatter.NewJSONStreamFormatter() - rc io.ReadCloser - resp *http.Response - newRef reference.Named - ) - - if repository != "" { - var err error - newRef, err = reference.ParseNamed(repository) - if err != nil { - return err - } - - if _, isCanonical := newRef.(reference.Canonical); isCanonical { - return errors.New("cannot import digest reference") - } - - if tag != "" { - newRef, err = reference.WithTag(newRef, tag) - if err != nil { - return err - } - } - } - - config, err := dockerfile.BuildFromConfig(&container.Config{}, changes) - if err != nil { - return err - } - if src == "-" { - rc = inConfig - } else { - inConfig.Close() - u, err := url.Parse(src) - if err != nil { - return err - } - if u.Scheme == "" { - u.Scheme = "http" - u.Host = src - u.Path = "" - } - outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) - resp, err = httputils.Download(u.String()) - if err != nil { - return err - } - progressOutput := sf.NewProgressOutput(outStream, true) - rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") - } - - defer rc.Close() - if len(msg) == 0 { - msg = "Imported from " + src - } - - inflatedLayerData, err := archive.DecompressStream(rc) - if err != nil { - return err - } - // TODO: support windows baselayer? 
- l, err := daemon.layerStore.Register(inflatedLayerData, "") - if err != nil { - return err - } - defer layer.ReleaseAndLog(daemon.layerStore, l) - - created := time.Now().UTC() - imgConfig, err := json.Marshal(&image.Image{ - V1Image: image.V1Image{ - DockerVersion: dockerversion.Version, - Config: config, - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - Created: created, - Comment: msg, - }, - RootFS: &image.RootFS{ - Type: "layers", - DiffIDs: []layer.DiffID{l.DiffID()}, - }, - History: []image.History{{ - Created: created, - Comment: msg, - }}, - }) - if err != nil { - return err - } - - id, err := daemon.imageStore.Create(imgConfig) - if err != nil { - return err - } - - // FIXME: connect with commit code and call refstore directly - if newRef != nil { - if err := daemon.TagImageWithReference(id, newRef); err != nil { - return err - } - } - - daemon.LogImageEvent(id.String(), id.String(), "import") - outStream.Write(sf.FormatStatus("", id.String())) - return nil -} diff --git a/daemon/info.go b/daemon/info.go deleted file mode 100644 index c42dae06d..000000000 --- a/daemon/info.go +++ /dev/null @@ -1,186 +0,0 @@ -package daemon - -import ( - "os" - "runtime" - "sync/atomic" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/parsers/operatingsystem" - "github.com/docker/docker/pkg/platform" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/registry" - "github.com/docker/docker/utils" - "github.com/docker/docker/volume/drivers" - "github.com/docker/engine-api/types" - "github.com/docker/go-connections/sockets" -) - -// SystemInfo returns information about the host server the daemon is running on. 
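SystemInfo (below) tallies container states with atomic counters, which suggests the store's ApplyAll callback may run concurrently. A self-contained model of that tally; fakeContainer and fakeStore are local stand-ins for the daemon's container store:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type fakeContainer struct{ state string }

type fakeStore []*fakeContainer

// ApplyAll visits every container; the real store may fan the callback
// out across goroutines, hence the atomic counters below.
func (s fakeStore) ApplyAll(f func(*fakeContainer)) {
	for _, c := range s {
		f(c)
	}
}

func main() {
	store := fakeStore{{"running"}, {"paused"}, {"exited"}, {"running"}}

	var cRunning, cPaused, cStopped int32
	store.ApplyAll(func(c *fakeContainer) {
		switch c.state {
		case "paused":
			atomic.AddInt32(&cPaused, 1)
		case "running":
			atomic.AddInt32(&cRunning, 1)
		default:
			atomic.AddInt32(&cStopped, 1)
		}
	})

	fmt.Printf("Containers: %d (running=%d paused=%d stopped=%d)\n",
		cRunning+cPaused+cStopped, cRunning, cPaused, cStopped)
}
```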
-func (daemon *Daemon) SystemInfo() (*types.Info, error) { - kernelVersion := "" - if kv, err := kernel.GetKernelVersion(); err != nil { - logrus.Warnf("Could not get kernel version: %v", err) - } else { - kernelVersion = kv.String() - } - - operatingSystem := "" - if s, err := operatingsystem.GetOperatingSystem(); err != nil { - logrus.Warnf("Could not get operating system name: %v", err) - } else { - operatingSystem = s - } - - // Don't do containerized check on Windows - if runtime.GOOS != "windows" { - if inContainer, err := operatingsystem.IsContainerized(); err != nil { - logrus.Errorf("Could not determine if daemon is containerized: %v", err) - operatingSystem += " (error determining if containerized)" - } else if inContainer { - operatingSystem += " (containerized)" - } - } - - meminfo, err := system.ReadMemInfo() - if err != nil { - logrus.Errorf("Could not read system memory info: %v", err) - meminfo = &system.MemInfo{} - } - - sysInfo := sysinfo.New(true) - - var cRunning, cPaused, cStopped int32 - daemon.containers.ApplyAll(func(c *container.Container) { - switch c.StateString() { - case "paused": - atomic.AddInt32(&cPaused, 1) - case "running": - atomic.AddInt32(&cRunning, 1) - default: - atomic.AddInt32(&cStopped, 1) - } - }) - - var securityOptions []string - if sysInfo.AppArmor { - securityOptions = append(securityOptions, "apparmor") - } - if sysInfo.Seccomp && supportsSeccomp { - securityOptions = append(securityOptions, "seccomp") - } - if selinuxEnabled() { - securityOptions = append(securityOptions, "selinux") - } - - v := &types.Info{ - ID: daemon.ID, - Containers: int(cRunning + cPaused + cStopped), - ContainersRunning: int(cRunning), - ContainersPaused: int(cPaused), - ContainersStopped: int(cStopped), - Images: len(daemon.imageStore.Map()), - Driver: daemon.GraphDriverName(), - DriverStatus: daemon.layerStore.DriverStatus(), - Plugins: daemon.showPluginsInfo(), - IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, - BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, - BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, - Debug: utils.IsDebugEnabled(), - NFd: fileutils.GetTotalUsedFds(), - NGoroutines: runtime.NumGoroutine(), - SystemTime: time.Now().Format(time.RFC3339Nano), - LoggingDriver: daemon.defaultLogConfig.Type, - CgroupDriver: daemon.getCgroupDriver(), - NEventsListener: daemon.EventsService.SubscribersCount(), - KernelVersion: kernelVersion, - OperatingSystem: operatingSystem, - IndexServerAddress: registry.IndexServer, - OSType: platform.OSType, - Architecture: platform.Architecture, - RegistryConfig: daemon.RegistryService.ServiceConfig(), - NCPU: sysinfo.NumCPU(), - MemTotal: meminfo.MemTotal, - DockerRootDir: daemon.configStore.Root, - Labels: daemon.configStore.Labels, - ExperimentalBuild: utils.ExperimentalBuild(), - ServerVersion: dockerversion.Version, - ClusterStore: daemon.configStore.ClusterStore, - ClusterAdvertise: daemon.configStore.ClusterAdvertise, - HTTPProxy: sockets.GetProxyEnv("http_proxy"), - HTTPSProxy: sockets.GetProxyEnv("https_proxy"), - NoProxy: sockets.GetProxyEnv("no_proxy"), - SecurityOptions: securityOptions, - } - - // TODO Windows. Refactor this more once sysinfo is refactored into - // platform specific code. On Windows, sysinfo.cgroupMemInfo and - // sysinfo.cgroupCpuInfo will be nil otherwise and cause a SIGSEGV if - // an attempt is made to access through them. 
- if runtime.GOOS != "windows" { - v.MemoryLimit = sysInfo.MemoryLimit - v.SwapLimit = sysInfo.SwapLimit - v.KernelMemory = sysInfo.KernelMemory - v.OomKillDisable = sysInfo.OomKillDisable - v.CPUCfsPeriod = sysInfo.CPUCfsPeriod - v.CPUCfsQuota = sysInfo.CPUCfsQuota - v.CPUShares = sysInfo.CPUShares - v.CPUSet = sysInfo.Cpuset - v.Runtimes = daemon.configStore.GetAllRuntimes() - v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() - } - - hostname := "" - if hn, err := os.Hostname(); err != nil { - logrus.Warnf("Could not get hostname: %v", err) - } else { - hostname = hn - } - v.Name = hostname - - return v, nil -} - -// SystemVersion returns version information about the daemon. -func (daemon *Daemon) SystemVersion() types.Version { - v := types.Version{ - Version: dockerversion.Version, - GitCommit: dockerversion.GitCommit, - GoVersion: runtime.Version(), - Os: runtime.GOOS, - Arch: runtime.GOARCH, - BuildTime: dockerversion.BuildTime, - Experimental: utils.ExperimentalBuild(), - } - - kernelVersion := "" - if kv, err := kernel.GetKernelVersion(); err != nil { - logrus.Warnf("Could not get kernel version: %v", err) - } else { - kernelVersion = kv.String() - } - v.KernelVersion = kernelVersion - - return v -} - -func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { - var pluginsInfo types.PluginsInfo - - pluginsInfo.Volume = volumedrivers.GetDriverList() - - networkDriverList := daemon.GetNetworkDriverList() - for nd := range networkDriverList { - pluginsInfo.Network = append(pluginsInfo.Network, nd) - } - - pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins - - return pluginsInfo -} diff --git a/daemon/inspect.go b/daemon/inspect.go deleted file mode 100644 index 6499fb89c..000000000 --- a/daemon/inspect.go +++ /dev/null @@ -1,250 +0,0 @@ -package daemon - -import ( - "fmt" - "time" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/network" - "github.com/docker/engine-api/types" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/versions" - "github.com/docker/engine-api/types/versions/v1p20" -) - -// ContainerInspect returns low-level information about a -// container. Returns an error if the container cannot be found, or if -// there is an error getting the data. -func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (interface{}, error) { - switch { - case versions.LessThan(version, "1.20"): - return daemon.containerInspectPre120(name) - case versions.Equal(version, "1.20"): - return daemon.containerInspect120(name) - } - return daemon.ContainerInspectCurrent(name, size) -} - -// ContainerInspectCurrent returns low-level information about a -// container in a most recent api version. 
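The API-version dispatch at the top of ContainerInspect above can be pictured with a toy version switch. The real code compares versions with engine-api's versions.LessThan/Equal; the plain string comparison below only works for the two-segment, equal-width versions shown and is an acknowledged simplification:

```go
package main

import "fmt"

// inspectFor models the dispatch in ContainerInspect: clients older than
// API 1.20 get the pre-1.20 shape, 1.20 gets its own shape, and anything
// newer gets the current one. String comparison is a stand-in for
// versions.LessThan/Equal and assumes same-width version strings.
func inspectFor(version string) string {
	switch {
	case version < "1.20":
		return "containerInspectPre120"
	case version == "1.20":
		return "containerInspect120"
	}
	return "ContainerInspectCurrent"
}

func main() {
	for _, v := range []string{"1.19", "1.20", "1.24"} {
		fmt.Printf("API %s -> %s\n", v, inspectFor(v))
	}
}
```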
-func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - container.Lock() - defer container.Unlock() - - base, err := daemon.getInspectData(container, size) - if err != nil { - return nil, err - } - - mountPoints := addMountPoints(container) - networkSettings := &types.NetworkSettings{ - NetworkSettingsBase: types.NetworkSettingsBase{ - Bridge: container.NetworkSettings.Bridge, - SandboxID: container.NetworkSettings.SandboxID, - HairpinMode: container.NetworkSettings.HairpinMode, - LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, - LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen, - Ports: container.NetworkSettings.Ports, - SandboxKey: container.NetworkSettings.SandboxKey, - SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses, - SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, - }, - DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks), - Networks: container.NetworkSettings.Networks, - } - - return &types.ContainerJSON{ - ContainerJSONBase: base, - Mounts: mountPoints, - Config: container.Config, - NetworkSettings: networkSettings, - }, nil -} - -// containerInspect120 serializes the master version of a container into a json type. -func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - container.Lock() - defer container.Unlock() - - base, err := daemon.getInspectData(container, false) - if err != nil { - return nil, err - } - - mountPoints := addMountPoints(container) - config := &v1p20.ContainerConfig{ - Config: container.Config, - MacAddress: container.Config.MacAddress, - NetworkDisabled: container.Config.NetworkDisabled, - ExposedPorts: container.Config.ExposedPorts, - VolumeDriver: container.HostConfig.VolumeDriver, - } - networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) - - return &v1p20.ContainerJSON{ - ContainerJSONBase: base, - Mounts: mountPoints, - Config: config, - NetworkSettings: networkSettings, - }, nil -} - -func (daemon *Daemon) getInspectData(container *container.Container, size bool) (*types.ContainerJSONBase, error) { - // make a copy to play with - hostConfig := *container.HostConfig - - children := daemon.children(container) - hostConfig.Links = nil // do not expose the internal structure - for linkAlias, child := range children { - hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) - } - - var containerHealth *types.Health - if container.State.Health != nil { - containerHealth = &types.Health{ - Status: container.State.Health.Status, - FailingStreak: container.State.Health.FailingStreak, - Log: append([]*types.HealthcheckResult{}, container.State.Health.Log...), - } - } - - containerState := &types.ContainerState{ - Status: container.State.StateString(), - Running: container.State.Running, - Paused: container.State.Paused, - Restarting: container.State.Restarting, - OOMKilled: container.State.OOMKilled, - Dead: container.State.Dead, - Pid: container.State.Pid, - ExitCode: container.State.ExitCode(), - Error: container.State.Error(), - StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), - FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), - Health: containerHealth, - } - - contJSONBase 
:= &types.ContainerJSONBase{ - ID: container.ID, - Created: container.Created.Format(time.RFC3339Nano), - Path: container.Path, - Args: container.Args, - State: containerState, - Image: container.ImageID.String(), - LogPath: container.LogPath, - Name: container.Name, - RestartCount: container.RestartCount, - Driver: container.Driver, - MountLabel: container.MountLabel, - ProcessLabel: container.ProcessLabel, - ExecIDs: container.GetExecIDs(), - HostConfig: &hostConfig, - } - - var ( - sizeRw int64 - sizeRootFs int64 - ) - if size { - sizeRw, sizeRootFs = daemon.getSize(container) - contJSONBase.SizeRw = &sizeRw - contJSONBase.SizeRootFs = &sizeRootFs - } - - // Now set any platform-specific fields - contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) - - contJSONBase.GraphDriver.Name = container.Driver - - graphDriverData, err := container.RWLayer.Metadata() - if err != nil { - return nil, err - } - contJSONBase.GraphDriver.Data = graphDriverData - - return contJSONBase, nil -} - -// ContainerExecInspect returns low-level information about the exec -// command. An error is returned if the exec cannot be found. -func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, error) { - e, err := daemon.getExecConfig(id) - if err != nil { - return nil, err - } - - pc := inspectExecProcessConfig(e) - - return &backend.ExecInspect{ - ID: e.ID, - Running: e.Running, - ExitCode: e.ExitCode, - ProcessConfig: pc, - OpenStdin: e.OpenStdin, - OpenStdout: e.OpenStdout, - OpenStderr: e.OpenStderr, - CanRemove: e.CanRemove, - ContainerID: e.ContainerID, - DetachKeys: e.DetachKeys, - }, nil -} - -// VolumeInspect looks up a volume by name. An error is returned if -// the volume cannot be found. -func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { - v, err := daemon.volumes.Get(name) - if err != nil { - return nil, err - } - apiV := volumeToAPIType(v) - apiV.Mountpoint = v.Path() - apiV.Status = v.Status() - return apiV, nil -} - -func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { - result := &v1p20.NetworkSettings{ - NetworkSettingsBase: types.NetworkSettingsBase{ - Bridge: settings.Bridge, - SandboxID: settings.SandboxID, - HairpinMode: settings.HairpinMode, - LinkLocalIPv6Address: settings.LinkLocalIPv6Address, - LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen, - Ports: settings.Ports, - SandboxKey: settings.SandboxKey, - SecondaryIPAddresses: settings.SecondaryIPAddresses, - SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses, - }, - DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks), - } - - return result -} - -// getDefaultNetworkSettings creates the deprecated structure that holds the information -// about the bridge network for a container. 
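getDefaultNetworkSettings (below) simply copies fields from the "bridge" entry of the per-network endpoint map into the deprecated top-level fields. A small local model of that flattening (endpointSettings is a stand-in for the engine-api type, and the addresses are made up):

```go
package main

import "fmt"

type endpointSettings struct {
	IPAddress string
	Gateway   string
}

// defaultSettings mirrors the lookup in getDefaultNetworkSettings: the
// legacy fields are populated only when a "bridge" endpoint exists.
func defaultSettings(networks map[string]*endpointSettings) (ip, gw string) {
	if bridge, ok := networks["bridge"]; ok {
		ip, gw = bridge.IPAddress, bridge.Gateway
	}
	return ip, gw
}

func main() {
	nets := map[string]*endpointSettings{
		"bridge": {IPAddress: "172.17.0.2", Gateway: "172.17.0.1"},
	}
	ip, gw := defaultSettings(nets)
	fmt.Printf("IPAddress=%s Gateway=%s\n", ip, gw)
}
```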
-func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*networktypes.EndpointSettings) types.DefaultNetworkSettings { - var settings types.DefaultNetworkSettings - - if defaultNetwork, ok := networks["bridge"]; ok { - settings.EndpointID = defaultNetwork.EndpointID - settings.Gateway = defaultNetwork.Gateway - settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address - settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen - settings.IPAddress = defaultNetwork.IPAddress - settings.IPPrefixLen = defaultNetwork.IPPrefixLen - settings.IPv6Gateway = defaultNetwork.IPv6Gateway - settings.MacAddress = defaultNetwork.MacAddress - } - return settings -} diff --git a/daemon/inspect_solaris.go b/daemon/inspect_solaris.go deleted file mode 100644 index 2e49bef3a..000000000 --- a/daemon/inspect_solaris.go +++ /dev/null @@ -1,40 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/engine-api/types" -) - -// This sets platform-specific fields -func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { - return contJSONBase -} - -// containerInspectPre120 get containers for pre 1.20 APIs. -func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { - return daemon.containerInspectCurrent(name, false) -} - -func addMountPoints(container *container.Container) []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - RW: m.RW, - }) - } - return mountPoints -} - -func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { - return &backend.ExecProcessConfig{ - Tty: e.Tty, - Entrypoint: e.Entrypoint, - Arguments: e.Args, - } -} diff --git a/daemon/inspect_unix.go b/daemon/inspect_unix.go deleted file mode 100644 index 9d72d145d..000000000 --- a/daemon/inspect_unix.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build !windows,!solaris - -package daemon - -import ( - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/versions/v1p19" -) - -// This sets platform-specific fields -func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { - contJSONBase.AppArmorProfile = container.AppArmorProfile - contJSONBase.ResolvConfPath = container.ResolvConfPath - contJSONBase.HostnamePath = container.HostnamePath - contJSONBase.HostsPath = container.HostsPath - - return contJSONBase -} - -// containerInspectPre120 gets containers for pre 1.20 APIs. 
-func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - container.Lock() - defer container.Unlock() - - base, err := daemon.getInspectData(container, false) - if err != nil { - return nil, err - } - - volumes := make(map[string]string) - volumesRW := make(map[string]bool) - for _, m := range container.MountPoints { - volumes[m.Destination] = m.Path() - volumesRW[m.Destination] = m.RW - } - - config := &v1p19.ContainerConfig{ - Config: container.Config, - MacAddress: container.Config.MacAddress, - NetworkDisabled: container.Config.NetworkDisabled, - ExposedPorts: container.Config.ExposedPorts, - VolumeDriver: container.HostConfig.VolumeDriver, - Memory: container.HostConfig.Memory, - MemorySwap: container.HostConfig.MemorySwap, - CPUShares: container.HostConfig.CPUShares, - CPUSet: container.HostConfig.CpusetCpus, - } - networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) - - return &v1p19.ContainerJSON{ - ContainerJSONBase: base, - Volumes: volumes, - VolumesRW: volumesRW, - Config: config, - NetworkSettings: networkSettings, - }, nil -} - -func addMountPoints(container *container.Container) []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - Mode: m.Mode, - RW: m.RW, - Propagation: m.Propagation, - }) - } - return mountPoints -} - -func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { - return &backend.ExecProcessConfig{ - Tty: e.Tty, - Entrypoint: e.Entrypoint, - Arguments: e.Args, - Privileged: &e.Privileged, - User: e.User, - } -} diff --git a/daemon/inspect_windows.go b/daemon/inspect_windows.go deleted file mode 100644 index a23f703e0..000000000 --- a/daemon/inspect_windows.go +++ /dev/null @@ -1,40 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/engine-api/types" -) - -// This sets platform-specific fields -func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { - return contJSONBase -} - -func addMountPoints(container *container.Container) []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - RW: m.RW, - }) - } - return mountPoints -} - -// containerInspectPre120 get containers for pre 1.20 APIs. 
-func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { - return daemon.ContainerInspectCurrent(name, false) -} - -func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { - return &backend.ExecProcessConfig{ - Tty: e.Tty, - Entrypoint: e.Entrypoint, - Arguments: e.Args, - } -} diff --git a/daemon/keys.go b/daemon/keys.go deleted file mode 100644 index 055d488a5..000000000 --- a/daemon/keys.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build linux - -package daemon - -import ( - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" -) - -const ( - rootKeyFile = "/proc/sys/kernel/keys/root_maxkeys" - rootBytesFile = "/proc/sys/kernel/keys/root_maxbytes" - rootKeyLimit = 1000000 - // it is standard configuration to allocate 25 bytes per key - rootKeyByteMultiplier = 25 -) - -// ModifyRootKeyLimit checks to see if the root key limit is set to -// at least 1000000 and changes it to that limit along with the maxbytes -// allocated to the keys at a 25 to 1 multiplier. -func ModifyRootKeyLimit() error { - value, err := readRootKeyLimit(rootKeyFile) - if err != nil { - return err - } - if value < rootKeyLimit { - return setRootKeyLimit(rootKeyLimit) - } - return nil -} - -func setRootKeyLimit(limit int) error { - keys, err := os.OpenFile(rootKeyFile, os.O_WRONLY, 0) - if err != nil { - return err - } - defer keys.Close() - if _, err := fmt.Fprintf(keys, "%d", limit); err != nil { - return err - } - bytes, err := os.OpenFile(rootBytesFile, os.O_WRONLY, 0) - if err != nil { - return err - } - defer bytes.Close() - _, err = fmt.Fprintf(bytes, "%d", limit*rootKeyByteMultiplier) - return err -} - -func readRootKeyLimit(path string) (int, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return -1, err - } - return strconv.Atoi(strings.Trim(string(data), "\n")) -} diff --git a/daemon/keys_unsupported.go b/daemon/keys_unsupported.go deleted file mode 100644 index b17255940..000000000 --- a/daemon/keys_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !linux - -package daemon - -// ModifyRootKeyLimit is an noop on unsupported platforms. -func ModifyRootKeyLimit() error { - return nil -} diff --git a/daemon/kill.go b/daemon/kill.go deleted file mode 100644 index a140a9d5e..000000000 --- a/daemon/kill.go +++ /dev/null @@ -1,157 +0,0 @@ -package daemon - -import ( - "fmt" - "runtime" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/signal" -) - -type errNoSuchProcess struct { - pid int - signal int -} - -func (e errNoSuchProcess) Error() string { - return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal) -} - -// isErrNoSuchProcess returns true if the error -// is an instance of errNoSuchProcess. -func isErrNoSuchProcess(err error) bool { - _, ok := err.(errNoSuchProcess) - return ok -} - -// ContainerKill sends signal to the container -// If no signal is given (sig 0), then Kill with SIGKILL and wait -// for the container to exit. -// If a signal is given, then just send it to the container and return. 
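The signal dispatch described above splits into two paths: sig 0 or SIGKILL means kill-and-wait, anything else is delivered and the call returns. A hedged sketch of just that branch; the returned strings stand in for calls to daemon.Kill and daemon.killWithSignal, which are not reproduced here:

```go
package main

import (
	"fmt"
	"syscall"
)

// containerKill models the dispatch at the top of ContainerKill below:
// signal 0 or SIGKILL takes the kill-and-wait path, anything else is
// simply delivered to the container.
func containerKill(sig uint64) string {
	if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
		return "killAndWait (SIGKILL, then wait for exit)"
	}
	return fmt.Sprintf("deliver signal %d and return", sig)
}

func main() {
	fmt.Println(containerKill(0))
	fmt.Println(containerKill(uint64(syscall.SIGKILL)))
	fmt.Println(containerKill(uint64(syscall.SIGTERM)))
}
```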
-func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
- container, err := daemon.GetContainer(name)
- if err != nil {
- return err
- }
-
- if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) {
- return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig)
- }
-
- // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
- if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
- return daemon.Kill(container)
- }
- return daemon.killWithSignal(container, int(sig))
-}
-
-// killWithSignal sends the container the given signal. This wrapper for the
-// host specific kill command prepares the container before attempting
-// to send the signal. An error is returned if the container is paused
-// or not running, or if there is a problem returned from the
-// underlying kill command.
-func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error {
- logrus.Debugf("Sending %d to %s", sig, container.ID)
- container.Lock()
- defer container.Unlock()
-
- // We could unpause the container for them rather than returning this error
- if container.Paused {
- return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID)
- }
-
- if !container.Running {
- return errNotRunning{container.ID}
- }
-
- container.ExitOnNext()
-
- if !daemon.IsShuttingDown() {
- container.HasBeenManuallyStopped = true
- }
-
- // if the container is currently restarting we do not need to send the signal
- // to the process. Telling the monitor that it should exit on its next event
- // loop is enough
- if container.Restarting {
- return nil
- }
-
- if err := daemon.kill(container, sig); err != nil {
- err = fmt.Errorf("Cannot kill container %s: %s", container.ID, err)
- // if the container or process no longer exists, ignore the error
- if strings.Contains(err.Error(), "container not found") ||
- strings.Contains(err.Error(), "no such process") {
- logrus.Warnf("container kill failed because of 'container not found' or 'no such process': %s", err.Error())
- } else {
- return err
- }
- }
-
- attributes := map[string]string{
- "signal": fmt.Sprintf("%d", sig),
- }
- daemon.LogContainerEventWithAttributes(container, "kill", attributes)
- return nil
-}
-
-// Kill forcefully terminates a container.
-func (daemon *Daemon) Kill(container *container.Container) error {
- if !container.IsRunning() {
- return errNotRunning{container.ID}
- }
-
- // 1. Send SIGKILL
- if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil {
- // While normally we might "return err" here we're not going to
- // because if we can't stop the container by this point then
- // it's probably because it's already stopped. Meaning, between
- // the time of the IsRunning() call above and now it stopped.
- // Also, since the err return will be environment specific we can't
- // look for any particular (common) error that would indicate
- // that the process is already dead vs something else going wrong.
- // So, instead we'll give it up to 2 more seconds to complete and if
- // by that time the container is still running, then the error
- // we got is probably valid and so we return it to the caller.
- if isErrNoSuchProcess(err) {
- return nil
- }
-
- if container.IsRunning() {
- container.WaitStop(2 * time.Second)
- if container.IsRunning() {
- return err
- }
- }
- }
-
- // 2. Wait for the process to die; as a last resort, try to kill the process directly
- if err := killProcessDirectly(container); err != nil {
- if isErrNoSuchProcess(err) {
- return nil
- }
- return err
- }
-
- container.WaitStop(-1 * time.Second)
- return nil
-}
-
-// killPossiblyDeadProcess is a wrapper around killWithSignal() that
-// suppresses the "no such process" error.
-func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error {
- err := daemon.killWithSignal(container, sig)
- if err == syscall.ESRCH {
- e := errNoSuchProcess{container.GetPID(), sig}
- logrus.Debug(e)
- return e
- }
- return err
-}
-
-func (daemon *Daemon) kill(c *container.Container, sig int) error {
- return daemon.containerd.Signal(c.ID, sig)
-}
diff --git a/daemon/links.go b/daemon/links.go
deleted file mode 100644
index aaf1917d7..000000000
--- a/daemon/links.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package daemon
-
-import (
- "strings"
- "sync"
-
- "github.com/Sirupsen/logrus"
- "github.com/docker/docker/container"
- "github.com/docker/docker/pkg/graphdb"
-)
-
-// linkIndex stores link relationships between containers, including their specified alias.
-// The alias is the name the parent uses to reference the child.
-type linkIndex struct {
- // idx maps a parent->alias->child relationship
- idx map[*container.Container]map[string]*container.Container
- // childIdx maps child->parent->aliases
- childIdx map[*container.Container]map[*container.Container]map[string]struct{}
- mu sync.Mutex
-}
-
-func newLinkIndex() *linkIndex {
- return &linkIndex{
- idx: make(map[*container.Container]map[string]*container.Container),
- childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}),
- }
-}
-
-// link adds indexes for the passed in parent/child/alias relationships
-func (l *linkIndex) link(parent, child *container.Container, alias string) {
- l.mu.Lock()
-
- if l.idx[parent] == nil {
- l.idx[parent] = make(map[string]*container.Container)
- }
- l.idx[parent][alias] = child
- if l.childIdx[child] == nil {
- l.childIdx[child] = make(map[*container.Container]map[string]struct{})
- }
- if l.childIdx[child][parent] == nil {
- l.childIdx[child][parent] = make(map[string]struct{})
- }
- l.childIdx[child][parent][alias] = struct{}{}
-
- l.mu.Unlock()
-}
-
-// unlink removes the requested alias for the given parent/child
-func (l *linkIndex) unlink(alias string, child, parent *container.Container) {
- l.mu.Lock()
- delete(l.idx[parent], alias)
- delete(l.childIdx[child], parent)
- l.mu.Unlock()
-}
-
-// children maps all the aliases->children for the passed in parent
-// aliases here are the aliases the parent uses to refer to the child
-func (l *linkIndex) children(parent *container.Container) map[string]*container.Container {
- l.mu.Lock()
- children := l.idx[parent]
- l.mu.Unlock()
- return children
-}
-
-// parents maps all the aliases->parent for the passed in child
-// aliases here are the aliases the parents use to refer to the child
-func (l *linkIndex) parents(child *container.Container) map[string]*container.Container {
- l.mu.Lock()
-
- parents := make(map[string]*container.Container)
- for parent, aliases := range l.childIdx[child] {
- for alias := range aliases {
- parents[alias] = parent
- }
- }
-
- l.mu.Unlock()
- return parents
-}
-
-// delete deletes all link relationships referencing this container
-func (l *linkIndex) delete(container *container.Container) {
- l.mu.Lock()
- for _, child := range l.idx[container] {
- delete(l.childIdx[child], container)
- }
- delete(l.idx, container)
container) - delete(l.childIdx, container) - l.mu.Unlock() -} - -// migrateLegacySqliteLinks migrates sqlite links to use links from HostConfig -// when sqlite links were used, hostConfig.Links was set to nil -func (daemon *Daemon) migrateLegacySqliteLinks(db *graphdb.Database, container *container.Container) error { - // if links is populated (or an empty slice), then this isn't using sqlite links and can be skipped - if container.HostConfig == nil || container.HostConfig.Links != nil { - return nil - } - - logrus.Debugf("migrating legacy sqlite link info for container: %s", container.ID) - - fullName := container.Name - if fullName[0] != '/' { - fullName = "/" + fullName - } - - // don't use a nil slice, this ensures that the check above will skip once the migration has completed - links := []string{} - children, err := db.Children(fullName, 0) - if err != nil { - if !strings.Contains(err.Error(), "Cannot find child for") { - return err - } - // else continue... it's ok if we didn't find any children, it'll just be nil and we can continue the migration - } - - for _, child := range children { - c, err := daemon.GetContainer(child.Entity.ID()) - if err != nil { - return err - } - - links = append(links, c.Name+":"+child.Edge.Name) - } - - container.HostConfig.Links = links - return container.WriteHostConfig() -} diff --git a/daemon/links/links.go b/daemon/links/links.go deleted file mode 100644 index af15de046..000000000 --- a/daemon/links/links.go +++ /dev/null @@ -1,141 +0,0 @@ -package links - -import ( - "fmt" - "path" - "strings" - - "github.com/docker/go-connections/nat" -) - -// Link struct holds information about a parent/child linked container -type Link struct { - // Parent container IP address - ParentIP string - // Child container IP address - ChildIP string - // Link name - Name string - // Child environment variables - ChildEnvironment []string - // Child exposed ports - Ports []nat.Port -} - -// NewLink initializes a new Link struct with the provided options. -func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) *Link { - var ( - i int - ports = make([]nat.Port, len(exposedPorts)) - ) - - for p := range exposedPorts { - ports[i] = p - i++ - } - - return &Link{ - Name: name, - ChildIP: childIP, - ParentIP: parentIP, - ChildEnvironment: env, - Ports: ports, - } -} - -// ToEnv creates a slice of strings containing child container information in -// the form of environment variables which will later be exported on container -// startup.
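For context on the package being removed here: NewLink collects the child's exposed ports, and ToEnv (below) turns them into the classic `*_PORT_*` environment variables. A minimal sketch of how the two fit together, in the spirit of the tests later in this patch; the import path assumes the pre-removal tree, and the IPs, link name, and password are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/daemon/links"
	"github.com/docker/go-connections/nat"
)

func main() {
	// One exposed tcp port on the child container.
	port, _ := nat.NewPort("tcp", "6379")
	ports := nat.PortSet{port: {}}

	// Arguments: parent IP, child IP, link name, child env, exposed ports.
	link := links.NewLink("172.17.0.3", "172.17.0.2", "/web/db", []string{"PASSWORD=secret"}, ports)

	// The alias is the last path element of the name, upper-cased: this prints
	// DB_NAME, DB_PORT, DB_PORT_6379_TCP, DB_PORT_6379_TCP_ADDR, ..., DB_ENV_PASSWORD.
	for _, e := range link.ToEnv() {
		fmt.Println(e)
	}
}
```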
-func (l *Link) ToEnv() []string { - env := []string{} - - _, n := path.Split(l.Name) - alias := strings.Replace(strings.ToUpper(n), "-", "_", -1) - - if p := l.getDefaultPort(); p != nil { - env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) - } - - // sort the ports so that we can group the contiguous ports together - nat.Sort(l.Ports, func(ip, jp nat.Port) bool { - // Sort in ascending order; if the two ports have the same number, tcp takes priority - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") - }) - - for i := 0; i < len(l.Ports); { - p := l.Ports[i] - j := nextContiguous(l.Ports, p.Int(), i) - if j > i+1 { - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) - - q := l.Ports[j] - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) - - i = j + 1 - } else { - i++ - } - } - for _, p := range l.Ports { - env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) - } - - // Load the linked container's name into the environment - env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) - - if l.ChildEnvironment != nil { - for _, v := range l.ChildEnvironment { - parts := strings.SplitN(v, "=", 2) - if len(parts) < 2 { - continue - } - // Ignore a few variables that are added during docker build (and not really relevant to linked containers) - if parts[0] == "HOME" || parts[0] == "PATH" { - continue - } - env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) - } - } - return env -} - -func nextContiguous(ports []nat.Port, value int, index int) int { - if index+1 == len(ports) { - return index - } - for i := index + 1; i < len(ports); i++ { - if ports[i].Int() > value+1 { - return i - 1 - } - - value++ - } - return len(ports) - 1 -} - -// getDefaultPort returns the lowest-numbered exposed port, preferring tcp on ties -func (l *Link) getDefaultPort() *nat.Port { - var p nat.Port - i := len(l.Ports) - - if i == 0 { - return nil - } else if i > 1 { - nat.Sort(l.Ports, func(ip, jp nat.Port) bool { - // Sort in ascending order; if the two ports have the same number, tcp takes priority - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") - }) - } - p = l.Ports[0] - return &p -} diff --git a/daemon/links/links_test.go b/daemon/links/links_test.go deleted file mode 100644 index 0273f13cf..000000000 --- a/daemon/links/links_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package links - -import ( - "fmt" - "strings" - "testing" - - 
"github.com/docker/go-connections/nat" -) - -// Just to make life easier -func newPortNoError(proto, port string) nat.Port { - p, _ := nat.NewPort(proto, port) - return p -} - -func TestLinkNaming(t *testing.T) { - ports := make(nat.PortSet) - ports[newPortNoError("tcp", "6379")] = struct{}{} - - link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports) - - rawEnv := link.ToEnv() - env := make(map[string]string, len(rawEnv)) - for _, e := range rawEnv { - parts := strings.Split(e, "=") - if len(parts) != 2 { - t.FailNow() - } - env[parts[0]] = parts[1] - } - - value, ok := env["DOCKER_1_PORT"] - - if !ok { - t.Fatalf("DOCKER_1_PORT not found in env") - } - - if value != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_1_PORT"]) - } -} - -func TestLinkNew(t *testing.T) { - ports := make(nat.PortSet) - ports[newPortNoError("tcp", "6379")] = struct{}{} - - link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports) - - if link.Name != "/db/docker" { - t.Fail() - } - if link.ParentIP != "172.0.17.3" { - t.Fail() - } - if link.ChildIP != "172.0.17.2" { - t.Fail() - } - for _, p := range link.Ports { - if p != newPortNoError("tcp", "6379") { - t.Fail() - } - } -} - -func TestLinkEnv(t *testing.T) { - ports := make(nat.PortSet) - ports[newPortNoError("tcp", "6379")] = struct{}{} - - link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) - - rawEnv := link.ToEnv() - env := make(map[string]string, len(rawEnv)) - for _, e := range rawEnv { - parts := strings.Split(e, "=") - if len(parts) != 2 { - t.FailNow() - } - env[parts[0]] = parts[1] - } - if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) - } - if env["DOCKER_PORT_6379_TCP"] != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP"]) - } - if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { - t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) - } - if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { - t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) - } - if env["DOCKER_PORT_6379_TCP_PORT"] != "6379" { - t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT"]) - } - if env["DOCKER_NAME"] != "/db/docker" { - t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) - } - if env["DOCKER_ENV_PASSWORD"] != "gordon" { - t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) - } -} - -func TestLinkMultipleEnv(t *testing.T) { - ports := make(nat.PortSet) - ports[newPortNoError("tcp", "6379")] = struct{}{} - ports[newPortNoError("tcp", "6380")] = struct{}{} - ports[newPortNoError("tcp", "6381")] = struct{}{} - - link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) - - rawEnv := link.ToEnv() - env := make(map[string]string, len(rawEnv)) - for _, e := range rawEnv { - parts := strings.Split(e, "=") - if len(parts) != 2 { - t.FailNow() - } - env[parts[0]] = parts[1] - } - if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) - } - if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) - } - if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { - t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) - } - if env["DOCKER_PORT_6379_TCP_PROTO"] 
!= "tcp" { - t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) - } - if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { - t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) - } - if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { - t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) - } - if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { - t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) - } - if env["DOCKER_NAME"] != "/db/docker" { - t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) - } - if env["DOCKER_ENV_PASSWORD"] != "gordon" { - t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) - } -} - -func TestLinkPortRangeEnv(t *testing.T) { - ports := make(nat.PortSet) - ports[newPortNoError("tcp", "6379")] = struct{}{} - ports[newPortNoError("tcp", "6380")] = struct{}{} - ports[newPortNoError("tcp", "6381")] = struct{}{} - - link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) - - rawEnv := link.ToEnv() - env := make(map[string]string, len(rawEnv)) - for _, e := range rawEnv { - parts := strings.Split(e, "=") - if len(parts) != 2 { - t.FailNow() - } - env[parts[0]] = parts[1] - } - - if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) - } - if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) - } - if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { - t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) - } - if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { - t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) - } - if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { - t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) - } - if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { - t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) - } - if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { - t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) - } - if env["DOCKER_NAME"] != "/db/docker" { - t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) - } - if env["DOCKER_ENV_PASSWORD"] != "gordon" { - t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) - } - for i := range []int{6379, 6380, 6381} { - tcpaddr := fmt.Sprintf("DOCKER_PORT_%d_TCP_ADDR", i) - tcpport := fmt.Sprintf("DOCKER_PORT_%d_TCP+PORT", i) - tcpproto := fmt.Sprintf("DOCKER_PORT_%d_TCP+PROTO", i) - tcp := fmt.Sprintf("DOCKER_PORT_%d_TCP", i) - if env[tcpaddr] == "172.0.17.2" { - t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr]) - } - if env[tcpport] == fmt.Sprintf("%d", i) { - t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport]) - } - if env[tcpproto] == "tcp" { - t.Fatalf("Expected env %s = tcp, got %s", tcpproto, env[tcpproto]) - } - if env[tcp] == fmt.Sprintf("tcp://172.0.17.2:%d", i) { - t.Fatalf("Expected env %s = tcp://172.0.17.2:%d, got %s", tcp, i, env[tcp]) - } - } -} diff --git a/daemon/links_test.go b/daemon/links_test.go deleted file mode 100644 index d7a3c2aea..000000000 --- a/daemon/links_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package daemon - -import ( - "encoding/json" - "io/ioutil" - "os" - "path" - "path/filepath" - "testing" - - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/graphdb" - 
"github.com/docker/docker/pkg/stringid" - containertypes "github.com/docker/engine-api/types/container" -) - -func TestMigrateLegacySqliteLinks(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "legacy-qlite-links-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - name1 := "test1" - c1 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: stringid.GenerateNonCryptoID(), - Name: name1, - HostConfig: &containertypes.HostConfig{}, - }, - } - c1.Root = tmpDir - - name2 := "test2" - c2 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: stringid.GenerateNonCryptoID(), - Name: name2, - }, - } - - store := container.NewMemoryStore() - store.Add(c1.ID, c1) - store.Add(c2.ID, c2) - - d := &Daemon{root: tmpDir, containers: store} - db, err := graphdb.NewSqliteConn(filepath.Join(d.root, "linkgraph.db")) - if err != nil { - t.Fatal(err) - } - - if _, err := db.Set("/"+name1, c1.ID); err != nil { - t.Fatal(err) - } - - if _, err := db.Set("/"+name2, c2.ID); err != nil { - t.Fatal(err) - } - - alias := "hello" - if _, err := db.Set(path.Join(c1.Name, alias), c2.ID); err != nil { - t.Fatal(err) - } - - if err := d.migrateLegacySqliteLinks(db, c1); err != nil { - t.Fatal(err) - } - - if len(c1.HostConfig.Links) != 1 { - t.Fatal("expected links to be populated but is empty") - } - - expected := name2 + ":" + alias - actual := c1.HostConfig.Links[0] - if actual != expected { - t.Fatalf("got wrong link value, expected: %q, got: %q", expected, actual) - } - - // ensure this is persisted - b, err := ioutil.ReadFile(filepath.Join(c1.Root, "hostconfig.json")) - if err != nil { - t.Fatal(err) - } - type hc struct { - Links []string - } - var cfg hc - if err := json.Unmarshal(b, &cfg); err != nil { - t.Fatal(err) - } - - if len(cfg.Links) != 1 { - t.Fatalf("expected one entry in links, got: %d", len(cfg.Links)) - } - if cfg.Links[0] != expected { // same expected as above - t.Fatalf("got wrong link value, expected: %q, got: %q", expected, cfg.Links[0]) - } -} diff --git a/daemon/list.go b/daemon/list.go deleted file mode 100644 index 128d9eda9..000000000 --- a/daemon/list.go +++ /dev/null @@ -1,596 +0,0 @@ -package daemon - -import ( - "errors" - "fmt" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/image" - "github.com/docker/docker/volume" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/go-connections/nat" -) - -var acceptedVolumeFilterTags = map[string]bool{ - "dangling": true, - "name": true, - "driver": true, -} - -var acceptedPsFilterTags = map[string]bool{ - "ancestor": true, - "before": true, - "exited": true, - "id": true, - "isolation": true, - "label": true, - "name": true, - "status": true, - "since": true, - "volume": true, - "network": true, -} - -// iterationAction represents possible outcomes happening during the container iteration. -type iterationAction int - -// containerReducer represents a reducer for a container. -// Returns the object to serialize by the api. -type containerReducer func(*container.Container, *listContext) (*types.Container, error) - -const ( - // includeContainer is the action to include a container in the reducer. - includeContainer iterationAction = iota - // excludeContainer is the action to exclude a container in the reducer. 
- excludeContainer - // stopIteration is the action to stop iterating over the list of containers. - stopIteration -) - -// errStopIteration makes the iterator stop without returning an error. -var errStopIteration = errors.New("container list iteration stopped") - -// List returns an array of all containers registered in the daemon. -func (daemon *Daemon) List() []*container.Container { - return daemon.containers.List() -} - -// listContext is the daemon generated filtering to iterate over containers. -// This is created based on the user specification from types.ContainerListOptions. -type listContext struct { - // idx is the container iteration index for this context - idx int - // ancestorFilter tells whether it should check ancestors or not - ancestorFilter bool - // names is a list of container names to filter with - names map[string][]string - // images is a list of images to filter with - images map[image.ID]bool - // filters is a collection of arguments to filter with, specified by the user - filters filters.Args - // exitAllowed is a list of exit codes allowed to filter with - exitAllowed []int - - // beforeFilter is a filter to ignore containers that appear before the one given - // this is used for --filter=before= and --before=, the latter is deprecated. - beforeFilter *container.Container - // sinceFilter is a filter to stop the filtering when the iterator arrives at the given container - // this is used for --filter=since= and --since=, the latter is deprecated. - sinceFilter *container.Container - // ContainerListOptions is the filters set by the user - *types.ContainerListOptions -} - -// Containers returns the list of containers to show given the user's filtering. -func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { - return daemon.reduceContainers(config, daemon.transformContainer) -} - -// ListContainersForNode returns the IDs of all containers that match the specified nodeID -func (daemon *Daemon) ListContainersForNode(nodeID string) []string { - var ids []string - for _, c := range daemon.List() { - if c.Config.Labels["com.docker.swarm.node.id"] == nodeID { - ids = append(ids, c.ID) - } - } - return ids -} - -func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Container { - idSearch := false - names := ctx.filters.Get("name") - ids := ctx.filters.Get("id") - if len(names)+len(ids) == 0 { - // if name or ID filters are not in use, return to - // standard behavior of walking the entire container - // list from the daemon's in-memory store - return daemon.List() - } - - // idSearch will determine if we limit name matching to the IDs - // matched from any IDs which were specified as filters - if len(ids) > 0 { - idSearch = true - } - - matches := make(map[string]bool) - // find ID matches; errors represent "not found" and can be ignored - for _, id := range ids { - if fullID, err := daemon.idIndex.Get(id); err == nil { - matches[fullID] = true - } - } - - // look for name matches; if ID filtering was used, then limit the - // search space to the matches map only; errors represent "not found" - // and can be ignored - if len(names) > 0 { - for id, idNames := range ctx.names { - // if ID filters were used and no matches on that ID were - // found, continue to next ID in the list - if idSearch && !matches[id] { - continue - } - for _, eachName := range idNames { - if ctx.filters.Match("name", eachName) { - matches[id] = true - } - } - } - } - - cntrs := make([]*container.Container, 0, len(matches)) - for id := 
range matches { - cntrs = append(cntrs, daemon.containers.Get(id)) - } - return cntrs -} - -// reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. -func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { - containers := []*types.Container{} - - ctx, err := daemon.foldFilter(config) - if err != nil { - return nil, err - } - - // fastpath to only look at a subset of containers if specific name - // or ID matches were provided by the user--otherwise we potentially - // end up locking and querying many more containers than intended - containerList := daemon.filterByNameIDMatches(ctx) - - for _, container := range containerList { - t, err := daemon.reducePsContainer(container, ctx, reducer) - if err != nil { - if err != errStopIteration { - return nil, err - } - break - } - if t != nil { - containers = append(containers, t) - ctx.idx++ - } - } - return containers, nil -} - -// reducePsContainer is the basic representation for a container as expected by the ps command. -func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) { - container.Lock() - defer container.Unlock() - - // filter containers to return - action := includeContainerInList(container, ctx) - switch action { - case excludeContainer: - return nil, nil - case stopIteration: - return nil, errStopIteration - } - - // transform internal container struct into api structs - return reducer(container, ctx) -} - -// foldFilter generates the container filter based on the user's filtering options. -func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listContext, error) { - psFilters := config.Filter - - if err := psFilters.Validate(acceptedPsFilterTags); err != nil { - return nil, err - } - - var filtExited []int - - err := psFilters.WalkValues("exited", func(value string) error { - code, err := strconv.Atoi(value) - if err != nil { - return err - } - filtExited = append(filtExited, code) - return nil - }) - if err != nil { - return nil, err - } - - err = psFilters.WalkValues("status", func(value string) error { - if !container.IsValidStateString(value) { - return fmt.Errorf("Unrecognised filter value for status: %s", value) - } - - config.All = true - return nil - }) - if err != nil { - return nil, err - } - - var beforeContFilter, sinceContFilter *container.Container - - err = psFilters.WalkValues("before", func(value string) error { - beforeContFilter, err = daemon.GetContainer(value) - return err - }) - if err != nil { - return nil, err - } - - err = psFilters.WalkValues("since", func(value string) error { - sinceContFilter, err = daemon.GetContainer(value) - return err - }) - if err != nil { - return nil, err - } - - imagesFilter := map[image.ID]bool{} - var ancestorFilter bool - if psFilters.Include("ancestor") { - ancestorFilter = true - psFilters.WalkValues("ancestor", func(ancestor string) error { - id, err := daemon.GetImageID(ancestor) - if err != nil { - logrus.Warnf("Error while looking up for image %v", ancestor) - return nil - } - if imagesFilter[id] { - // Already seen this ancestor, skip it - return nil - } - // Then walk down the graph and put the imageIds in imagesFilter - populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) - return nil - }) - } - - return &listContext{ - filters: psFilters, - ancestorFilter: ancestorFilter, - images: imagesFilter, - 
exitAllowed: filtExited, - beforeFilter: beforeContFilter, - sinceFilter: sinceContFilter, - ContainerListOptions: config, - names: daemon.nameIndex.GetAll(), - }, nil -} - -// includeContainerInList decides whether a container should be included in the output or not based on the filter. -// It also decides if the iteration should be stopped or not. -func includeContainerInList(container *container.Container, ctx *listContext) iterationAction { - // Do not include container if it's in the list before the filter container. - // Set the filter container to nil to include the rest of containers after this one. - if ctx.beforeFilter != nil { - if container.ID == ctx.beforeFilter.ID { - ctx.beforeFilter = nil - } - return excludeContainer - } - - // Stop iteration when the container arrives at the filter container - if ctx.sinceFilter != nil { - if container.ID == ctx.sinceFilter.ID { - return stopIteration - } - } - - // Do not include the container if it's stopped and we're not listing all containers or limiting the results - if !container.Running && !ctx.All && ctx.Limit <= 0 { - return excludeContainer - } - - // Do not include container if the name doesn't match - if !ctx.filters.Match("name", container.Name) { - return excludeContainer - } - - // Do not include container if the id doesn't match - if !ctx.filters.Match("id", container.ID) { - return excludeContainer - } - - // Do not include container if any of the labels don't match - if !ctx.filters.MatchKVList("label", container.Config.Labels) { - return excludeContainer - } - - // Do not include container if isolation doesn't match - if excludeContainer == excludeByIsolation(container, ctx) { - return excludeContainer - } - - // Stop iteration when the index is over the limit - if ctx.Limit > 0 && ctx.idx == ctx.Limit { - return stopIteration - } - - // Do not include container if its exit code is not in the filter - if len(ctx.exitAllowed) > 0 { - shouldSkip := true - for _, code := range ctx.exitAllowed { - if code == container.ExitCode() && !container.Running && !container.StartedAt.IsZero() { - shouldSkip = false - break - } - } - if shouldSkip { - return excludeContainer - } - } - - // Do not include container if its status doesn't match the filter - if !ctx.filters.Match("status", container.State.StateString()) { - return excludeContainer - } - - if ctx.filters.Include("volume") { - volumesByName := make(map[string]*volume.MountPoint) - for _, m := range container.MountPoints { - if m.Name != "" { - volumesByName[m.Name] = m - } else { - volumesByName[m.Source] = m - } - } - - volumeExist := fmt.Errorf("volume mounted in container") - err := ctx.filters.WalkValues("volume", func(value string) error { - if _, exist := container.MountPoints[value]; exist { - return volumeExist - } - if _, exist := volumesByName[value]; exist { - return volumeExist - } - return nil - }) - if err != volumeExist { - return excludeContainer - } - } - - if ctx.ancestorFilter { - if len(ctx.images) == 0 { - return excludeContainer - } - if !ctx.images[container.ImageID] { - return excludeContainer - } - } - - networkExist := fmt.Errorf("container part of network") - if ctx.filters.Include("network") { - err := ctx.filters.WalkValues("network", func(value string) error { - if _, ok := container.NetworkSettings.Networks[value]; ok { - return networkExist - } - for _, nw := range container.NetworkSettings.Networks { - if nw.NetworkID == value { - return networkExist - } - } - return nil - }) - if err != networkExist { - return excludeContainer - } - } - - return includeContainer -} - -// 
transformContainer generates the container type expected by the docker ps command. -func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) { - newC := &types.Container{ - ID: container.ID, - Names: ctx.names[container.ID], - ImageID: container.ImageID.String(), - } - if newC.Names == nil { - // Dead containers will often have no name, so make sure the response isn't null - newC.Names = []string{} - } - - image := container.Config.Image // if possible keep the original ref - if image != container.ImageID.String() { - id, err := daemon.GetImageID(image) - if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { - return nil, err - } - if err != nil || id != container.ImageID { - image = container.ImageID.String() - } - } - newC.Image = image - - if len(container.Args) > 0 { - args := []string{} - for _, arg := range container.Args { - if strings.Contains(arg, " ") { - args = append(args, fmt.Sprintf("'%s'", arg)) - } else { - args = append(args, arg) - } - } - argsAsString := strings.Join(args, " ") - - newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) - } else { - newC.Command = container.Path - } - newC.Created = container.Created.Unix() - newC.State = container.State.StateString() - newC.Status = container.State.String() - newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) - // copy networks to avoid races - networks := make(map[string]*networktypes.EndpointSettings) - for name, network := range container.NetworkSettings.Networks { - if network == nil { - continue - } - networks[name] = &networktypes.EndpointSettings{ - EndpointID: network.EndpointID, - Gateway: network.Gateway, - IPAddress: network.IPAddress, - IPPrefixLen: network.IPPrefixLen, - IPv6Gateway: network.IPv6Gateway, - GlobalIPv6Address: network.GlobalIPv6Address, - GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen, - MacAddress: network.MacAddress, - NetworkID: network.NetworkID, - } - if network.IPAMConfig != nil { - networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{ - IPv4Address: network.IPAMConfig.IPv4Address, - IPv6Address: network.IPAMConfig.IPv6Address, - } - } - } - newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} - - newC.Ports = []types.Port{} - for port, bindings := range container.NetworkSettings.Ports { - p, err := nat.ParsePort(port.Port()) - if err != nil { - return nil, err - } - if len(bindings) == 0 { - newC.Ports = append(newC.Ports, types.Port{ - PrivatePort: p, - Type: port.Proto(), - }) - continue - } - for _, binding := range bindings { - h, err := nat.ParsePort(binding.HostPort) - if err != nil { - return nil, err - } - newC.Ports = append(newC.Ports, types.Port{ - PrivatePort: p, - PublicPort: h, - Type: port.Proto(), - IP: binding.HostIP, - }) - } - } - - if ctx.Size { - sizeRw, sizeRootFs := daemon.getSize(container) - newC.SizeRw = sizeRw - newC.SizeRootFs = sizeRootFs - } - newC.Labels = container.Config.Labels - newC.Mounts = addMountPoints(container) - - return newC, nil -} - -// Volumes lists known volumes, using the filter to restrict the range -// of volumes returned. 
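The `filter` argument accepted by Volumes below is a JSON-encoded filters.Args. A hedged sketch of producing one on the client side, assuming the engine-api filters package vendored at the time:

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/types/filters"
)

func main() {
	// Keys must be in acceptedVolumeFilterTags: "dangling", "name", "driver".
	args := filters.NewArgs()
	args.Add("dangling", "true")
	args.Add("driver", "local")

	// ToParam encodes the args as JSON; FromParam in Volumes reverses it.
	param, err := filters.ToParam(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(param) // e.g. {"dangling":{"true":true},"driver":{"local":true}}
}
```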
-func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { - var ( - volumesOut []*types.Volume - ) - volFilters, err := filters.FromParam(filter) - if err != nil { - return nil, nil, err - } - - if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { - return nil, nil, err - } - - volumes, warnings, err := daemon.volumes.List() - if err != nil { - return nil, nil, err - } - - filterVolumes, err := daemon.filterVolumes(volumes, volFilters) - if err != nil { - return nil, nil, err - } - for _, v := range filterVolumes { - apiV := volumeToAPIType(v) - if vv, ok := v.(interface { - CachedPath() string - }); ok { - apiV.Mountpoint = vv.CachedPath() - } else { - apiV.Mountpoint = v.Path() - } - volumesOut = append(volumesOut, apiV) - } - return volumesOut, warnings, nil -} - -// filterVolumes filters volume list according to user specified filter -// and returns user chosen volumes -func (daemon *Daemon) filterVolumes(vols []volume.Volume, filter filters.Args) ([]volume.Volume, error) { - // if filter is empty, return original volume list - if filter.Len() == 0 { - return vols, nil - } - - var retVols []volume.Volume - for _, vol := range vols { - if filter.Include("name") { - if !filter.Match("name", vol.Name()) { - continue - } - } - if filter.Include("driver") { - if !filter.Match("driver", vol.DriverName()) { - continue - } - } - retVols = append(retVols, vol) - } - danglingOnly := false - if filter.Include("dangling") { - if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") { - danglingOnly = true - } else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") { - return nil, fmt.Errorf("Invalid filter 'dangling=%s'", filter.Get("dangling")) - } - retVols = daemon.volumes.FilterByUsed(retVols, !danglingOnly) - } - return retVols, nil -} - -func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { - if !ancestorMap[imageID] { - for _, id := range getChildren(imageID) { - populateImageFilterByParents(ancestorMap, id, getChildren) - } - ancestorMap[imageID] = true - } -} diff --git a/daemon/list_unix.go b/daemon/list_unix.go deleted file mode 100644 index 91c9caccf..000000000 --- a/daemon/list_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux freebsd solaris - -package daemon - -import "github.com/docker/docker/container" - -// excludeByIsolation is a platform specific helper function to support PS -// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. -func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { - return includeContainer -} diff --git a/daemon/list_windows.go b/daemon/list_windows.go deleted file mode 100644 index 7fbcd3af2..000000000 --- a/daemon/list_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package daemon - -import ( - "strings" - - "github.com/docker/docker/container" -) - -// excludeByIsolation is a platform specific helper function to support PS -// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. 
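On the client side, the ps filtering implemented above is driven by a ContainerListOptions. A minimal sketch of roughly what the CLI sent for `docker ps -a --filter status=exited --filter isolation=default`, assuming the engine-api types of the period; the isolation key only has an effect against a Windows daemon, as the excludeByIsolation variant below shows:

```go
package main

import (
	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/filters"
)

func main() {
	// Keys must appear in acceptedPsFilterTags.
	args := filters.NewArgs()
	args.Add("status", "exited")
	args.Add("isolation", "default")

	opts := types.ContainerListOptions{
		All:    true, // foldFilter forces All whenever a status filter is present
		Filter: args,
	}
	_ = opts // handed to Daemon.Containers via the API client
}
```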
-func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { - i := strings.ToLower(string(container.HostConfig.Isolation)) - if i == "" { - i = "default" - } - if !ctx.filters.Match("isolation", i) { - return excludeContainer - } - return includeContainer -} diff --git a/daemon/logdrivers_linux.go b/daemon/logdrivers_linux.go deleted file mode 100644 index 89fe49a85..000000000 --- a/daemon/logdrivers_linux.go +++ /dev/null @@ -1,14 +0,0 @@ -package daemon - -import ( - // Importing packages here only to make sure their init gets called and - // therefore they register themselves to the logdriver factory. - _ "github.com/docker/docker/daemon/logger/awslogs" - _ "github.com/docker/docker/daemon/logger/fluentd" - _ "github.com/docker/docker/daemon/logger/gcplogs" - _ "github.com/docker/docker/daemon/logger/gelf" - _ "github.com/docker/docker/daemon/logger/journald" - _ "github.com/docker/docker/daemon/logger/jsonfilelog" - _ "github.com/docker/docker/daemon/logger/splunk" - _ "github.com/docker/docker/daemon/logger/syslog" -) diff --git a/daemon/logdrivers_windows.go b/daemon/logdrivers_windows.go deleted file mode 100644 index 129b06650..000000000 --- a/daemon/logdrivers_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -package daemon - -import ( - // Importing packages here only to make sure their init gets called and - // therefore they register themselves to the logdriver factory. - _ "github.com/docker/docker/daemon/logger/awslogs" - _ "github.com/docker/docker/daemon/logger/etwlogs" - _ "github.com/docker/docker/daemon/logger/jsonfilelog" - _ "github.com/docker/docker/daemon/logger/splunk" -) diff --git a/daemon/logger/awslogs/cloudwatchlogs.go b/daemon/logger/awslogs/cloudwatchlogs.go deleted file mode 100644 index 78a230fe8..000000000 --- a/daemon/logger/awslogs/cloudwatchlogs.go +++ /dev/null @@ -1,375 +0,0 @@ -// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs -package awslogs - -import ( - "errors" - "fmt" - "os" - "runtime" - "sort" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/dockerversion" -) - -const ( - name = "awslogs" - regionKey = "awslogs-region" - regionEnvKey = "AWS_REGION" - logGroupKey = "awslogs-group" - logStreamKey = "awslogs-stream" - batchPublishFrequency = 5 * time.Second - - // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html - perEventBytes = 26 - maximumBytesPerPut = 1048576 - maximumLogEventsPerPut = 10000 - - // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html - maximumBytesPerEvent = 262144 - perEventBytes - - resourceAlreadyExistsCode = "ResourceAlreadyExistsException" - dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" - invalidSequenceTokenCode = "InvalidSequenceTokenException" - - userAgentHeader = "User-Agent" -) - -type logStream struct { - logStreamName string - logGroupName string - client api - messages chan *logger.Message - lock sync.RWMutex - closed bool - sequenceToken *string -} - -type api interface { - CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) - 
PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) -} - -type regionFinder interface { - Region() (string, error) -} - -type byTimestamp []*cloudwatchlogs.InputLogEvent - -// init registers the awslogs driver -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates an awslogs logger using the configuration passed in on the -// context. Supported context configuration variables are awslogs-region, -// awslogs-group, and awslogs-stream. When available, configuration is -// also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID, -// AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and -// the EC2 Instance Metadata Service. -func New(ctx logger.Context) (logger.Logger, error) { - logGroupName := ctx.Config[logGroupKey] - logStreamName := ctx.ContainerID - if ctx.Config[logStreamKey] != "" { - logStreamName = ctx.Config[logStreamKey] - } - client, err := newAWSLogsClient(ctx) - if err != nil { - return nil, err - } - containerStream := &logStream{ - logStreamName: logStreamName, - logGroupName: logGroupName, - client: client, - messages: make(chan *logger.Message, 4096), - } - err = containerStream.create() - if err != nil { - return nil, err - } - go containerStream.collectBatch() - - return containerStream, nil -} - -// newRegionFinder is a variable such that the implementation -// can be swapped out for unit tests. -var newRegionFinder = func() regionFinder { - return ec2metadata.New(session.New()) -} - -// newAWSLogsClient creates the service client for Amazon CloudWatch Logs. -// Customizations to the default client from the SDK include a Docker-specific -// User-Agent string and automatic region detection using the EC2 Instance -// Metadata Service when region is otherwise unspecified. 
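For reference, wiring the driver up looked much like the tests further down: a logger.Context carrying the awslogs-* options. The region, group, and stream values here are illustrative, and actually running this needs AWS credentials visible to the daemon:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/logger/awslogs"
)

func main() {
	// Equivalent of: docker run --log-driver=awslogs \
	//   --log-opt awslogs-region=us-east-1 \
	//   --log-opt awslogs-group=my-group --log-opt awslogs-stream=my-stream
	ctx := logger.Context{
		ContainerID: "abc123def456", // the default stream name when awslogs-stream is unset
		Config: map[string]string{
			"awslogs-region": "us-east-1",
			"awslogs-group":  "my-group",
			"awslogs-stream": "my-stream",
		},
	}
	if err := awslogs.ValidateLogOpt(ctx.Config); err != nil {
		panic(err)
	}
	l, err := awslogs.New(ctx) // creates the log stream and starts collectBatch
	if err != nil {
		panic(err)
	}
	fmt.Println(l.Name()) // "awslogs"
}
```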
-func newAWSLogsClient(ctx logger.Context) (api, error) { - var region *string - if os.Getenv(regionEnvKey) != "" { - region = aws.String(os.Getenv(regionEnvKey)) - } - if ctx.Config[regionKey] != "" { - region = aws.String(ctx.Config[regionKey]) - } - if region == nil || *region == "" { - logrus.Info("Trying to get region from EC2 Metadata") - ec2MetadataClient := newRegionFinder() - r, err := ec2MetadataClient.Region() - if err != nil { - logrus.WithFields(logrus.Fields{ - "error": err, - }).Error("Could not get region from EC2 metadata, environment, or log option") - return nil, errors.New("Cannot determine region for awslogs driver") - } - region = &r - } - logrus.WithFields(logrus.Fields{ - "region": *region, - }).Debug("Created awslogs client") - - client := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(*region)) - - client.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "DockerUserAgentHandler", - Fn: func(r *request.Request) { - currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) - r.HTTPRequest.Header.Set(userAgentHeader, - fmt.Sprintf("Docker %s (%s) %s", - dockerversion.Version, runtime.GOOS, currentAgent)) - }, - }) - return client, nil -} - -// Name returns the name of the awslogs logging driver -func (l *logStream) Name() string { - return name -} - -// Log submits messages for logging by an instance of the awslogs logging driver -func (l *logStream) Log(msg *logger.Message) error { - l.lock.RLock() - defer l.lock.RUnlock() - if !l.closed { - l.messages <- msg - } - return nil -} - -// Close closes the instance of the awslogs logging driver -func (l *logStream) Close() error { - l.lock.Lock() - defer l.lock.Unlock() - if !l.closed { - close(l.messages) - } - l.closed = true - return nil -} - -// create creates a log stream for the instance of the awslogs logging driver -func (l *logStream) create() error { - input := &cloudwatchlogs.CreateLogStreamInput{ - LogGroupName: aws.String(l.logGroupName), - LogStreamName: aws.String(l.logStreamName), - } - - _, err := l.client.CreateLogStream(input) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - fields := logrus.Fields{ - "errorCode": awsErr.Code(), - "message": awsErr.Message(), - "origError": awsErr.OrigErr(), - "logGroupName": l.logGroupName, - "logStreamName": l.logStreamName, - } - if awsErr.Code() == resourceAlreadyExistsCode { - // Allow creation to succeed - logrus.WithFields(fields).Info("Log stream already exists") - return nil - } - logrus.WithFields(fields).Error("Failed to create log stream") - } - } - return err -} - -// newTicker is used for time-based batching. newTicker is a variable such -// that the implementation can be swapped out for unit tests. -var newTicker = func(freq time.Duration) *time.Ticker { - return time.NewTicker(freq) -} - -// collectBatch executes as a goroutine to perform batching of log events for -// submission to the log stream. Batching is performed on time- and size- -// bases. Time-based batching occurs at a 5 second interval (defined in the -// batchPublishFrequency const). Size-based batching is performed on the -// maximum number of events per batch (defined in maximumLogEventsPerPut) and -// the maximum number of total bytes in a batch (defined in -// maximumBytesPerPut). Log messages are split by the maximum bytes per event -// (defined in maximumBytesPerEvent). There is a fixed per-event byte overhead -// (defined in perEventBytes) which is accounted for in split- and batch- -// calculations. 
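The size accounting described in that comment can be made concrete. A self-contained sketch of the flush condition, with the limits copied from the const block at the top of this file; the helper name is ours, not the driver's:

```go
package main

import "fmt"

// Limits mirrored from the removed driver's const block.
const (
	perEventBytes          = 26
	maximumBytesPerPut     = 1048576
	maximumLogEventsPerPut = 10000
	maximumBytesPerEvent   = 262144 - perEventBytes
)

// fitsInBatch mirrors collectBatch's flush test: a batch is published early
// when adding the next (already split) line would exceed either the
// event-count limit or the total-byte limit.
func fitsInBatch(events, batchBytes, lineBytes int) bool {
	return events < maximumLogEventsPerPut &&
		batchBytes+lineBytes+perEventBytes <= maximumBytesPerPut
}

func main() {
	fmt.Println(maximumBytesPerEvent)                     // 262118: largest single event after splitting
	fmt.Println(fitsInBatch(9999, 0, 1))                  // true
	fmt.Println(fitsInBatch(10000, 0, 1))                 // false: event-count limit reached
	fmt.Println(fitsInBatch(0, maximumBytesPerPut-26, 1)) // false: byte limit would be exceeded
}
```

The 26-byte per-event overhead is the PutLogEvents accounting referenced by the AWS documentation links in the const block.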
-func (l *logStream) collectBatch() { - timer := newTicker(batchPublishFrequency) - var events []*cloudwatchlogs.InputLogEvent - bytes := 0 - for { - select { - case <-timer.C: - l.publishBatch(events) - events = events[:0] - bytes = 0 - case msg, more := <-l.messages: - if !more { - l.publishBatch(events) - return - } - unprocessedLine := msg.Line - for len(unprocessedLine) > 0 { - // Split line length so it does not exceed the maximum - lineBytes := len(unprocessedLine) - if lineBytes > maximumBytesPerEvent { - lineBytes = maximumBytesPerEvent - } - line := unprocessedLine[:lineBytes] - unprocessedLine = unprocessedLine[lineBytes:] - if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) { - // Publish an existing batch if it's already over the maximum number of events or if adding this - // event would push it over the maximum number of total bytes. - l.publishBatch(events) - events = events[:0] - bytes = 0 - } - events = append(events, &cloudwatchlogs.InputLogEvent{ - Message: aws.String(string(line)), - Timestamp: aws.Int64(msg.Timestamp.UnixNano() / int64(time.Millisecond)), - }) - bytes += (lineBytes + perEventBytes) - } - } - } -} - -// publishBatch calls PutLogEvents for a given set of InputLogEvents, -// accounting for sequencing requirements (each request must reference the -// sequence token returned by the previous request). -func (l *logStream) publishBatch(events []*cloudwatchlogs.InputLogEvent) { - if len(events) == 0 { - return - } - - sort.Sort(byTimestamp(events)) - - nextSequenceToken, err := l.putLogEvents(events, l.sequenceToken) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == dataAlreadyAcceptedCode { - // already submitted, just grab the correct sequence token - parts := strings.Split(awsErr.Message(), " ") - nextSequenceToken = &parts[len(parts)-1] - logrus.WithFields(logrus.Fields{ - "errorCode": awsErr.Code(), - "message": awsErr.Message(), - "logGroupName": l.logGroupName, - "logStreamName": l.logStreamName, - }).Info("Data already accepted, ignoring error") - err = nil - } else if awsErr.Code() == invalidSequenceTokenCode { - // sequence code is bad, grab the correct one and retry - parts := strings.Split(awsErr.Message(), " ") - token := parts[len(parts)-1] - nextSequenceToken, err = l.putLogEvents(events, &token) - } - } - } - if err != nil { - logrus.Error(err) - } else { - l.sequenceToken = nextSequenceToken - } -} - -// putLogEvents wraps the PutLogEvents API -func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { - input := &cloudwatchlogs.PutLogEventsInput{ - LogEvents: events, - SequenceToken: sequenceToken, - LogGroupName: aws.String(l.logGroupName), - LogStreamName: aws.String(l.logStreamName), - } - resp, err := l.client.PutLogEvents(input) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - logrus.WithFields(logrus.Fields{ - "errorCode": awsErr.Code(), - "message": awsErr.Message(), - "origError": awsErr.OrigErr(), - "logGroupName": l.logGroupName, - "logStreamName": l.logStreamName, - }).Error("Failed to put log events") - } - return nil, err - } - return resp.NextSequenceToken, nil -} - -// ValidateLogOpt looks for awslogs-specific log options awslogs-region, -// awslogs-group, and awslogs-stream -func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case logGroupKey: - case logStreamKey: - case regionKey: - default: - return fmt.Errorf("unknown log opt '%s' 
for %s log driver", key, name) - } - } - if cfg[logGroupKey] == "" { - return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) - } - return nil -} - -// Len returns the length of a byTimestamp slice. Len is required by the -// sort.Interface interface. -func (slice byTimestamp) Len() int { - return len(slice) -} - -// Less compares two values in a byTimestamp slice by Timestamp. Less is -// required by the sort.Interface interface. -func (slice byTimestamp) Less(i, j int) bool { - iTimestamp, jTimestamp := int64(0), int64(0) - if slice != nil && slice[i].Timestamp != nil { - iTimestamp = *slice[i].Timestamp - } - if slice != nil && slice[j].Timestamp != nil { - jTimestamp = *slice[j].Timestamp - } - return iTimestamp < jTimestamp -} - -// Swap swaps two values in a byTimestamp slice with each other. Swap is -// required by the sort.Interface interface. -func (slice byTimestamp) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] -} diff --git a/daemon/logger/awslogs/cloudwatchlogs_test.go b/daemon/logger/awslogs/cloudwatchlogs_test.go deleted file mode 100644 index 48882c4ce..000000000 --- a/daemon/logger/awslogs/cloudwatchlogs_test.go +++ /dev/null @@ -1,627 +0,0 @@ -package awslogs - -import ( - "errors" - "fmt" - "net/http" - "runtime" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/dockerversion" -) - -const ( - groupName = "groupName" - streamName = "streamName" - sequenceToken = "sequenceToken" - nextSequenceToken = "nextSequenceToken" - logline = "this is a log line" -) - -func TestNewAWSLogsClientUserAgentHandler(t *testing.T) { - ctx := logger.Context{ - Config: map[string]string{ - regionKey: "us-east-1", - }, - } - - client, err := newAWSLogsClient(ctx) - if err != nil { - t.Fatal(err) - } - realClient, ok := client.(*cloudwatchlogs.CloudWatchLogs) - if !ok { - t.Fatal("Could not cast client to cloudwatchlogs.CloudWatchLogs") - } - buildHandlerList := realClient.Handlers.Build - request := &request.Request{ - HTTPRequest: &http.Request{ - Header: http.Header{}, - }, - } - buildHandlerList.Run(request) - expectedUserAgentString := fmt.Sprintf("Docker %s (%s) %s/%s (%s; %s; %s)", - dockerversion.Version, runtime.GOOS, aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) - userAgent := request.HTTPRequest.Header.Get("User-Agent") - if userAgent != expectedUserAgentString { - t.Errorf("Wrong User-Agent string, expected \"%s\" but was \"%s\"", - expectedUserAgentString, userAgent) - } -} - -func TestNewAWSLogsClientRegionDetect(t *testing.T) { - ctx := logger.Context{ - Config: map[string]string{}, - } - - mockMetadata := newMockMetadataClient() - newRegionFinder = func() regionFinder { - return mockMetadata - } - mockMetadata.regionResult <- ®ionResult{ - successResult: "us-east-1", - } - - _, err := newAWSLogsClient(ctx) - if err != nil { - t.Fatal(err) - } -} - -func TestCreateSuccess(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - } - mockClient.createLogStreamResult <- &createLogStreamResult{} - - err := stream.create() - - if err != nil { - t.Errorf("Received unexpected err: %v\n", err) - } - argument := <-mockClient.createLogStreamArgument - if argument.LogGroupName == nil { - t.Fatal("Expected non-nil 
LogGroupName") - } - if *argument.LogGroupName != groupName { - t.Errorf("Expected LogGroupName to be %s", groupName) - } - if argument.LogStreamName == nil { - t.Fatal("Expected non-nil LogGroupName") - } - if *argument.LogStreamName != streamName { - t.Errorf("Expected LogStreamName to be %s", streamName) - } -} - -func TestCreateError(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - } - mockClient.createLogStreamResult <- &createLogStreamResult{ - errorResult: errors.New("Error!"), - } - - err := stream.create() - - if err == nil { - t.Fatal("Expected non-nil err") - } -} - -func TestCreateAlreadyExists(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - } - mockClient.createLogStreamResult <- &createLogStreamResult{ - errorResult: awserr.New(resourceAlreadyExistsCode, "", nil), - } - - err := stream.create() - - if err != nil { - t.Fatal("Expected nil err") - } -} - -func TestPublishBatchSuccess(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - - events := []*cloudwatchlogs.InputLogEvent{ - { - Message: aws.String(logline), - }, - } - - stream.publishBatch(events) - if stream.sequenceToken == nil { - t.Fatal("Expected non-nil sequenceToken") - } - if *stream.sequenceToken != nextSequenceToken { - t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) - } - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if argument.SequenceToken == nil { - t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") - } - if *argument.SequenceToken != sequenceToken { - t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) - } - if argument.LogEvents[0] != events[0] { - t.Error("Expected event to equal input") - } -} - -func TestPublishBatchError(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - errorResult: errors.New("Error!"), - } - - events := []*cloudwatchlogs.InputLogEvent{ - { - Message: aws.String(logline), - }, - } - - stream.publishBatch(events) - if stream.sequenceToken == nil { - t.Fatal("Expected non-nil sequenceToken") - } - if *stream.sequenceToken != sequenceToken { - t.Errorf("Expected sequenceToken to be %s, but was %s", sequenceToken, *stream.sequenceToken) - } -} - -func TestPublishBatchInvalidSeqSuccess(t *testing.T) { - mockClient := newMockClientBuffered(2) - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - errorResult: awserr.New(invalidSequenceTokenCode, "use token token", nil), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: 
aws.String(nextSequenceToken), - }, - } - - events := []*cloudwatchlogs.InputLogEvent{ - { - Message: aws.String(logline), - }, - } - - stream.publishBatch(events) - if stream.sequenceToken == nil { - t.Fatal("Expected non-nil sequenceToken") - } - if *stream.sequenceToken != nextSequenceToken { - t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) - } - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if argument.SequenceToken == nil { - t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") - } - if *argument.SequenceToken != sequenceToken { - t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) - } - if argument.LogEvents[0] != events[0] { - t.Error("Expected event to equal input") - } - - argument = <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if argument.SequenceToken == nil { - t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") - } - if *argument.SequenceToken != "token" { - t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", "token", *argument.SequenceToken) - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) - } - if argument.LogEvents[0] != events[0] { - t.Error("Expected event to equal input") - } -} - -func TestPublishBatchAlreadyAccepted(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - errorResult: awserr.New(dataAlreadyAcceptedCode, "use token token", nil), - } - - events := []*cloudwatchlogs.InputLogEvent{ - { - Message: aws.String(logline), - }, - } - - stream.publishBatch(events) - if stream.sequenceToken == nil { - t.Fatal("Expected non-nil sequenceToken") - } - if *stream.sequenceToken != "token" { - t.Errorf("Expected sequenceToken to be %s, but was %s", "token", *stream.sequenceToken) - } - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if argument.SequenceToken == nil { - t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") - } - if *argument.SequenceToken != sequenceToken { - t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) - } - if argument.LogEvents[0] != events[0] { - t.Error("Expected event to equal input") - } -} - -func TestCollectBatchSimple(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - messages: make(chan *logger.Message), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - ticks := make(chan time.Time) - newTicker = func(_ time.Duration) *time.Ticker { - return &time.Ticker{ - C: ticks, - } - } - - go 
stream.collectBatch() - - stream.Log(&logger.Message{ - Line: []byte(logline), - Timestamp: time.Time{}, - }) - - ticks <- time.Time{} - stream.Close() - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) - } - if *argument.LogEvents[0].Message != logline { - t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) - } -} - -func TestCollectBatchTicker(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - messages: make(chan *logger.Message), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - ticks := make(chan time.Time) - newTicker = func(_ time.Duration) *time.Ticker { - return &time.Ticker{ - C: ticks, - } - } - - go stream.collectBatch() - - stream.Log(&logger.Message{ - Line: []byte(logline + " 1"), - Timestamp: time.Time{}, - }) - stream.Log(&logger.Message{ - Line: []byte(logline + " 2"), - Timestamp: time.Time{}, - }) - - ticks <- time.Time{} - - // Verify first batch - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != 2 { - t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) - } - if *argument.LogEvents[0].Message != logline+" 1" { - t.Errorf("Expected message to be %s but was %s", logline+" 1", *argument.LogEvents[0].Message) - } - if *argument.LogEvents[1].Message != logline+" 2" { - t.Errorf("Expected message to be %s but was %s", logline+" 2", *argument.LogEvents[0].Message) - } - - stream.Log(&logger.Message{ - Line: []byte(logline + " 3"), - Timestamp: time.Time{}, - }) - - ticks <- time.Time{} - argument = <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents)) - } - if *argument.LogEvents[0].Message != logline+" 3" { - t.Errorf("Expected message to be %s but was %s", logline+" 3", *argument.LogEvents[0].Message) - } - - stream.Close() - -} - -func TestCollectBatchClose(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - messages: make(chan *logger.Message), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - var ticks = make(chan time.Time) - newTicker = func(_ time.Duration) *time.Ticker { - return &time.Ticker{ - C: ticks, - } - } - - go stream.collectBatch() - - stream.Log(&logger.Message{ - Line: []byte(logline), - Timestamp: time.Time{}, - }) - - // no ticks - stream.Close() - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) - } - if *argument.LogEvents[0].Message != logline { - 
t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) - } -} - -func TestCollectBatchLineSplit(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - messages: make(chan *logger.Message), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - var ticks = make(chan time.Time) - newTicker = func(_ time.Duration) *time.Ticker { - return &time.Ticker{ - C: ticks, - } - } - - go stream.collectBatch() - - longline := strings.Repeat("A", maximumBytesPerEvent) - stream.Log(&logger.Message{ - Line: []byte(longline + "B"), - Timestamp: time.Time{}, - }) - - // no ticks - stream.Close() - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != 2 { - t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) - } - if *argument.LogEvents[0].Message != longline { - t.Errorf("Expected message to be %s but was %s", longline, *argument.LogEvents[0].Message) - } - if *argument.LogEvents[1].Message != "B" { - t.Errorf("Expected message to be %s but was %s", "B", *argument.LogEvents[1].Message) - } -} - -func TestCollectBatchMaxEvents(t *testing.T) { - mockClient := newMockClientBuffered(1) - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - messages: make(chan *logger.Message), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - var ticks = make(chan time.Time) - newTicker = func(_ time.Duration) *time.Ticker { - return &time.Ticker{ - C: ticks, - } - } - - go stream.collectBatch() - - line := "A" - for i := 0; i <= maximumLogEventsPerPut; i++ { - stream.Log(&logger.Message{ - Line: []byte(line), - Timestamp: time.Time{}, - }) - } - - // no ticks - stream.Close() - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != maximumLogEventsPerPut { - t.Errorf("Expected LogEvents to contain %d elements, but contains %d", maximumLogEventsPerPut, len(argument.LogEvents)) - } - - argument = <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain %d elements, but contains %d", 1, len(argument.LogEvents)) - } -} - -func TestCollectBatchMaxTotalBytes(t *testing.T) { - mockClient := newMockClientBuffered(1) - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - messages: make(chan *logger.Message), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - var ticks = make(chan time.Time) - newTicker = func(_ time.Duration) *time.Ticker { - return &time.Ticker{ - C: ticks, - } - } - - go stream.collectBatch() - - longline := strings.Repeat("A", maximumBytesPerPut) - stream.Log(&logger.Message{ - Line: []byte(longline + "B"), - Timestamp: 
time.Time{}, - }) - - // no ticks - stream.Close() - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - bytes := 0 - for _, event := range argument.LogEvents { - bytes += len(*event.Message) - } - if bytes > maximumBytesPerPut { - t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes) - } - - argument = <-mockClient.putLogEventsArgument - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents)) - } - message := *argument.LogEvents[0].Message - if message[len(message)-1:] != "B" { - t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:]) - } -} diff --git a/daemon/logger/awslogs/cwlogsiface_mock_test.go b/daemon/logger/awslogs/cwlogsiface_mock_test.go deleted file mode 100644 index b768a3d7e..000000000 --- a/daemon/logger/awslogs/cwlogsiface_mock_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package awslogs - -import "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - -type mockcwlogsclient struct { - createLogStreamArgument chan *cloudwatchlogs.CreateLogStreamInput - createLogStreamResult chan *createLogStreamResult - putLogEventsArgument chan *cloudwatchlogs.PutLogEventsInput - putLogEventsResult chan *putLogEventsResult -} - -type createLogStreamResult struct { - successResult *cloudwatchlogs.CreateLogStreamOutput - errorResult error -} - -type putLogEventsResult struct { - successResult *cloudwatchlogs.PutLogEventsOutput - errorResult error -} - -func newMockClient() *mockcwlogsclient { - return &mockcwlogsclient{ - createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, 1), - createLogStreamResult: make(chan *createLogStreamResult, 1), - putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, 1), - putLogEventsResult: make(chan *putLogEventsResult, 1), - } -} - -func newMockClientBuffered(buflen int) *mockcwlogsclient { - return &mockcwlogsclient{ - createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, buflen), - createLogStreamResult: make(chan *createLogStreamResult, buflen), - putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, buflen), - putLogEventsResult: make(chan *putLogEventsResult, buflen), - } -} - -func (m *mockcwlogsclient) CreateLogStream(input *cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { - m.createLogStreamArgument <- input - output := <-m.createLogStreamResult - return output.successResult, output.errorResult -} - -func (m *mockcwlogsclient) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { - events := make([]*cloudwatchlogs.InputLogEvent, len(input.LogEvents)) - copy(events, input.LogEvents) - m.putLogEventsArgument <- &cloudwatchlogs.PutLogEventsInput{ - LogEvents: events, - SequenceToken: input.SequenceToken, - LogGroupName: input.LogGroupName, - LogStreamName: input.LogStreamName, - } - output := <-m.putLogEventsResult - return output.successResult, output.errorResult -} - -type mockmetadataclient struct { - regionResult chan *regionResult -} - -type regionResult struct { - successResult string - errorResult error -} - -func newMockMetadataClient() *mockmetadataclient { - return &mockmetadataclient{ - regionResult: make(chan *regionResult, 1), - } -} - -func (m *mockmetadataclient) Region() (string, error) { - output := <-m.regionResult - return output.successResult, output.errorResult -} diff --git a/daemon/logger/context.go 
b/daemon/logger/context.go deleted file mode 100644 index 2b0e071f6..000000000 --- a/daemon/logger/context.go +++ /dev/null @@ -1,113 +0,0 @@ -package logger - -import ( - "fmt" - "os" - "strings" - "time" -) - -// Context provides enough information for a logging driver to do its function. -type Context struct { - Config map[string]string - ContainerID string - ContainerName string - ContainerEntrypoint string - ContainerArgs []string - ContainerImageID string - ContainerImageName string - ContainerCreated time.Time - ContainerEnv []string - ContainerLabels map[string]string - LogPath string - DaemonName string -} - -// ExtraAttributes returns the user-defined extra attributes (labels, -// environment variables) in key-value format. This can be used by log drivers -// that support metadata to add more context to a log. -func (ctx *Context) ExtraAttributes(keyMod func(string) string) map[string]string { - extra := make(map[string]string) - labels, ok := ctx.Config["labels"] - if ok && len(labels) > 0 { - for _, l := range strings.Split(labels, ",") { - if v, ok := ctx.ContainerLabels[l]; ok { - if keyMod != nil { - l = keyMod(l) - } - extra[l] = v - } - } - } - - env, ok := ctx.Config["env"] - if ok && len(env) > 0 { - envMapping := make(map[string]string) - for _, e := range ctx.ContainerEnv { - if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { - envMapping[kv[0]] = kv[1] - } - } - for _, l := range strings.Split(env, ",") { - if v, ok := envMapping[l]; ok { - if keyMod != nil { - l = keyMod(l) - } - extra[l] = v - } - } - } - - return extra -} - -// Hostname returns the hostname from the underlying OS. -func (ctx *Context) Hostname() (string, error) { - hostname, err := os.Hostname() - if err != nil { - return "", fmt.Errorf("logger: can not resolve hostname: %v", err) - } - return hostname, nil -} - -// Command returns the command that the container being logged was -// started with. The Entrypoint is prepended to the container -// arguments. -func (ctx *Context) Command() string { - terms := []string{ctx.ContainerEntrypoint} - for _, arg := range ctx.ContainerArgs { - terms = append(terms, arg) - } - command := strings.Join(terms, " ") - return command -} - -// ID Returns the Container ID shortened to 12 characters. -func (ctx *Context) ID() string { - return ctx.ContainerID[:12] -} - -// FullID is an alias of ContainerID. -func (ctx *Context) FullID() string { - return ctx.ContainerID -} - -// Name returns the ContainerName without a preceding '/'. -func (ctx *Context) Name() string { - return ctx.ContainerName[1:] -} - -// ImageID returns the ContainerImageID shortened to 12 characters. -func (ctx *Context) ImageID() string { - return ctx.ContainerImageID[:12] -} - -// ImageFullID is an alias of ContainerImageID. -func (ctx *Context) ImageFullID() string { - return ctx.ContainerImageID -} - -// ImageName is an alias of ContainerImageName -func (ctx *Context) ImageName() string { - return ctx.ContainerImageName -} diff --git a/daemon/logger/copier.go b/daemon/logger/copier.go deleted file mode 100644 index a54f23f28..000000000 --- a/daemon/logger/copier.go +++ /dev/null @@ -1,81 +0,0 @@ -package logger - -import ( - "bufio" - "bytes" - "io" - "sync" - "time" - - "github.com/Sirupsen/logrus" -) - -// Copier can copy logs from specified sources to Logger and attach Timestamp. 
-// Writes are concurrent, so you need implement some sync in your logger -type Copier struct { - // srcs is map of name -> reader pairs, for example "stdout", "stderr" - srcs map[string]io.Reader - dst Logger - copyJobs sync.WaitGroup - closeOnce sync.Once - closed chan struct{} -} - -// NewCopier creates a new Copier -func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier { - return &Copier{ - srcs: srcs, - dst: dst, - closed: make(chan struct{}), - } -} - -// Run starts logs copying -func (c *Copier) Run() { - for src, w := range c.srcs { - c.copyJobs.Add(1) - go c.copySrc(src, w) - } -} - -func (c *Copier) copySrc(name string, src io.Reader) { - defer c.copyJobs.Done() - reader := bufio.NewReader(src) - - for { - select { - case <-c.closed: - return - default: - line, err := reader.ReadBytes('\n') - line = bytes.TrimSuffix(line, []byte{'\n'}) - - // ReadBytes can return full or partial output even when it failed. - // e.g. it can return a full entry and EOF. - if err == nil || len(line) > 0 { - if logErr := c.dst.Log(&Message{Line: line, Source: name, Timestamp: time.Now().UTC()}); logErr != nil { - logrus.Errorf("Failed to log msg %q for logger %s: %s", line, c.dst.Name(), logErr) - } - } - - if err != nil { - if err != io.EOF { - logrus.Errorf("Error scanning log stream: %s", err) - } - return - } - } - } -} - -// Wait waits until all copying is done -func (c *Copier) Wait() { - c.copyJobs.Wait() -} - -// Close closes the copier -func (c *Copier) Close() { - c.closeOnce.Do(func() { - close(c.closed) - }) -} diff --git a/daemon/logger/copier_test.go b/daemon/logger/copier_test.go deleted file mode 100644 index 69225e9a7..000000000 --- a/daemon/logger/copier_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package logger - -import ( - "bytes" - "encoding/json" - "io" - "sync" - "testing" - "time" -) - -type TestLoggerJSON struct { - *json.Encoder - mu sync.Mutex - delay time.Duration -} - -func (l *TestLoggerJSON) Log(m *Message) error { - if l.delay > 0 { - time.Sleep(l.delay) - } - l.mu.Lock() - defer l.mu.Unlock() - return l.Encode(m) -} - -func (l *TestLoggerJSON) Close() error { return nil } - -func (l *TestLoggerJSON) Name() string { return "json" } - -func TestCopier(t *testing.T) { - stdoutLine := "Line that thinks that it is log line from docker stdout" - stderrLine := "Line that thinks that it is log line from docker stderr" - var stdout bytes.Buffer - var stderr bytes.Buffer - for i := 0; i < 30; i++ { - if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { - t.Fatal(err) - } - if _, err := stderr.WriteString(stderrLine + "\n"); err != nil { - t.Fatal(err) - } - } - - var jsonBuf bytes.Buffer - - jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} - - c := NewCopier( - map[string]io.Reader{ - "stdout": &stdout, - "stderr": &stderr, - }, - jsonLog) - c.Run() - wait := make(chan struct{}) - go func() { - c.Wait() - close(wait) - }() - select { - case <-time.After(1 * time.Second): - t.Fatal("Copier failed to do its work in 1 second") - case <-wait: - } - dec := json.NewDecoder(&jsonBuf) - for { - var msg Message - if err := dec.Decode(&msg); err != nil { - if err == io.EOF { - break - } - t.Fatal(err) - } - if msg.Source != "stdout" && msg.Source != "stderr" { - t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") - } - if msg.Source == "stdout" { - if string(msg.Line) != stdoutLine { - t.Fatalf("Wrong Line: %q, expected %q", msg.Line, stdoutLine) - } - } - if msg.Source == "stderr" { - if string(msg.Line) != stderrLine { - 
t.Fatalf("Wrong Line: %q, expected %q", msg.Line, stderrLine) - } - } - } -} - -func TestCopierSlow(t *testing.T) { - stdoutLine := "Line that thinks that it is log line from docker stdout" - var stdout bytes.Buffer - for i := 0; i < 30; i++ { - if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { - t.Fatal(err) - } - } - - var jsonBuf bytes.Buffer - //encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)} - jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond} - - c := NewCopier(map[string]io.Reader{"stdout": &stdout}, jsonLog) - c.Run() - wait := make(chan struct{}) - go func() { - c.Wait() - close(wait) - }() - <-time.After(150 * time.Millisecond) - c.Close() - select { - case <-time.After(200 * time.Millisecond): - t.Fatalf("failed to exit in time after the copier is closed") - case <-wait: - } -} diff --git a/daemon/logger/etwlogs/etwlogs_windows.go b/daemon/logger/etwlogs/etwlogs_windows.go deleted file mode 100644 index ab3ff1f2b..000000000 --- a/daemon/logger/etwlogs/etwlogs_windows.go +++ /dev/null @@ -1,183 +0,0 @@ -// Package etwlogs provides a log driver for forwarding container logs -// as ETW events.(ETW stands for Event Tracing for Windows) -// A client can then create an ETW listener to listen for events that are sent -// by the ETW provider that we register, using the provider's GUID "a3693192-9ed6-46d2-a981-f8226c8363bd". -// Here is an example of how to do this using the logman utility: -// 1. logman start -ets DockerContainerLogs -p {a3693192-9ed6-46d2-a981-f8226c8363bd} 0 0 -o trace.etl -// 2. Run container(s) and generate log messages -// 3. logman stop -ets DockerContainerLogs -// 4. You can then convert the etl log file to XML using: tracerpt -y trace.etl -// -// Each container log message generates an ETW event that also contains: -// the container name and ID, the timestamp, and the stream type. -package etwlogs - -import ( - "errors" - "fmt" - "sync" - "syscall" - "unsafe" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/logger" -) - -type etwLogs struct { - containerName string - imageName string - containerID string - imageID string -} - -const ( - name = "etwlogs" - win32CallSuccess = 0 -) - -var win32Lib *syscall.DLL -var providerHandle syscall.Handle -var refCount int -var mu sync.Mutex - -func init() { - providerHandle = syscall.InvalidHandle - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } -} - -// New creates a new etwLogs logger for the given container and registers the EWT provider. -func New(ctx logger.Context) (logger.Logger, error) { - if err := registerETWProvider(); err != nil { - return nil, err - } - logrus.Debugf("logging driver etwLogs configured for container: %s.", ctx.ContainerID) - - return &etwLogs{ - containerName: fixContainerName(ctx.ContainerName), - imageName: ctx.ContainerImageName, - containerID: ctx.ContainerID, - imageID: ctx.ContainerImageID, - }, nil -} - -// Log logs the message to the ETW stream. -func (etwLogger *etwLogs) Log(msg *logger.Message) error { - if providerHandle == syscall.InvalidHandle { - // This should never be hit, if it is, it indicates a programming error. - errorMessage := "ETWLogs cannot log the message, because the event provider has not been registered." - logrus.Error(errorMessage) - return errors.New(errorMessage) - } - return callEventWriteString(createLogMessage(etwLogger, msg)) -} - -// Close closes the logger by unregistering the ETW provider. 
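Every logging driver removed in this patch implements the same small contract: Log(*logger.Message) error, Name() string, and Close() error. A minimal, self-contained sketch of that shape, with the Message fields reduced to the ones the drivers above actually use (all names here are illustrative, not part of the deleted code):

    package main

    import (
        "fmt"
        "time"
    )

    // Message mirrors the fields of logger.Message that the drivers above
    // rely on; redefined locally so the sketch stands alone.
    type Message struct {
        Line      []byte
        Source    string
        Timestamp time.Time
    }

    // printLogger is a hypothetical driver satisfying the three-method
    // contract by writing to stdout.
    type printLogger struct{}

    func (p *printLogger) Log(msg *Message) error {
        _, err := fmt.Printf("%s %s: %s\n", msg.Timestamp.Format(time.RFC3339), msg.Source, msg.Line)
        return err
    }

    func (p *printLogger) Name() string { return "print-sketch" }

    func (p *printLogger) Close() error { return nil }

    func main() {
        l := &printLogger{}
        defer l.Close()
        l.Log(&Message{Line: []byte("hello"), Source: "stdout", Timestamp: time.Now().UTC()})
    }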
-func (etwLogger *etwLogs) Close() error { - unregisterETWProvider() - return nil -} - -func (etwLogger *etwLogs) Name() string { - return name -} - -func createLogMessage(etwLogger *etwLogs, msg *logger.Message) string { - return fmt.Sprintf("container_name: %s, image_name: %s, container_id: %s, image_id: %s, source: %s, log: %s", - etwLogger.containerName, - etwLogger.imageName, - etwLogger.containerID, - etwLogger.imageID, - msg.Source, - msg.Line) -} - -// fixContainerName removes the initial '/' from the container name. -func fixContainerName(cntName string) string { - if len(cntName) > 0 && cntName[0] == '/' { - cntName = cntName[1:] - } - return cntName -} - -func registerETWProvider() error { - mu.Lock() - defer mu.Unlock() - if refCount == 0 { - var err error - if win32Lib, err = syscall.LoadDLL("Advapi32.dll"); err != nil { - return err - } - if err = callEventRegister(); err != nil { - win32Lib.Release() - win32Lib = nil - return err - } - } - - refCount++ - return nil -} - -func unregisterETWProvider() { - mu.Lock() - defer mu.Unlock() - if refCount == 1 { - if callEventUnregister() { - refCount-- - providerHandle = syscall.InvalidHandle - win32Lib.Release() - win32Lib = nil - } - // Not returning an error if EventUnregister fails, because etwLogs will continue to work - } else { - refCount-- - } -} - -func callEventRegister() error { - proc, err := win32Lib.FindProc("EventRegister") - if err != nil { - return err - } - // The provider's GUID is {a3693192-9ed6-46d2-a981-f8226c8363bd} - guid := syscall.GUID{ - 0xa3693192, 0x9ed6, 0x46d2, - [8]byte{0xa9, 0x81, 0xf8, 0x22, 0x6c, 0x83, 0x63, 0xbd}, - } - - ret, _, _ := proc.Call(uintptr(unsafe.Pointer(&guid)), 0, 0, uintptr(unsafe.Pointer(&providerHandle))) - if ret != win32CallSuccess { - errorMessage := fmt.Sprintf("Failed to register ETW provider. Error: %d", ret) - logrus.Error(errorMessage) - return errors.New(errorMessage) - } - return nil -} - -func callEventWriteString(message string) error { - proc, err := win32Lib.FindProc("EventWriteString") - if err != nil { - return err - } - ret, _, _ := proc.Call(uintptr(providerHandle), 0, 0, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(message)))) - if ret != win32CallSuccess { - errorMessage := fmt.Sprintf("ETWLogs provider failed to log message. Error: %d", ret) - logrus.Error(errorMessage) - return errors.New(errorMessage) - } - return nil -} - -func callEventUnregister() bool { - proc, err := win32Lib.FindProc("EventUnregister") - if err != nil { - return false - } - ret, _, _ := proc.Call(uintptr(providerHandle)) - if ret != win32CallSuccess { - return false - } - return true -} diff --git a/daemon/logger/factory.go b/daemon/logger/factory.go deleted file mode 100644 index 9cf716b09..000000000 --- a/daemon/logger/factory.go +++ /dev/null @@ -1,104 +0,0 @@ -package logger - -import ( - "fmt" - "sync" -) - -// Creator builds a logging driver instance with given context. -type Creator func(Context) (Logger, error) - -// LogOptValidator checks the options specific to the underlying -// logging implementation. 
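The callEvent* helpers above resolve Advapi32 exports by hand and invoke them through proc.Call. The same pattern with the lazy-loading variant of the syscall API, Windows-only and purely illustrative (a real caller would first obtain a provider handle via EventRegister, as the deleted code does):

    //go:build windows

    package main

    import (
        "syscall"
        "unsafe"
    )

    func main() {
        // Nothing is loaded or resolved until the first Call.
        advapi := syscall.NewLazyDLL("Advapi32.dll")
        eventWriteString := advapi.NewProc("EventWriteString")

        msg, err := syscall.UTF16PtrFromString("hello from a sketch")
        if err != nil {
            panic(err)
        }
        // Handle 0 is a placeholder; EventWriteString expects the handle
        // returned by EventRegister.
        ret, _, _ := eventWriteString.Call(0, 0, 0, uintptr(unsafe.Pointer(msg)))
        _ = ret // 0 means success, mirroring win32CallSuccess above
    }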
-type LogOptValidator func(cfg map[string]string) error - -type logdriverFactory struct { - registry map[string]Creator - optValidator map[string]LogOptValidator - m sync.Mutex -} - -func (lf *logdriverFactory) register(name string, c Creator) error { - if lf.driverRegistered(name) { - return fmt.Errorf("logger: log driver named '%s' is already registered", name) - } - - lf.m.Lock() - lf.registry[name] = c - lf.m.Unlock() - return nil -} - -func (lf *logdriverFactory) driverRegistered(name string) bool { - lf.m.Lock() - _, ok := lf.registry[name] - lf.m.Unlock() - return ok -} - -func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { - lf.m.Lock() - defer lf.m.Unlock() - - if _, ok := lf.optValidator[name]; ok { - return fmt.Errorf("logger: log validator named '%s' is already registered", name) - } - lf.optValidator[name] = l - return nil -} - -func (lf *logdriverFactory) get(name string) (Creator, error) { - lf.m.Lock() - defer lf.m.Unlock() - - c, ok := lf.registry[name] - if !ok { - return c, fmt.Errorf("logger: no log driver named '%s' is registered", name) - } - return c, nil -} - -func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { - lf.m.Lock() - defer lf.m.Unlock() - - c, _ := lf.optValidator[name] - return c -} - -var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance - -// RegisterLogDriver registers the given logging driver builder with given logging -// driver name. -func RegisterLogDriver(name string, c Creator) error { - return factory.register(name, c) -} - -// RegisterLogOptValidator registers the logging option validator with -// the given logging driver name. -func RegisterLogOptValidator(name string, l LogOptValidator) error { - return factory.registerLogOptValidator(name, l) -} - -// GetLogDriver provides the logging driver builder for a logging driver name. -func GetLogDriver(name string) (Creator, error) { - return factory.get(name) -} - -// ValidateLogOpts checks the options for the given log driver. The -// options supported are specific to the LogDriver implementation. -func ValidateLogOpts(name string, cfg map[string]string) error { - if name == "none" { - return nil - } - - if !factory.driverRegistered(name) { - return fmt.Errorf("logger: no log driver named '%s' is registered", name) - } - - validator := factory.getLogOptValidator(name) - if validator != nil { - return validator(cfg) - } - return nil -} diff --git a/daemon/logger/fluentd/fluentd.go b/daemon/logger/fluentd/fluentd.go deleted file mode 100644 index 137ab0769..000000000 --- a/daemon/logger/fluentd/fluentd.go +++ /dev/null @@ -1,200 +0,0 @@ -// Package fluentd provides the log driver for forwarding server logs -// to fluentd endpoints. 
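The factory above is an init-time registry: drivers register a constructor under a name, and the daemon looks that constructor up when a container requests the driver. A compressed, self-contained sketch of the pattern (hypothetical names; the real factory also tracks per-driver option validators):

    package main

    import (
        "fmt"
        "sync"
    )

    // Creator stands in for the constructor type a driver registers.
    type Creator func() (string, error)

    var (
        mu       sync.Mutex
        registry = map[string]Creator{}
    )

    func Register(name string, c Creator) error {
        mu.Lock()
        defer mu.Unlock()
        if _, dup := registry[name]; dup {
            return fmt.Errorf("driver %q already registered", name)
        }
        registry[name] = c
        return nil
    }

    func Get(name string) (Creator, error) {
        mu.Lock()
        defer mu.Unlock()
        c, ok := registry[name]
        if !ok {
            return nil, fmt.Errorf("no driver named %q", name)
        }
        return c, nil
    }

    func init() {
        // A driver package would do this from its own init(), which is why
        // the deleted drivers all call logger.RegisterLogDriver there.
        Register("demo", func() (string, error) { return "demo driver", nil })
    }

    func main() {
        c, err := Get("demo")
        if err != nil {
            panic(err)
        }
        s, _ := c()
        fmt.Println(s)
    }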
-package fluentd - -import ( - "fmt" - "math" - "net" - "strconv" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/go-units" - "github.com/fluent/fluent-logger-golang/fluent" -) - -type fluentd struct { - tag string - containerID string - containerName string - writer *fluent.Fluent - extra map[string]string -} - -const ( - name = "fluentd" - - defaultHost = "127.0.0.1" - defaultPort = 24224 - defaultBufferLimit = 1024 * 1024 - defaultTagPrefix = "docker" - - // logger tries to reconnect 2**32 - 1 times - // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] - defaultRetryWait = 1000 - defaultTimeout = 3 * time.Second - defaultMaxRetries = math.MaxInt32 - defaultReconnectWaitIncreRate = 1.5 - - addressKey = "fluentd-address" - bufferLimitKey = "fluentd-buffer-limit" - retryWaitKey = "fluentd-retry-wait" - maxRetriesKey = "fluentd-max-retries" - asyncConnectKey = "fluentd-async-connect" -) - -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates a fluentd logger using the configuration passed in on -// the context. The supported context configuration variable is -// fluentd-address. -func New(ctx logger.Context) (logger.Logger, error) { - host, port, err := parseAddress(ctx.Config[addressKey]) - if err != nil { - return nil, err - } - - tag, err := loggerutils.ParseLogTag(ctx, "{{.DaemonName}}.{{.ID}}") - if err != nil { - return nil, err - } - - extra := ctx.ExtraAttributes(nil) - - bufferLimit := defaultBufferLimit - if ctx.Config[bufferLimitKey] != "" { - bl64, err := units.RAMInBytes(ctx.Config[bufferLimitKey]) - if err != nil { - return nil, err - } - bufferLimit = int(bl64) - } - - retryWait := defaultRetryWait - if ctx.Config[retryWaitKey] != "" { - rwd, err := time.ParseDuration(ctx.Config[retryWaitKey]) - if err != nil { - return nil, err - } - retryWait = int(rwd.Seconds() * 1000) - } - - maxRetries := defaultMaxRetries - if ctx.Config[maxRetriesKey] != "" { - mr64, err := strconv.ParseUint(ctx.Config[maxRetriesKey], 10, strconv.IntSize) - if err != nil { - return nil, err - } - maxRetries = int(mr64) - } - - asyncConnect := false - if ctx.Config[asyncConnectKey] != "" { - if asyncConnect, err = strconv.ParseBool(ctx.Config[asyncConnectKey]); err != nil { - return nil, err - } - } - - fluentConfig := fluent.Config{ - FluentPort: port, - FluentHost: host, - BufferLimit: bufferLimit, - RetryWait: retryWait, - MaxRetry: maxRetries, - AsyncConnect: asyncConnect, - } - - logrus.WithField("container", ctx.ContainerID).WithField("config", fluentConfig). - Debug("logging driver fluentd configured") - - log, err := fluent.New(fluentConfig) - if err != nil { - return nil, err - } - return &fluentd{ - tag: tag, - containerID: ctx.ContainerID, - containerName: ctx.ContainerName, - writer: log, - extra: extra, - }, nil -} - -func (f *fluentd) Log(msg *logger.Message) error { - data := map[string]string{ - "container_id": f.containerID, - "container_name": f.containerName, - "source": msg.Source, - "log": string(msg.Line), - } - for k, v := range f.extra { - data[k] = v - } - // fluent-logger-golang buffers logs from failures and disconnections, - // and these are transferred again automatically. 
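// (Editor's aside: the call below forwards the record with the message's
// own timestamp rather than time.Now(), so entries that fluent-logger-golang
// buffered during an outage keep their original times when retransmitted.)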
- return f.writer.PostWithTime(f.tag, msg.Timestamp, data) -} - -func (f *fluentd) Close() error { - return f.writer.Close() -} - -func (f *fluentd) Name() string { - return name -} - -// ValidateLogOpt looks for fluentd specific log option fluentd-address. -func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case "env": - case "labels": - case "tag": - case addressKey: - case bufferLimitKey: - case retryWaitKey: - case maxRetriesKey: - case asyncConnectKey: - // Accepted - default: - return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key) - } - } - - if _, _, err := parseAddress(cfg["fluentd-address"]); err != nil { - return err - } - - return nil -} - -func parseAddress(address string) (string, int, error) { - if address == "" { - return defaultHost, defaultPort, nil - } - - host, port, err := net.SplitHostPort(address) - if err != nil { - if !strings.Contains(err.Error(), "missing port in address") { - return "", 0, fmt.Errorf("invalid fluentd-address %s: %s", address, err) - } - return host, defaultPort, nil - } - - portnum, err := strconv.Atoi(port) - if err != nil { - return "", 0, fmt.Errorf("invalid fluentd-address %s: %s", address, err) - } - return host, portnum, nil -} diff --git a/daemon/logger/gcplogs/gcplogging.go b/daemon/logger/gcplogs/gcplogging.go deleted file mode 100644 index 781642bb5..000000000 --- a/daemon/logger/gcplogs/gcplogging.go +++ /dev/null @@ -1,191 +0,0 @@ -package gcplogs - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/docker/docker/daemon/logger" - - "github.com/Sirupsen/logrus" - "golang.org/x/net/context" - "google.golang.org/cloud/compute/metadata" - "google.golang.org/cloud/logging" -) - -const ( - name = "gcplogs" - - projectOptKey = "gcp-project" - logLabelsKey = "labels" - logEnvKey = "env" - logCmdKey = "gcp-log-cmd" -) - -var ( - // The number of logs the gcplogs driver has dropped. - droppedLogs uint64 - - onGCE bool - - // instance metadata populated from the metadata server if available - projectID string - zone string - instanceName string - instanceID string -) - -func init() { - - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - - if err := logger.RegisterLogOptValidator(name, ValidateLogOpts); err != nil { - logrus.Fatal(err) - } -} - -type gcplogs struct { - client *logging.Client - instance *instanceInfo - container *containerInfo -} - -type dockerLogEntry struct { - Instance *instanceInfo `json:"instance,omitempty"` - Container *containerInfo `json:"container,omitempty"` - Data string `json:"data,omitempty"` -} - -type instanceInfo struct { - Zone string `json:"zone,omitempty"` - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` -} - -type containerInfo struct { - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - ImageName string `json:"imageName,omitempty"` - ImageID string `json:"imageId,omitempty"` - Created time.Time `json:"created,omitempty"` - Command string `json:"command,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` -} - -var initGCPOnce sync.Once - -func initGCP() { - initGCPOnce.Do(func() { - onGCE = metadata.OnGCE() - if onGCE { - // These will fail on instances if the metadata service is - // down or the client is compiled with an API version that - // has been removed. 
Since these are not vital, let's ignore - // them and make their fields in the dockeLogEntry ,omitempty - projectID, _ = metadata.ProjectID() - zone, _ = metadata.Zone() - instanceName, _ = metadata.InstanceName() - instanceID, _ = metadata.InstanceID() - } - }) -} - -// New creates a new logger that logs to Google Cloud Logging using the application -// default credentials. -// -// See https://developers.google.com/identity/protocols/application-default-credentials -func New(ctx logger.Context) (logger.Logger, error) { - initGCP() - - var project string - if projectID != "" { - project = projectID - } - if projectID, found := ctx.Config[projectOptKey]; found { - project = projectID - } - if project == "" { - return nil, fmt.Errorf("No project was specified and couldn't read project from the meatadata server. Please specify a project") - } - - c, err := logging.NewClient(context.Background(), project, "gcplogs-docker-driver") - if err != nil { - return nil, err - } - - if err := c.Ping(); err != nil { - return nil, fmt.Errorf("unable to connect or authenticate with Google Cloud Logging: %v", err) - } - - l := &gcplogs{ - client: c, - container: &containerInfo{ - Name: ctx.ContainerName, - ID: ctx.ContainerID, - ImageName: ctx.ContainerImageName, - ImageID: ctx.ContainerImageID, - Created: ctx.ContainerCreated, - Metadata: ctx.ExtraAttributes(nil), - }, - } - - if ctx.Config[logCmdKey] == "true" { - l.container.Command = ctx.Command() - } - - if onGCE { - l.instance = &instanceInfo{ - Zone: zone, - Name: instanceName, - ID: instanceID, - } - } - - // The logger "overflows" at a rate of 10,000 logs per second and this - // overflow func is called. We want to surface the error to the user - // without overly spamming /var/log/docker.log so we log the first time - // we overflow and every 1000th time after. - c.Overflow = func(_ *logging.Client, _ logging.Entry) error { - if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 { - logrus.Errorf("gcplogs driver has dropped %v logs", i) - } - return nil - } - - return l, nil -} - -// ValidateLogOpts validates the opts passed to the gcplogs driver. Currently, the gcplogs -// driver doesn't take any arguments. -func ValidateLogOpts(cfg map[string]string) error { - for k := range cfg { - switch k { - case projectOptKey, logLabelsKey, logEnvKey, logCmdKey: - default: - return fmt.Errorf("%q is not a valid option for the gcplogs driver", k) - } - } - return nil -} - -func (l *gcplogs) Log(m *logger.Message) error { - return l.client.Log(logging.Entry{ - Time: m.Timestamp, - Payload: &dockerLogEntry{ - Instance: l.instance, - Container: l.container, - Data: string(m.Line), - }, - }) -} - -func (l *gcplogs) Close() error { - return l.client.Flush() -} - -func (l *gcplogs) Name() string { - return name -} diff --git a/daemon/logger/gelf/gelf.go b/daemon/logger/gelf/gelf.go deleted file mode 100644 index 8cc77ce15..000000000 --- a/daemon/logger/gelf/gelf.go +++ /dev/null @@ -1,209 +0,0 @@ -// +build linux - -// Package gelf provides the log driver for forwarding server logs to -// endpoints that support the Graylog Extended Log Format. 
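The Overflow hook above logs the first dropped entry and then every 1000th one, so a sustained overflow still surfaces in the daemon log without flooding it. The counting trick in isolation (sketch; the threshold of 1000 is carried over from the code above):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    var droppedLogs uint64

    // reportDrop counts a dropped log and reports only when the running
    // count is 1 modulo 1000, i.e. on drops 1, 1001, 2001, ...
    func reportDrop() {
        if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 {
            fmt.Printf("driver has dropped %v logs\n", i)
        }
    }

    func main() {
        for i := 0; i < 2500; i++ {
            reportDrop() // prints three times: at 1, 1001 and 2001
        }
    }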
-package gelf - -import ( - "bytes" - "compress/flate" - "encoding/json" - "fmt" - "net" - "net/url" - "strconv" - "time" - - "github.com/Graylog2/go-gelf/gelf" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/docker/pkg/urlutil" -) - -const name = "gelf" - -type gelfLogger struct { - writer *gelf.Writer - ctx logger.Context - hostname string - rawExtra json.RawMessage -} - -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates a gelf logger using the configuration passed in on the -// context. The supported context configuration variable is gelf-address. -func New(ctx logger.Context) (logger.Logger, error) { - // parse gelf address - address, err := parseAddress(ctx.Config["gelf-address"]) - if err != nil { - return nil, err - } - - // collect extra data for GELF message - hostname, err := ctx.Hostname() - if err != nil { - return nil, fmt.Errorf("gelf: cannot access hostname to set source field") - } - - // remove trailing slash from container name - containerName := bytes.TrimLeft([]byte(ctx.ContainerName), "/") - - // parse log tag - tag, err := loggerutils.ParseLogTag(ctx, "") - if err != nil { - return nil, err - } - - extra := map[string]interface{}{ - "_container_id": ctx.ContainerID, - "_container_name": string(containerName), - "_image_id": ctx.ContainerImageID, - "_image_name": ctx.ContainerImageName, - "_command": ctx.Command(), - "_tag": tag, - "_created": ctx.ContainerCreated, - } - - extraAttrs := ctx.ExtraAttributes(func(key string) string { - if key[0] == '_' { - return key - } - return "_" + key - }) - for k, v := range extraAttrs { - extra[k] = v - } - - rawExtra, err := json.Marshal(extra) - if err != nil { - return nil, err - } - - // create new gelfWriter - gelfWriter, err := gelf.NewWriter(address) - if err != nil { - return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) - } - - if v, ok := ctx.Config["gelf-compression-type"]; ok { - switch v { - case "gzip": - gelfWriter.CompressionType = gelf.CompressGzip - case "zlib": - gelfWriter.CompressionType = gelf.CompressZlib - case "none": - gelfWriter.CompressionType = gelf.CompressNone - default: - return nil, fmt.Errorf("gelf: invalid compression type %q", v) - } - } - - if v, ok := ctx.Config["gelf-compression-level"]; ok { - val, err := strconv.Atoi(v) - if err != nil { - return nil, fmt.Errorf("gelf: invalid compression level %s, err %v", v, err) - } - gelfWriter.CompressionLevel = val - } - - return &gelfLogger{ - writer: gelfWriter, - ctx: ctx, - hostname: hostname, - rawExtra: rawExtra, - }, nil -} - -func (s *gelfLogger) Log(msg *logger.Message) error { - level := gelf.LOG_INFO - if msg.Source == "stderr" { - level = gelf.LOG_ERR - } - - m := gelf.Message{ - Version: "1.1", - Host: s.hostname, - Short: string(msg.Line), - TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0, - Level: level, - RawExtra: s.rawExtra, - } - - if err := s.writer.WriteMessage(&m); err != nil { - return fmt.Errorf("gelf: cannot send GELF message: %v", err) - } - return nil -} - -func (s *gelfLogger) Close() error { - return s.writer.Close() -} - -func (s *gelfLogger) Name() string { - return name -} - -// ValidateLogOpt looks for gelf specific log option gelf-address. 
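GELF's TimeUnix field is fractional seconds with millisecond precision; the expression used above first truncates to whole milliseconds, then divides back down to seconds. The arithmetic in isolation (sketch):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t := time.Date(2016, 7, 7, 15, 1, 33, 250_000_000, time.UTC)
        // Whole milliseconds since the epoch, converted to seconds as a
        // float64; sub-millisecond precision is deliberately dropped.
        timeUnix := float64(t.UnixNano()/int64(time.Millisecond)) / 1000.0
        fmt.Printf("%.3f\n", timeUnix) // 1467903693.250
    }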
-func ValidateLogOpt(cfg map[string]string) error { - for key, val := range cfg { - switch key { - case "gelf-address": - case "tag": - case "labels": - case "env": - case "gelf-compression-level": - i, err := strconv.Atoi(val) - if err != nil || i < flate.DefaultCompression || i > flate.BestCompression { - return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) - } - case "gelf-compression-type": - switch val { - case "gzip", "zlib", "none": - default: - return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) - } - default: - return fmt.Errorf("unknown log opt %q for gelf log driver", key) - } - } - - if _, err := parseAddress(cfg["gelf-address"]); err != nil { - return err - } - - return nil -} - -func parseAddress(address string) (string, error) { - if address == "" { - return "", nil - } - if !urlutil.IsTransportURL(address) { - return "", fmt.Errorf("gelf-address should be in form proto://address, got %v", address) - } - url, err := url.Parse(address) - if err != nil { - return "", err - } - - // we support only udp - if url.Scheme != "udp" { - return "", fmt.Errorf("gelf: endpoint needs to be UDP") - } - - // get host and port - if _, _, err = net.SplitHostPort(url.Host); err != nil { - return "", fmt.Errorf("gelf: please provide gelf-address as udp://host:port") - } - - return url.Host, nil -} diff --git a/daemon/logger/gelf/gelf_unsupported.go b/daemon/logger/gelf/gelf_unsupported.go deleted file mode 100644 index 266f73b18..000000000 --- a/daemon/logger/gelf/gelf_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package gelf diff --git a/daemon/logger/journald/journald.go b/daemon/logger/journald/journald.go deleted file mode 100644 index 748dd8b24..000000000 --- a/daemon/logger/journald/journald.go +++ /dev/null @@ -1,95 +0,0 @@ -// +build linux - -// Package journald provides the log driver for forwarding server logs -// to endpoints that receive the systemd format. -package journald - -import ( - "fmt" - "strings" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/coreos/go-systemd/journal" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" -) - -const name = "journald" - -type journald struct { - vars map[string]string // additional variables and values to send to the journal along with the log message - readers readerList -} - -type readerList struct { - mu sync.Mutex - readers map[*logger.LogWatcher]*logger.LogWatcher -} - -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, validateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates a journald logger using the configuration passed in on -// the context. -func New(ctx logger.Context) (logger.Logger, error) { - if !journal.Enabled() { - return nil, fmt.Errorf("journald is not enabled on this host") - } - // Strip a leading slash so that people can search for - // CONTAINER_NAME=foo rather than CONTAINER_NAME=/foo. 
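// (Editor's aside: the strip below is equivalent to the stdlib one-liner
//
//	name := strings.TrimPrefix(ctx.ContainerName, "/")
//
// which also tolerates an empty name without indexing; shown purely as an
// illustration, not part of the deleted code.)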
- name := ctx.ContainerName - if name[0] == '/' { - name = name[1:] - } - - // parse log tag - tag, err := loggerutils.ParseLogTag(ctx, "") - if err != nil { - return nil, err - } - - vars := map[string]string{ - "CONTAINER_ID": ctx.ContainerID[:12], - "CONTAINER_ID_FULL": ctx.ContainerID, - "CONTAINER_NAME": name, - "CONTAINER_TAG": tag, - } - extraAttrs := ctx.ExtraAttributes(strings.ToTitle) - for k, v := range extraAttrs { - vars[k] = v - } - return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil -} - -// We don't actually accept any options, but we have to supply a callback for -// the factory to pass the (probably empty) configuration map to. -func validateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case "labels": - case "env": - case "tag": - default: - return fmt.Errorf("unknown log opt '%s' for journald log driver", key) - } - } - return nil -} - -func (s *journald) Log(msg *logger.Message) error { - if msg.Source == "stderr" { - return journal.Send(string(msg.Line), journal.PriErr, s.vars) - } - return journal.Send(string(msg.Line), journal.PriInfo, s.vars) -} - -func (s *journald) Name() string { - return name -} diff --git a/daemon/logger/journald/journald_unsupported.go b/daemon/logger/journald/journald_unsupported.go deleted file mode 100644 index d52ca92e4..000000000 --- a/daemon/logger/journald/journald_unsupported.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !linux - -package journald - -type journald struct { -} diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go deleted file mode 100644 index bc009f61c..000000000 --- a/daemon/logger/journald/read.go +++ /dev/null @@ -1,392 +0,0 @@ -// +build linux,cgo,!static_build,journald - -package journald - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// -//static int get_message(sd_journal *j, const char **msg, size_t *length) -//{ -// int rc; -// *msg = NULL; -// *length = 0; -// rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length); -// if (rc == 0) { -// if (*length > 8) { -// (*msg) += 8; -// *length -= 8; -// } else { -// *msg = NULL; -// *length = 0; -// rc = -ENOENT; -// } -// } -// return rc; -//} -//static int get_priority(sd_journal *j, int *priority) -//{ -// const void *data; -// size_t i, length; -// int rc; -// *priority = -1; -// rc = sd_journal_get_data(j, "PRIORITY", &data, &length); -// if (rc == 0) { -// if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) { -// *priority = 0; -// for (i = 9; i < length; i++) { -// *priority = *priority * 10 + ((const char *)data)[i] - '0'; -// } -// if (length > 9) { -// rc = 0; -// } -// } -// } -// return rc; -//} -//static int is_attribute_field(const char *msg, size_t length) -//{ -// const struct known_field { -// const char *name; -// size_t length; -// } fields[] = { -// {"MESSAGE", sizeof("MESSAGE") - 1}, -// {"MESSAGE_ID", sizeof("MESSAGE_ID") - 1}, -// {"PRIORITY", sizeof("PRIORITY") - 1}, -// {"CODE_FILE", sizeof("CODE_FILE") - 1}, -// {"CODE_LINE", sizeof("CODE_LINE") - 1}, -// {"CODE_FUNC", sizeof("CODE_FUNC") - 1}, -// {"ERRNO", sizeof("ERRNO") - 1}, -// {"SYSLOG_FACILITY", sizeof("SYSLOG_FACILITY") - 1}, -// {"SYSLOG_IDENTIFIER", sizeof("SYSLOG_IDENTIFIER") - 1}, -// {"SYSLOG_PID", sizeof("SYSLOG_PID") - 1}, -// {"CONTAINER_NAME", sizeof("CONTAINER_NAME") - 1}, -// {"CONTAINER_ID", sizeof("CONTAINER_ID") - 1}, -// {"CONTAINER_ID_FULL", 
sizeof("CONTAINER_ID_FULL") - 1}, -// {"CONTAINER_TAG", sizeof("CONTAINER_TAG") - 1}, -// }; -// unsigned int i; -// void *p; -// if ((length < 1) || (msg[0] == '_') || ((p = memchr(msg, '=', length)) == NULL)) { -// return -1; -// } -// length = ((const char *) p) - msg; -// for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) { -// if ((fields[i].length == length) && (memcmp(fields[i].name, msg, length) == 0)) { -// return -1; -// } -// } -// return 0; -//} -//static int get_attribute_field(sd_journal *j, const char **msg, size_t *length) -//{ -// int rc; -// *msg = NULL; -// *length = 0; -// while ((rc = sd_journal_enumerate_data(j, (const void **) msg, length)) > 0) { -// if (is_attribute_field(*msg, *length) == 0) { -// break; -// } -// rc = -ENOENT; -// } -// return rc; -//} -//static int wait_for_data_or_close(sd_journal *j, int pipefd) -//{ -// struct pollfd fds[2]; -// uint64_t when = 0; -// int timeout, jevents, i; -// struct timespec ts; -// uint64_t now; -// do { -// memset(&fds, 0, sizeof(fds)); -// fds[0].fd = pipefd; -// fds[0].events = POLLHUP; -// fds[1].fd = sd_journal_get_fd(j); -// if (fds[1].fd < 0) { -// return fds[1].fd; -// } -// jevents = sd_journal_get_events(j); -// if (jevents < 0) { -// return jevents; -// } -// fds[1].events = jevents; -// sd_journal_get_timeout(j, &when); -// if (when == -1) { -// timeout = -1; -// } else { -// clock_gettime(CLOCK_MONOTONIC, &ts); -// now = (uint64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000; -// timeout = when > now ? (int) ((when - now + 999) / 1000) : 0; -// } -// i = poll(fds, 2, timeout); -// if ((i == -1) && (errno != EINTR)) { -// /* An unexpected error. */ -// return (errno != 0) ? -errno : -EINTR; -// } -// if (fds[0].revents & POLLHUP) { -// /* The close notification pipe was closed. */ -// return 0; -// } -// if (sd_journal_process(j) == SD_JOURNAL_APPEND) { -// /* Data, which we might care about, was appended. */ -// return 1; -// } -// } while ((fds[0].revents & POLLHUP) == 0); -// return 0; -//} -import "C" - -import ( - "fmt" - "strings" - "time" - "unsafe" - - "github.com/Sirupsen/logrus" - "github.com/coreos/go-systemd/journal" - "github.com/docker/docker/daemon/logger" -) - -func (s *journald) Close() error { - s.readers.mu.Lock() - for reader := range s.readers.readers { - reader.Close() - } - s.readers.mu.Unlock() - return nil -} - -func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor string) string { - var msg, data, cursor *C.char - var length C.size_t - var stamp C.uint64_t - var priority C.int - - // Walk the journal from here forward until we run out of new entries. -drain: - for { - // Try not to send a given entry twice. - if oldCursor != "" { - ccursor := C.CString(oldCursor) - defer C.free(unsafe.Pointer(ccursor)) - for C.sd_journal_test_cursor(j, ccursor) > 0 { - if C.sd_journal_next(j) <= 0 { - break drain - } - } - } - // Read and send the logged message, if there is one to read. - i := C.get_message(j, &msg, &length) - if i != -C.ENOENT && i != -C.EADDRNOTAVAIL { - // Read the entry's timestamp. - if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { - break - } - // Set up the time and text of the entry. - timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000) - line := append(C.GoBytes(unsafe.Pointer(msg), C.int(length)), "\n"...) - // Recover the stream name by mapping - // from the journal priority back to - // the stream that we would have - // assigned that value. 
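// (Editor's aside — the if/else chain below, expressed as a standalone
// helper for clarity; hypothetical, but using the same go-systemd
// constants the writer side passes to journal.Send:
//
//	func streamFor(p journal.Priority) string {
//		switch p {
//		case journal.PriErr:
//			return "stderr"
//		case journal.PriInfo:
//			return "stdout"
//		}
//		return "" // unknown priority: leave the source blank
//	}
// )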
- source := "" - if C.get_priority(j, &priority) != 0 { - source = "" - } else if priority == C.int(journal.PriErr) { - source = "stderr" - } else if priority == C.int(journal.PriInfo) { - source = "stdout" - } - // Retrieve the values of any variables we're adding to the journal. - attrs := make(map[string]string) - C.sd_journal_restart_data(j) - for C.get_attribute_field(j, &data, &length) > C.int(0) { - kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2) - attrs[kv[0]] = kv[1] - } - if len(attrs) == 0 { - attrs = nil - } - // Send the log message. - logWatcher.Msg <- &logger.Message{ - Line: line, - Source: source, - Timestamp: timestamp.In(time.UTC), - Attrs: attrs, - } - } - // If we're at the end of the journal, we're done (for now). - if C.sd_journal_next(j) <= 0 { - break - } - } - retCursor := "" - if C.sd_journal_get_cursor(j, &cursor) == 0 { - retCursor = C.GoString(cursor) - C.free(unsafe.Pointer(cursor)) - } - return retCursor -} - -func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor string) { - s.readers.mu.Lock() - s.readers.readers[logWatcher] = logWatcher - s.readers.mu.Unlock() - go func() { - // Keep copying journal data out until we're notified to stop - // or we hit an error. - status := C.wait_for_data_or_close(j, pfd[0]) - for status == 1 { - cursor = s.drainJournal(logWatcher, config, j, cursor) - status = C.wait_for_data_or_close(j, pfd[0]) - } - if status < 0 { - cerrstr := C.strerror(C.int(-status)) - errstr := C.GoString(cerrstr) - fmtstr := "error %q while attempting to follow journal for container %q" - logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"]) - } - // Clean up. - C.close(pfd[0]) - s.readers.mu.Lock() - delete(s.readers.readers, logWatcher) - s.readers.mu.Unlock() - C.sd_journal_close(j) - close(logWatcher.Msg) - }() - // Wait until we're told to stop. - select { - case <-logWatcher.WatchClose(): - // Notify the other goroutine that its work is done. - C.close(pfd[1]) - } -} - -func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { - var j *C.sd_journal - var cmatch *C.char - var stamp C.uint64_t - var sinceUnixMicro uint64 - var pipes [2]C.int - cursor := "" - - // Get a handle to the journal. - rc := C.sd_journal_open(&j, C.int(0)) - if rc != 0 { - logWatcher.Err <- fmt.Errorf("error opening journal") - close(logWatcher.Msg) - return - } - // If we end up following the log, we can set the journal context - // pointer and the channel pointer to nil so that we won't close them - // here, potentially while the goroutine that uses them is still - // running. Otherwise, close them when we return from this function. - following := false - defer func(pfollowing *bool) { - if !*pfollowing { - C.sd_journal_close(j) - close(logWatcher.Msg) - } - }(&following) - // Remove limits on the size of data items that we'll retrieve. - rc = C.sd_journal_set_data_threshold(j, C.size_t(0)) - if rc != 0 { - logWatcher.Err <- fmt.Errorf("error setting journal data threshold") - return - } - // Add a match to have the library do the searching for us. - cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"]) - defer C.free(unsafe.Pointer(cmatch)) - rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)) - if rc != 0 { - logWatcher.Err <- fmt.Errorf("error setting journal match") - return - } - // If we have a cutoff time, convert it to Unix time once. 
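// (Editor's aside: the conversion below — uint64(config.Since.UnixNano() / 1000)
// — yields microseconds since the epoch, matching the CLOCK_REALTIME
// microsecond stamps that sd_journal_get_realtime_usec returns.)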
- if !config.Since.IsZero() { - nano := config.Since.UnixNano() - sinceUnixMicro = uint64(nano / 1000) - } - if config.Tail > 0 { - lines := config.Tail - // Start at the end of the journal. - if C.sd_journal_seek_tail(j) < 0 { - logWatcher.Err <- fmt.Errorf("error seeking to end of journal") - return - } - if C.sd_journal_previous(j) < 0 { - logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry") - return - } - // Walk backward. - for lines > 0 { - // Stop if the entry time is before our cutoff. - // We'll need the entry time if it isn't, so go - // ahead and parse it now. - if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { - break - } else { - // Compare the timestamp on the entry - // to our threshold value. - if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) { - break - } - } - lines-- - // If we're at the start of the journal, or - // don't need to back up past any more entries, - // stop. - if lines == 0 || C.sd_journal_previous(j) <= 0 { - break - } - } - } else { - // Start at the beginning of the journal. - if C.sd_journal_seek_head(j) < 0 { - logWatcher.Err <- fmt.Errorf("error seeking to start of journal") - return - } - // If we have a cutoff date, fast-forward to it. - if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 { - logWatcher.Err <- fmt.Errorf("error seeking to start time in journal") - return - } - if C.sd_journal_next(j) < 0 { - logWatcher.Err <- fmt.Errorf("error skipping to next journal entry") - return - } - } - cursor = s.drainJournal(logWatcher, config, j, "") - if config.Follow { - // Allocate a descriptor for following the journal, if we'll - // need one. Do it here so that we can report if it fails. - if fd := C.sd_journal_get_fd(j); fd < C.int(0) { - logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd))) - } else { - // Create a pipe that we can poll at the same time as - // the journald descriptor. - if C.pipe(&pipes[0]) == C.int(-1) { - logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe") - } else { - s.followJournal(logWatcher, config, j, pipes, cursor) - // Let followJournal handle freeing the journal context - // object and closing the channel. 
- following = true - } - } - } - return -} - -func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { - logWatcher := logger.NewLogWatcher() - go s.readLogs(logWatcher, config) - return logWatcher -} diff --git a/daemon/logger/journald/read_native.go b/daemon/logger/journald/read_native.go deleted file mode 100644 index bba6de55b..000000000 --- a/daemon/logger/journald/read_native.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux,cgo,!static_build,journald,!journald_compat - -package journald - -// #cgo pkg-config: libsystemd -import "C" diff --git a/daemon/logger/journald/read_native_compat.go b/daemon/logger/journald/read_native_compat.go deleted file mode 100644 index 3f7a43c59..000000000 --- a/daemon/logger/journald/read_native_compat.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux,cgo,!static_build,journald,journald_compat - -package journald - -// #cgo pkg-config: libsystemd-journal -import "C" diff --git a/daemon/logger/journald/read_unsupported.go b/daemon/logger/journald/read_unsupported.go deleted file mode 100644 index b43abdcaf..000000000 --- a/daemon/logger/journald/read_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux !cgo static_build !journald - -package journald - -func (s *journald) Close() error { - return nil -} diff --git a/daemon/logger/jsonfilelog/jsonfilelog.go b/daemon/logger/jsonfilelog/jsonfilelog.go deleted file mode 100644 index 9faa4e02d..000000000 --- a/daemon/logger/jsonfilelog/jsonfilelog.go +++ /dev/null @@ -1,147 +0,0 @@ -// Package jsonfilelog provides the default Logger implementation for -// Docker logging. This logger logs to files on the host server in the -// JSON format. -package jsonfilelog - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/go-units" -) - -// Name is the name of the file that the jsonlogger logs to. -const Name = "json-file" - -// JSONFileLogger is Logger implementation for default Docker logging. -type JSONFileLogger struct { - buf *bytes.Buffer - writer *loggerutils.RotateFileWriter - mu sync.Mutex - readers map[*logger.LogWatcher]struct{} // stores the active log followers - extra []byte // json-encoded extra attributes -} - -func init() { - if err := logger.RegisterLogDriver(Name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates new JSONFileLogger which writes to filename passed in -// on given context. 
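The New constructor that follows parses max-size through go-units; FromHumanSize accepts decimal (SI) suffixes, so "1k" means 1000 bytes. That parsing in isolation (assuming only the github.com/docker/go-units API imported above):

    package main

    import (
        "fmt"

        "github.com/docker/go-units"
    )

    func main() {
        // Decimal suffixes: "10m" -> 10 * 1000 * 1000 bytes.
        capval, err := units.FromHumanSize("10m")
        if err != nil {
            panic(err)
        }
        fmt.Println(capval) // 10000000
    }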
-func New(ctx logger.Context) (logger.Logger, error) { - var capval int64 = -1 - if capacity, ok := ctx.Config["max-size"]; ok { - var err error - capval, err = units.FromHumanSize(capacity) - if err != nil { - return nil, err - } - } - var maxFiles = 1 - if maxFileString, ok := ctx.Config["max-file"]; ok { - var err error - maxFiles, err = strconv.Atoi(maxFileString) - if err != nil { - return nil, err - } - if maxFiles < 1 { - return nil, fmt.Errorf("max-file cannot be less than 1") - } - } - - writer, err := loggerutils.NewRotateFileWriter(ctx.LogPath, capval, maxFiles) - if err != nil { - return nil, err - } - - var extra []byte - if attrs := ctx.ExtraAttributes(nil); len(attrs) > 0 { - var err error - extra, err = json.Marshal(attrs) - if err != nil { - return nil, err - } - } - - return &JSONFileLogger{ - buf: bytes.NewBuffer(nil), - writer: writer, - readers: make(map[*logger.LogWatcher]struct{}), - extra: extra, - }, nil -} - -// Log converts logger.Message to jsonlog.JSONLog and serializes it to file. -func (l *JSONFileLogger) Log(msg *logger.Message) error { - timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp) - if err != nil { - return err - } - l.mu.Lock() - err = (&jsonlog.JSONLogs{ - Log: append(msg.Line, '\n'), - Stream: msg.Source, - Created: timestamp, - RawAttrs: l.extra, - }).MarshalJSONBuf(l.buf) - if err != nil { - l.mu.Unlock() - return err - } - - l.buf.WriteByte('\n') - _, err = l.writer.Write(l.buf.Bytes()) - l.buf.Reset() - l.mu.Unlock() - - return err -} - -// ValidateLogOpt looks for json specific log options max-file & max-size. -func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case "max-file": - case "max-size": - case "labels": - case "env": - default: - return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) - } - } - return nil -} - -// LogPath returns the location the given json logger logs to. -func (l *JSONFileLogger) LogPath() string { - return l.writer.LogPath() -} - -// Close closes underlying file and signals all readers to stop. -func (l *JSONFileLogger) Close() error { - l.mu.Lock() - err := l.writer.Close() - for r := range l.readers { - r.Close() - delete(l.readers, r) - } - l.mu.Unlock() - return err -} - -// Name returns name of this logger. 
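The Log method above serializes each message under a mutex into a reused buffer and then writes one JSON document per line. A stripped-down, stdlib-only version of that write path (illustrative types, not the deleted ones):

    package main

    import (
        "bytes"
        "encoding/json"
        "os"
        "sync"
        "time"
    )

    type entry struct {
        Log    string    `json:"log"`
        Stream string    `json:"stream"`
        Time   time.Time `json:"time"`
    }

    type jsonLinesLogger struct {
        mu  sync.Mutex
        buf bytes.Buffer // reused between writes to avoid allocations
        w   *os.File
    }

    func (l *jsonLinesLogger) Log(line, stream string, t time.Time) error {
        l.mu.Lock()
        defer l.mu.Unlock()
        l.buf.Reset()
        if err := json.NewEncoder(&l.buf).Encode(entry{Log: line + "\n", Stream: stream, Time: t}); err != nil {
            return err
        }
        // Encoder already appends '\n', keeping the file one document per line.
        _, err := l.w.Write(l.buf.Bytes())
        return err
    }

    func main() {
        l := &jsonLinesLogger{w: os.Stdout}
        l.Log("line1", "stdout", time.Time{})
        l.Log("line2", "stderr", time.Time{})
    }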
-func (l *JSONFileLogger) Name() string { - return Name -} diff --git a/daemon/logger/jsonfilelog/jsonfilelog_test.go b/daemon/logger/jsonfilelog/jsonfilelog_test.go deleted file mode 100644 index b5b818a8b..000000000 --- a/daemon/logger/jsonfilelog/jsonfilelog_test.go +++ /dev/null @@ -1,248 +0,0 @@ -package jsonfilelog - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "strconv" - "testing" - "time" - - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/pkg/jsonlog" -) - -func TestJSONFileLogger(t *testing.T) { - cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" - tmp, err := ioutil.TempDir("", "docker-logger-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - filename := filepath.Join(tmp, "container.log") - l, err := New(logger.Context{ - ContainerID: cid, - LogPath: filename, - }) - if err != nil { - t.Fatal(err) - } - defer l.Close() - - if err := l.Log(&logger.Message{Line: []byte("line1"), Source: "src1"}); err != nil { - t.Fatal(err) - } - if err := l.Log(&logger.Message{Line: []byte("line2"), Source: "src2"}); err != nil { - t.Fatal(err) - } - if err := l.Log(&logger.Message{Line: []byte("line3"), Source: "src3"}); err != nil { - t.Fatal(err) - } - res, err := ioutil.ReadFile(filename) - if err != nil { - t.Fatal(err) - } - expected := `{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line2\n","stream":"src2","time":"0001-01-01T00:00:00Z"} -{"log":"line3\n","stream":"src3","time":"0001-01-01T00:00:00Z"} -` - - if string(res) != expected { - t.Fatalf("Wrong log content: %q, expected %q", res, expected) - } -} - -func BenchmarkJSONFileLogger(b *testing.B) { - cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" - tmp, err := ioutil.TempDir("", "docker-logger-") - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(tmp) - filename := filepath.Join(tmp, "container.log") - l, err := New(logger.Context{ - ContainerID: cid, - LogPath: filename, - }) - if err != nil { - b.Fatal(err) - } - defer l.Close() - - testLine := "Line that thinks that it is log line from docker\n" - msg := &logger.Message{Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()} - jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(jsonlog)+1) * 30) - b.ResetTimer() - for i := 0; i < b.N; i++ { - for j := 0; j < 30; j++ { - if err := l.Log(msg); err != nil { - b.Fatal(err) - } - } - } -} - -func TestJSONFileLoggerWithOpts(t *testing.T) { - cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" - tmp, err := ioutil.TempDir("", "docker-logger-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - filename := filepath.Join(tmp, "container.log") - config := map[string]string{"max-file": "2", "max-size": "1k"} - l, err := New(logger.Context{ - ContainerID: cid, - LogPath: filename, - Config: config, - }) - if err != nil { - t.Fatal(err) - } - defer l.Close() - for i := 0; i < 20; i++ { - if err := l.Log(&logger.Message{Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil { - t.Fatal(err) - } - } - res, err := ioutil.ReadFile(filename) - if err != nil { - t.Fatal(err) - } - penUlt, err := ioutil.ReadFile(filename + ".1") - if err != nil { - t.Fatal(err) - } - - expectedPenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} 
-{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line4\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line5\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line6\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line7\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line8\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line9\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line10\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line11\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line12\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line13\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -` - expected := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -` - - if string(res) != expected { - t.Fatalf("Wrong log content: %q, expected %q", res, expected) - } - if string(penUlt) != expectedPenultimate { - t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate) - } - -} - -func TestJSONFileLoggerWithLabelsEnv(t *testing.T) { - cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" - tmp, err := ioutil.TempDir("", "docker-logger-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - filename := filepath.Join(tmp, "container.log") - config := map[string]string{"labels": "rack,dc", "env": "environ,debug,ssl"} - l, err := New(logger.Context{ - ContainerID: cid, - LogPath: filename, - Config: config, - ContainerLabels: map[string]string{"rack": "101", "dc": "lhr"}, - ContainerEnv: []string{"environ=production", "debug=false", "port=10001", "ssl=true"}, - }) - if err != nil { - t.Fatal(err) - } - defer l.Close() - if err := l.Log(&logger.Message{Line: []byte("line"), Source: "src1"}); err != nil { - t.Fatal(err) - } - res, err := ioutil.ReadFile(filename) - if err != nil { - t.Fatal(err) - } - - var jsonLog jsonlog.JSONLogs - if err := json.Unmarshal(res, &jsonLog); err != nil { - t.Fatal(err) - } - extra := make(map[string]string) - if err := json.Unmarshal(jsonLog.RawAttrs, &extra); err != nil { - t.Fatal(err) - } - expected := map[string]string{ - "rack": "101", - "dc": "lhr", - "environ": "production", - "debug": "false", - "ssl": "true", - } - if !reflect.DeepEqual(extra, expected) { - t.Fatalf("Wrong log attrs: %q, expected %q", extra, expected) - } -} - -func BenchmarkJSONFileLoggerWithReader(b *testing.B) { - b.StopTimer() - b.ResetTimer() - cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" - dir, err := ioutil.TempDir("", "json-logger-bench") - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(dir) - - l, err := New(logger.Context{ - ContainerID: cid, - LogPath: filepath.Join(dir, "container.log"), - }) - if err != nil { - b.Fatal(err) - } - defer l.Close() - msg := &logger.Message{Line: []byte("line"), Source: "src1"} - jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(jsonlog)+1) * 30) - - 
b.StartTimer() - - go func() { - for i := 0; i < b.N; i++ { - for j := 0; j < 30; j++ { - l.Log(msg) - } - } - l.Close() - }() - - lw := l.(logger.LogReader).ReadLogs(logger.ReadConfig{Follow: true}) - watchClose := lw.WatchClose() - for { - select { - case <-lw.Msg: - case <-watchClose: - return - } - } -} diff --git a/daemon/logger/jsonfilelog/read.go b/daemon/logger/jsonfilelog/read.go deleted file mode 100644 index bea83ddf1..000000000 --- a/daemon/logger/jsonfilelog/read.go +++ /dev/null @@ -1,239 +0,0 @@ -package jsonfilelog - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/pkg/filenotify" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/tailfile" -) - -const maxJSONDecodeRetry = 20000 - -func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { - l.Reset() - if err := dec.Decode(l); err != nil { - return nil, err - } - msg := &logger.Message{ - Source: l.Stream, - Timestamp: l.Created, - Line: []byte(l.Log), - Attrs: l.Attrs, - } - return msg, nil -} - -// ReadLogs implements the logger's LogReader interface for the logs -// created by this driver. -func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { - logWatcher := logger.NewLogWatcher() - - go l.readLogs(logWatcher, config) - return logWatcher -} - -func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { - defer close(logWatcher.Msg) - - pth := l.writer.LogPath() - var files []io.ReadSeeker - for i := l.writer.MaxFiles(); i > 1; i-- { - f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) - if err != nil { - if !os.IsNotExist(err) { - logWatcher.Err <- err - break - } - continue - } - files = append(files, f) - } - - latestFile, err := os.Open(pth) - if err != nil { - logWatcher.Err <- err - return - } - - if config.Tail != 0 { - tailer := ioutils.MultiReadSeeker(append(files, latestFile)...) 
- tailFile(tailer, logWatcher, config.Tail, config.Since) - } - - // close all the rotated files - for _, f := range files { - if err := f.(io.Closer).Close(); err != nil { - logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err) - } - } - - if !config.Follow { - if err := latestFile.Close(); err != nil { - logrus.Errorf("Error closing file: %v", err) - } - return - } - - if config.Tail >= 0 { - latestFile.Seek(0, os.SEEK_END) - } - - l.mu.Lock() - l.readers[logWatcher] = struct{}{} - l.mu.Unlock() - - notifyRotate := l.writer.NotifyRotate() - followLogs(latestFile, logWatcher, notifyRotate, config.Since) - - l.mu.Lock() - delete(l.readers, logWatcher) - l.mu.Unlock() - - l.writer.NotifyRotateEvict(notifyRotate) -} - -func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { - var rdr io.Reader = f - if tail > 0 { - ls, err := tailfile.TailFile(f, tail) - if err != nil { - logWatcher.Err <- err - return - } - rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) - } - dec := json.NewDecoder(rdr) - l := &jsonlog.JSONLog{} - for { - msg, err := decodeLogLine(dec, l) - if err != nil { - if err != io.EOF { - logWatcher.Err <- err - } - return - } - if !since.IsZero() && msg.Timestamp.Before(since) { - continue - } - logWatcher.Msg <- msg - } -} - -func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { - dec := json.NewDecoder(f) - l := &jsonlog.JSONLog{} - - fileWatcher, err := filenotify.New() - if err != nil { - logWatcher.Err <- err - } - defer func() { - f.Close() - fileWatcher.Close() - }() - name := f.Name() - - if err := fileWatcher.Add(name); err != nil { - logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err) - fileWatcher.Close() - fileWatcher = filenotify.NewPollingWatcher() - - if err := fileWatcher.Add(name); err != nil { - logrus.Debugf("error watching log file for modifications: %v", err) - logWatcher.Err <- err - return - } - } - - var retries int - for { - msg, err := decodeLogLine(dec, l) - if err != nil { - if err != io.EOF { - // try again because this shouldn't happen - if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { - dec = json.NewDecoder(f) - retries++ - continue - } - - // io.ErrUnexpectedEOF is returned from json.Decoder when there is - // remaining data in the parser's buffer while an io.EOF occurs. - // If the json logger writes a partial json log entry to the disk - // while at the same time the decoder tries to decode it, the race condition happens. 
- if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry { - reader := io.MultiReader(dec.Buffered(), f) - dec = json.NewDecoder(reader) - retries++ - continue - } - - return - } - - select { - case <-fileWatcher.Events(): - dec = json.NewDecoder(f) - continue - case <-fileWatcher.Errors(): - logWatcher.Err <- err - return - case <-logWatcher.WatchClose(): - fileWatcher.Remove(name) - return - case <-notifyRotate: - f.Close() - fileWatcher.Remove(name) - - // retry when the file doesn't exist - for retries := 0; retries <= 5; retries++ { - f, err = os.Open(name) - if err == nil || !os.IsNotExist(err) { - break - } - } - - if err = fileWatcher.Add(name); err != nil { - logWatcher.Err <- err - return - } - if err != nil { - logWatcher.Err <- err - return - } - - dec = json.NewDecoder(f) - continue - } - } - - retries = 0 // reset retries since we've succeeded - if !since.IsZero() && msg.Timestamp.Before(since) { - continue - } - select { - case logWatcher.Msg <- msg: - case <-logWatcher.WatchClose(): - logWatcher.Msg <- msg - for { - msg, err := decodeLogLine(dec, l) - if err != nil { - return - } - if !since.IsZero() && msg.Timestamp.Before(since) { - continue - } - logWatcher.Msg <- msg - } - } - } -} diff --git a/daemon/logger/logger.go b/daemon/logger/logger.go deleted file mode 100644 index 77c6c90ce..000000000 --- a/daemon/logger/logger.go +++ /dev/null @@ -1,113 +0,0 @@ -// Package logger defines interfaces that logger drivers implement to -// log messages. -// -// The other half of a logger driver is the implementation of the -// factory, which holds the contextual instance information that -// allows multiple loggers of the same type to perform different -// actions, such as logging to different locations. -package logger - -import ( - "errors" - "sort" - "strings" - "sync" - "time" - - "github.com/docker/docker/pkg/jsonlog" -) - -// ErrReadLogsNotSupported is returned when the logger does not support reading logs. -var ErrReadLogsNotSupported = errors.New("configured logging reader does not support reading") - -const ( - // TimeFormat is the time format used for timestamps sent to log readers. - TimeFormat = jsonlog.RFC3339NanoFixed - logWatcherBufferSize = 4096 -) - -// Message is datastructure that represents record from some container. -type Message struct { - Line []byte - Source string - Timestamp time.Time - Attrs LogAttributes -} - -// LogAttributes is used to hold the extra attributes available in the log message -// Primarily used for converting the map type to string and sorting. -type LogAttributes map[string]string -type byKey []string - -func (s byKey) Len() int { return len(s) } -func (s byKey) Less(i, j int) bool { - keyI := strings.Split(s[i], "=") - keyJ := strings.Split(s[j], "=") - return keyI[0] < keyJ[0] -} -func (s byKey) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (a LogAttributes) String() string { - var ss byKey - for k, v := range a { - ss = append(ss, k+"="+v) - } - sort.Sort(ss) - return strings.Join(ss, ",") -} - -// Logger is the interface for docker logging drivers. -type Logger interface { - Log(*Message) error - Name() string - Close() error -} - -// ReadConfig is the configuration passed into ReadLogs. -type ReadConfig struct { - Since time.Time - Tail int - Follow bool -} - -// LogReader is the interface for reading log messages for loggers that support reading. 
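The retry logic above has to cope with the writer appending a partial JSON entry while the reader is decoding. A minimal sketch of just the resync step, under the same assumption that entries are newline-delimited JSON objects (package and function names are illustrative):

package logtail

import (
	"encoding/json"
	"io"
	"os"
)

// decodeNext reads one entry from dec; on io.ErrUnexpectedEOF (the writer was
// caught mid-entry) it returns a fresh decoder that stitches the decoder's
// buffered bytes back in front of the file so no data is lost on retry.
func decodeNext(dec *json.Decoder, f *os.File) (map[string]interface{}, *json.Decoder, error) {
	var entry map[string]interface{}
	if err := dec.Decode(&entry); err != nil {
		if err == io.ErrUnexpectedEOF {
			dec = json.NewDecoder(io.MultiReader(dec.Buffered(), f))
		}
		return nil, dec, err
	}
	return entry, dec, nil
}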
-type LogReader interface {
-	// Read logs from the underlying logging backend
-	ReadLogs(ReadConfig) *LogWatcher
-}
-
-// LogWatcher is used when consuming logs read from the LogReader interface.
-type LogWatcher struct {
-	// For sending log messages to a reader.
-	Msg chan *Message
-	// For sending error messages that occur while reading logs.
-	Err           chan error
-	closeOnce     sync.Once
-	closeNotifier chan struct{}
-}
-
-// NewLogWatcher returns a new LogWatcher.
-func NewLogWatcher() *LogWatcher {
-	return &LogWatcher{
-		Msg:           make(chan *Message, logWatcherBufferSize),
-		Err:           make(chan error, 1),
-		closeNotifier: make(chan struct{}),
-	}
-}
-
-// Close notifies the underlying log reader to stop.
-func (w *LogWatcher) Close() {
-	// only close if not already closed
-	w.closeOnce.Do(func() {
-		close(w.closeNotifier)
-	})
-}
-
-// WatchClose returns a channel receiver that receives notification
-// when the watcher has been closed. This should only be called from
-// one goroutine.
-func (w *LogWatcher) WatchClose() <-chan struct{} {
-	return w.closeNotifier
-}
diff --git a/daemon/logger/loggerutils/log_tag.go b/daemon/logger/loggerutils/log_tag.go
deleted file mode 100644
index 0ad62df20..000000000
--- a/daemon/logger/loggerutils/log_tag.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package loggerutils
-
-import (
-	"bytes"
-
-	"github.com/docker/docker/daemon/logger"
-	"github.com/docker/docker/utils/templates"
-)
-
-// ParseLogTag generates a context-aware tag for consistency across different
-// log drivers based on the context of the running container.
-func ParseLogTag(ctx logger.Context, defaultTemplate string) (string, error) {
-	tagTemplate := ctx.Config["tag"]
-	if tagTemplate == "" {
-		tagTemplate = defaultTemplate
-	}
-
-	tmpl, err := templates.NewParse("log-tag", tagTemplate)
-	if err != nil {
-		return "", err
-	}
-	buf := new(bytes.Buffer)
-	if err := tmpl.Execute(buf, &ctx); err != nil {
-		return "", err
-	}
-
-	return buf.String(), nil
-}
diff --git a/daemon/logger/loggerutils/log_tag_test.go b/daemon/logger/loggerutils/log_tag_test.go
deleted file mode 100644
index e2aa4358a..000000000
--- a/daemon/logger/loggerutils/log_tag_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package loggerutils
-
-import (
-	"testing"
-
-	"github.com/docker/docker/daemon/logger"
-)
-
-func TestParseLogTagDefaultTag(t *testing.T) {
-	ctx := buildContext(map[string]string{})
-	tag, e := ParseLogTag(ctx, "{{.ID}}")
-	assertTag(t, e, tag, ctx.ID())
-}
-
-func TestParseLogTag(t *testing.T) {
-	ctx := buildContext(map[string]string{"tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"})
-	tag, e := ParseLogTag(ctx, "{{.ID}}")
-	assertTag(t, e, tag, "test-image/test-container/container-ab")
-}
-
-func TestParseLogTagEmptyTag(t *testing.T) {
-	ctx := buildContext(map[string]string{})
-	tag, e := ParseLogTag(ctx, "{{.DaemonName}}/{{.ID}}")
-	assertTag(t, e, tag, "test-dockerd/container-ab")
-}
-
-// Helpers
-
-func buildContext(cfg map[string]string) logger.Context {
-	return logger.Context{
-		ContainerID:        "container-abcdefghijklmnopqrstuvwxyz01234567890",
-		ContainerName:      "/test-container",
-		ContainerImageID:   "image-abcdefghijklmnopqrstuvwxyz01234567890",
-		ContainerImageName: "test-image",
-		Config:             cfg,
-		DaemonName:         "test-dockerd",
-	}
-}
-
-func assertTag(t *testing.T, e error, tag string, expected string) {
-	if e != nil {
-		t.Fatalf("Error generating tag: %q", e)
-	}
-	if tag != expected {
-		t.Fatalf("Wrong tag: %q, should be %q", tag, expected)
-	}
-}
diff --git a/daemon/logger/loggerutils/rotatefilewriter.go b/daemon/logger/loggerutils/rotatefilewriter.go
deleted file mode 100644
index 99e0964ae..000000000
--- a/daemon/logger/loggerutils/rotatefilewriter.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package loggerutils
-
-import (
-	"os"
-	"strconv"
-	"sync"
-
-	"github.com/docker/docker/pkg/pubsub"
-)
-
-// RotateFileWriter is a size-capped, rotating file writer for Docker logs.
-type RotateFileWriter struct {
-	f            *os.File // store for closing
-	mu           sync.Mutex
-	capacity     int64 // maximum size of each file
-	currentSize  int64 // current size of the latest file
-	maxFiles     int   // maximum number of files
-	notifyRotate *pubsub.Publisher
-}
-
-// NewRotateFileWriter creates a new RotateFileWriter
-func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) {
-	log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
-	if err != nil {
-		return nil, err
-	}
-
-	size, err := log.Seek(0, os.SEEK_END)
-	if err != nil {
-		return nil, err
-	}
-
-	return &RotateFileWriter{
-		f:            log,
-		capacity:     capacity,
-		currentSize:  size,
-		maxFiles:     maxFiles,
-		notifyRotate: pubsub.NewPublisher(0, 1),
-	}, nil
-}
-
-// Write writes a log message to the file, rotating first if the size cap is reached
-func (w *RotateFileWriter) Write(message []byte) (int, error) {
-	w.mu.Lock()
-	if err := w.checkCapacityAndRotate(); err != nil {
-		w.mu.Unlock()
-		return -1, err
-	}
-
-	n, err := w.f.Write(message)
-	if err == nil {
-		w.currentSize += int64(n)
-	}
-	w.mu.Unlock()
-	return n, err
-}
-
-func (w *RotateFileWriter) checkCapacityAndRotate() error {
-	if w.capacity == -1 {
-		return nil
-	}
-
-	if w.currentSize >= w.capacity {
-		name := w.f.Name()
-		if err := w.f.Close(); err != nil {
-			return err
-		}
-		if err := rotate(name, w.maxFiles); err != nil {
-			return err
-		}
-		file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640)
-		if err != nil {
-			return err
-		}
-		w.f = file
-		w.currentSize = 0
-		w.notifyRotate.Publish(struct{}{})
-	}
-
-	return nil
-}
-
-func rotate(name string, maxFiles int) error {
-	if maxFiles < 2 {
-		return nil
-	}
-	for i := maxFiles - 1; i > 1; i-- {
-		toPath := name + "." + strconv.Itoa(i)
-		fromPath := name + "." + strconv.Itoa(i-1)
-		if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) {
-			return err
-		}
-	}
-
-	if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	return nil
-}
-
-// LogPath returns the location the given writer logs to.
-func (w *RotateFileWriter) LogPath() string {
-	return w.f.Name()
-}
-
-// MaxFiles returns the maximum number of files
-func (w *RotateFileWriter) MaxFiles() int {
-	return w.maxFiles
-}
-
-// NotifyRotate returns a channel subscribed to rotation notifications
-func (w *RotateFileWriter) NotifyRotate() chan interface{} {
-	return w.notifyRotate.Subscribe()
-}
-
-// NotifyRotateEvict removes the specified subscriber from receiving any more rotation notifications.
-func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) {
-	w.notifyRotate.Evict(sub)
-}
-
-// Close closes the underlying file.
-func (w *RotateFileWriter) Close() error {
-	return w.f.Close()
-}
diff --git a/daemon/logger/splunk/splunk.go b/daemon/logger/splunk/splunk.go
deleted file mode 100644
index e587e66aa..000000000
--- a/daemon/logger/splunk/splunk.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Package splunk provides the log driver for forwarding server logs to
-// Splunk HTTP Event Collector endpoint.
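One detail of the rotating writer above that is easy to miss: rotation is broadcast through pkg/pubsub so every follower can reopen the log file. A runnable sketch of that flow, using the same pubsub package (the publisher arguments are the ones NewRotateFileWriter uses; the rest is illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/pubsub"
)

func main() {
	p := pubsub.NewPublisher(0, 1) // no publish timeout, buffer of 1
	sub := p.Subscribe()
	defer p.Evict(sub)

	// What checkCapacityAndRotate does after renaming the files.
	go p.Publish(struct{}{})

	select {
	case <-sub:
		fmt.Println("log file rotated; reopen and resume tailing")
	case <-time.After(time.Second):
		fmt.Println("no rotation observed")
	}
}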
-package splunk - -import ( - "bytes" - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/docker/pkg/urlutil" -) - -const ( - driverName = "splunk" - splunkURLKey = "splunk-url" - splunkTokenKey = "splunk-token" - splunkSourceKey = "splunk-source" - splunkSourceTypeKey = "splunk-sourcetype" - splunkIndexKey = "splunk-index" - splunkCAPathKey = "splunk-capath" - splunkCANameKey = "splunk-caname" - splunkInsecureSkipVerifyKey = "splunk-insecureskipverify" - envKey = "env" - labelsKey = "labels" - tagKey = "tag" -) - -type splunkLogger struct { - client *http.Client - transport *http.Transport - - url string - auth string - nullMessage *splunkMessage -} - -type splunkMessage struct { - Event splunkMessageEvent `json:"event"` - Time string `json:"time"` - Host string `json:"host"` - Source string `json:"source,omitempty"` - SourceType string `json:"sourcetype,omitempty"` - Index string `json:"index,omitempty"` -} - -type splunkMessageEvent struct { - Line string `json:"line"` - Source string `json:"source"` - Tag string `json:"tag,omitempty"` - Attrs map[string]string `json:"attrs,omitempty"` -} - -func init() { - if err := logger.RegisterLogDriver(driverName, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates splunk logger driver using configuration passed in context -func New(ctx logger.Context) (logger.Logger, error) { - hostname, err := ctx.Hostname() - if err != nil { - return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName) - } - - // Parse and validate Splunk URL - splunkURL, err := parseURL(ctx) - if err != nil { - return nil, err - } - - // Splunk Token is required parameter - splunkToken, ok := ctx.Config[splunkTokenKey] - if !ok { - return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey) - } - - tlsConfig := &tls.Config{} - - // Splunk is using autogenerated certificates by default, - // allow users to trust them with skipping verification - if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok { - insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr) - if err != nil { - return nil, err - } - tlsConfig.InsecureSkipVerify = insecureSkipVerify - } - - // If path to the root certificate is provided - load it - if caPath, ok := ctx.Config[splunkCAPathKey]; ok { - caCert, err := ioutil.ReadFile(caPath) - if err != nil { - return nil, err - } - caPool := x509.NewCertPool() - caPool.AppendCertsFromPEM(caCert) - tlsConfig.RootCAs = caPool - } - - if caName, ok := ctx.Config[splunkCANameKey]; ok { - tlsConfig.ServerName = caName - } - - transport := &http.Transport{ - TLSClientConfig: tlsConfig, - } - client := &http.Client{ - Transport: transport, - } - - var nullMessage = &splunkMessage{ - Host: hostname, - } - - // Optional parameters for messages - nullMessage.Source = ctx.Config[splunkSourceKey] - nullMessage.SourceType = ctx.Config[splunkSourceTypeKey] - nullMessage.Index = ctx.Config[splunkIndexKey] - - tag, err := loggerutils.ParseLogTag(ctx, "{{.ID}}") - if err != nil { - return nil, err - } - nullMessage.Event.Tag = tag - nullMessage.Event.Attrs = ctx.ExtraAttributes(nil) - - logger := &splunkLogger{ - client: client, - transport: transport, - url: splunkURL.String(), - 
auth: "Splunk " + splunkToken, - nullMessage: nullMessage, - } - - err = verifySplunkConnection(logger) - if err != nil { - return nil, err - } - - return logger, nil -} - -func (l *splunkLogger) Log(msg *logger.Message) error { - // Construct message as a copy of nullMessage - message := *l.nullMessage - message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/1000000000) - message.Event.Line = string(msg.Line) - message.Event.Source = msg.Source - - jsonEvent, err := json.Marshal(&message) - if err != nil { - return err - } - req, err := http.NewRequest("POST", l.url, bytes.NewBuffer(jsonEvent)) - if err != nil { - return err - } - req.Header.Set("Authorization", l.auth) - res, err := l.client.Do(req) - if err != nil { - return err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - var body []byte - body, err = ioutil.ReadAll(res.Body) - if err != nil { - return err - } - return fmt.Errorf("%s: failed to send event - %s - %s", driverName, res.Status, body) - } - io.Copy(ioutil.Discard, res.Body) - return nil -} - -func (l *splunkLogger) Close() error { - l.transport.CloseIdleConnections() - return nil -} - -func (l *splunkLogger) Name() string { - return driverName -} - -// ValidateLogOpt looks for all supported by splunk driver options -func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case splunkURLKey: - case splunkTokenKey: - case splunkSourceKey: - case splunkSourceTypeKey: - case splunkIndexKey: - case splunkCAPathKey: - case splunkCANameKey: - case splunkInsecureSkipVerifyKey: - case envKey: - case labelsKey: - case tagKey: - default: - return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName) - } - } - return nil -} - -func parseURL(ctx logger.Context) (*url.URL, error) { - splunkURLStr, ok := ctx.Config[splunkURLKey] - if !ok { - return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey) - } - - splunkURL, err := url.Parse(splunkURLStr) - if err != nil { - return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey) - } - - if !urlutil.IsURL(splunkURLStr) || - !splunkURL.IsAbs() || - (splunkURL.Path != "" && splunkURL.Path != "/") || - splunkURL.RawQuery != "" || - splunkURL.Fragment != "" { - return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey) - } - - splunkURL.Path = "/services/collector/event/1.0" - - return splunkURL, nil -} - -func verifySplunkConnection(l *splunkLogger) error { - req, err := http.NewRequest("OPTIONS", l.url, nil) - if err != nil { - return err - } - res, err := l.client.Do(req) - if err != nil { - return err - } - if res.Body != nil { - defer res.Body.Close() - } - if res.StatusCode != http.StatusOK { - var body []byte - body, err = ioutil.ReadAll(res.Body) - if err != nil { - return err - } - return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, res.Status, body) - } - return nil -} diff --git a/daemon/logger/syslog/syslog.go b/daemon/logger/syslog/syslog.go deleted file mode 100644 index 9561e061c..000000000 --- a/daemon/logger/syslog/syslog.go +++ /dev/null @@ -1,258 +0,0 @@ -// +build linux - -// Package syslog provides the logdriver for forwarding server logs to syslog endpoints. 
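Summing up the splunk driver above before moving on: each Log call becomes one HTTPS POST of a JSON event to the collector path hard-coded in parseURL, authorized with a "Splunk <token>" header. A sketch of the equivalent request (host and token are placeholders; the real driver also wires in the TLS options shown above):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

type hecMessage struct {
	Event map[string]string `json:"event"`
	Time  string            `json:"time"`
	Host  string            `json:"host"`
}

func main() {
	msg := hecMessage{
		Event: map[string]string{"line": "hello", "source": "stdout"},
		// Epoch seconds with a fractional part, as in Log above.
		Time: fmt.Sprintf("%f", float64(time.Now().UnixNano())/1000000000),
		Host: "myhost",
	}
	body, _ := json.Marshal(&msg)
	req, _ := http.NewRequest("POST",
		"https://splunk.example.com:8088/services/collector/event/1.0",
		bytes.NewBuffer(body))
	req.Header.Set("Authorization", "Splunk 176fabd6-7bcc-4d4b-a06c-c4c6c77c34c6")
	_ = req // send with an *http.Client carrying the desired TLS config
}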
-package syslog
-
-import (
-	"crypto/tls"
-	"errors"
-	"fmt"
-	"net"
-	"net/url"
-	"os"
-	"strconv"
-	"strings"
-	"time"
-
-	syslog "github.com/RackSec/srslog"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/daemon/logger"
-	"github.com/docker/docker/daemon/logger/loggerutils"
-	"github.com/docker/docker/pkg/urlutil"
-	"github.com/docker/go-connections/tlsconfig"
-)
-
-const (
-	name        = "syslog"
-	secureProto = "tcp+tls"
-)
-
-var facilities = map[string]syslog.Priority{
-	"kern":     syslog.LOG_KERN,
-	"user":     syslog.LOG_USER,
-	"mail":     syslog.LOG_MAIL,
-	"daemon":   syslog.LOG_DAEMON,
-	"auth":     syslog.LOG_AUTH,
-	"syslog":   syslog.LOG_SYSLOG,
-	"lpr":      syslog.LOG_LPR,
-	"news":     syslog.LOG_NEWS,
-	"uucp":     syslog.LOG_UUCP,
-	"cron":     syslog.LOG_CRON,
-	"authpriv": syslog.LOG_AUTHPRIV,
-	"ftp":      syslog.LOG_FTP,
-	"local0":   syslog.LOG_LOCAL0,
-	"local1":   syslog.LOG_LOCAL1,
-	"local2":   syslog.LOG_LOCAL2,
-	"local3":   syslog.LOG_LOCAL3,
-	"local4":   syslog.LOG_LOCAL4,
-	"local5":   syslog.LOG_LOCAL5,
-	"local6":   syslog.LOG_LOCAL6,
-	"local7":   syslog.LOG_LOCAL7,
-}
-
-type syslogger struct {
-	writer *syslog.Writer
-}
-
-func init() {
-	if err := logger.RegisterLogDriver(name, New); err != nil {
-		logrus.Fatal(err)
-	}
-	if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil {
-		logrus.Fatal(err)
-	}
-}
-
-// rsyslog uses the appname part of the syslog message to fill in the %syslogtag%
-// template attribute in rsyslog.conf. In order to be backward compatible with
-// rfc3164, the tag will also be used as the appname.
-func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string {
-	timestamp := time.Now().Format(time.RFC3339)
-	pid := os.Getpid()
-	msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s",
-		p, 1, timestamp, hostname, tag, pid, tag, content)
-	return msg
-}
-
-// The timestamp field in rfc5424 is derived from rfc3339. Whereas rfc3339 makes
-// allowances for multiple syntaxes, rfc5424 is more restrictive: the maximum
-// resolution is "TIME-SECFRAC", which is six digits (microsecond resolution).
-func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string {
-	timestamp := time.Now().Format("2006-01-02T15:04:05.999999Z07:00")
-	pid := os.Getpid()
-	msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s",
-		p, 1, timestamp, hostname, tag, pid, tag, content)
-	return msg
-}
-
-// New creates a syslog logger using the configuration passed in on
-// the context. Supported context configuration variables are
-// syslog-address, syslog-facility, syslog-format.
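To make the two formatters above concrete: both emit RFC 5424 framing with the tag doubled into the APP-NAME and MSGID slots; they differ only in timestamp resolution. A sketch with example values (priority 30 is facility daemon(3)<<3 plus severity info(6); hostname, pid, and tag are made up):

package main

import (
	"fmt"
	"time"
)

func format(pri int, hostname, tag, content string) string {
	// VERSION is fixed at 1; the tag fills both APP-NAME and MSGID.
	return fmt.Sprintf("<%d>%d %s %s %s %d %s %s",
		pri, 1, time.Now().Format(time.RFC3339), hostname, tag, 42, tag, content)
}

func main() {
	fmt.Println(format(30, "myhost", "app/abc123def456", "hello"))
	// e.g. <30>1 2016-07-07T15:01:33Z myhost app/abc123def456 42 app/abc123def456 hello
}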
-func New(ctx logger.Context) (logger.Logger, error) { - tag, err := loggerutils.ParseLogTag(ctx, "{{.DaemonName}}/{{.ID}}") - if err != nil { - return nil, err - } - - proto, address, err := parseAddress(ctx.Config["syslog-address"]) - if err != nil { - return nil, err - } - - facility, err := parseFacility(ctx.Config["syslog-facility"]) - if err != nil { - return nil, err - } - - syslogFormatter, syslogFramer, err := parseLogFormat(ctx.Config["syslog-format"]) - if err != nil { - return nil, err - } - - var log *syslog.Writer - if proto == secureProto { - tlsConfig, tlsErr := parseTLSConfig(ctx.Config) - if tlsErr != nil { - return nil, tlsErr - } - log, err = syslog.DialWithTLSConfig(proto, address, facility, tag, tlsConfig) - } else { - log, err = syslog.Dial(proto, address, facility, tag) - } - - if err != nil { - return nil, err - } - - log.SetFormatter(syslogFormatter) - log.SetFramer(syslogFramer) - - return &syslogger{ - writer: log, - }, nil -} - -func (s *syslogger) Log(msg *logger.Message) error { - if msg.Source == "stderr" { - return s.writer.Err(string(msg.Line)) - } - return s.writer.Info(string(msg.Line)) -} - -func (s *syslogger) Close() error { - return s.writer.Close() -} - -func (s *syslogger) Name() string { - return name -} - -func parseAddress(address string) (string, string, error) { - if address == "" { - return "", "", nil - } - if !urlutil.IsTransportURL(address) { - return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address) - } - url, err := url.Parse(address) - if err != nil { - return "", "", err - } - - // unix and unixgram socket validation - if url.Scheme == "unix" || url.Scheme == "unixgram" { - if _, err := os.Stat(url.Path); err != nil { - return "", "", err - } - return url.Scheme, url.Path, nil - } - - // here we process tcp|udp - host := url.Host - if _, _, err := net.SplitHostPort(host); err != nil { - if !strings.Contains(err.Error(), "missing port in address") { - return "", "", err - } - host = host + ":514" - } - - return url.Scheme, host, nil -} - -// ValidateLogOpt looks for syslog specific log options -// syslog-address, syslog-facility. 
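The address handling above is worth calling out: tcp and udp endpoints may omit the port, in which case the standard syslog port 514 is appended. A self-contained sketch of that branch (the error-string match is the same one parseAddress relies on):

package main

import (
	"fmt"
	"net"
	"strings"
)

// withDefaultPort appends ":514" when the host has no explicit port.
func withDefaultPort(host string) (string, error) {
	if _, _, err := net.SplitHostPort(host); err != nil {
		if !strings.Contains(err.Error(), "missing port in address") {
			return "", err
		}
		host = host + ":514"
	}
	return host, nil
}

func main() {
	h, _ := withDefaultPort("logs.example.com")
	fmt.Println(h) // logs.example.com:514
}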
-func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case "env": - case "labels": - case "syslog-address": - case "syslog-facility": - case "syslog-tls-ca-cert": - case "syslog-tls-cert": - case "syslog-tls-key": - case "syslog-tls-skip-verify": - case "tag": - case "syslog-format": - default: - return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) - } - } - if _, _, err := parseAddress(cfg["syslog-address"]); err != nil { - return err - } - if _, err := parseFacility(cfg["syslog-facility"]); err != nil { - return err - } - if _, _, err := parseLogFormat(cfg["syslog-format"]); err != nil { - return err - } - return nil -} - -func parseFacility(facility string) (syslog.Priority, error) { - if facility == "" { - return syslog.LOG_DAEMON, nil - } - - if syslogFacility, valid := facilities[facility]; valid { - return syslogFacility, nil - } - - fInt, err := strconv.Atoi(facility) - if err == nil && 0 <= fInt && fInt <= 23 { - return syslog.Priority(fInt << 3), nil - } - - return syslog.Priority(0), errors.New("invalid syslog facility") -} - -func parseTLSConfig(cfg map[string]string) (*tls.Config, error) { - _, skipVerify := cfg["syslog-tls-skip-verify"] - - opts := tlsconfig.Options{ - CAFile: cfg["syslog-tls-ca-cert"], - CertFile: cfg["syslog-tls-cert"], - KeyFile: cfg["syslog-tls-key"], - InsecureSkipVerify: skipVerify, - } - - return tlsconfig.Client(opts) -} - -func parseLogFormat(logFormat string) (syslog.Formatter, syslog.Framer, error) { - switch logFormat { - case "": - return syslog.UnixFormatter, syslog.DefaultFramer, nil - case "rfc3164": - return syslog.RFC3164Formatter, syslog.DefaultFramer, nil - case "rfc5424": - return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil - case "rfc5424micro": - return rfc5424microformatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil - default: - return nil, nil, errors.New("Invalid syslog format") - } - -} diff --git a/daemon/logger/syslog/syslog_test.go b/daemon/logger/syslog/syslog_test.go deleted file mode 100644 index f083030b7..000000000 --- a/daemon/logger/syslog/syslog_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build linux - -package syslog - -import ( - syslog "github.com/RackSec/srslog" - "reflect" - "testing" -) - -func functionMatches(expectedFun interface{}, actualFun interface{}) bool { - return reflect.ValueOf(expectedFun).Pointer() == reflect.ValueOf(actualFun).Pointer() -} - -func TestParseLogFormat(t *testing.T) { - formatter, framer, err := parseLogFormat("rfc5424") - if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || - !functionMatches(syslog.RFC5425MessageLengthFramer, framer) { - t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) - } - - formatter, framer, err = parseLogFormat("rfc5424micro") - if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) || - !functionMatches(syslog.RFC5425MessageLengthFramer, framer) { - t.Fatal("Failed to parse rfc5424 (microsecond) format", err, formatter, framer) - } - - formatter, framer, err = parseLogFormat("rfc3164") - if err != nil || !functionMatches(syslog.RFC3164Formatter, formatter) || - !functionMatches(syslog.DefaultFramer, framer) { - t.Fatal("Failed to parse rfc3164 format", err, formatter, framer) - } - - formatter, framer, err = parseLogFormat("") - if err != nil || !functionMatches(syslog.UnixFormatter, formatter) || - !functionMatches(syslog.DefaultFramer, framer) { - t.Fatal("Failed to parse empty format", err, 
formatter, framer) - } - - formatter, framer, err = parseLogFormat("invalid") - if err == nil { - t.Fatal("Failed to parse invalid format", err, formatter, framer) - } -} - -func TestValidateLogOptEmpty(t *testing.T) { - emptyConfig := make(map[string]string) - if err := ValidateLogOpt(emptyConfig); err != nil { - t.Fatal("Failed to parse empty config", err) - } -} diff --git a/daemon/logger/syslog/syslog_unsupported.go b/daemon/logger/syslog/syslog_unsupported.go deleted file mode 100644 index 50cc51b65..000000000 --- a/daemon/logger/syslog/syslog_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package syslog diff --git a/daemon/logs.go b/daemon/logs.go deleted file mode 100644 index 1b285c691..000000000 --- a/daemon/logs.go +++ /dev/null @@ -1,166 +0,0 @@ -package daemon - -import ( - "fmt" - "io" - "strconv" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/jsonfilelog" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/stdcopy" - containertypes "github.com/docker/engine-api/types/container" - timetypes "github.com/docker/engine-api/types/time" -) - -// ContainerLogs hooks up a container's stdout and stderr streams -// configured with the given struct. -func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *backend.ContainerLogsConfig, started chan struct{}) error { - container, err := daemon.GetContainer(containerName) - if err != nil { - return err - } - - if !(config.ShowStdout || config.ShowStderr) { - return fmt.Errorf("You must choose at least one stream") - } - - cLog, err := daemon.getLogger(container) - if err != nil { - return err - } - logReader, ok := cLog.(logger.LogReader) - if !ok { - return logger.ErrReadLogsNotSupported - } - - follow := config.Follow && container.IsRunning() - tailLines, err := strconv.Atoi(config.Tail) - if err != nil { - tailLines = -1 - } - - logrus.Debug("logs: begin stream") - - var since time.Time - if config.Since != "" { - s, n, err := timetypes.ParseTimestamps(config.Since, 0) - if err != nil { - return err - } - since = time.Unix(s, n) - } - readConfig := logger.ReadConfig{ - Since: since, - Tail: tailLines, - Follow: follow, - } - logs := logReader.ReadLogs(readConfig) - - wf := ioutils.NewWriteFlusher(config.OutStream) - defer wf.Close() - close(started) - wf.Flush() - - var outStream io.Writer = wf - errStream := outStream - if !container.Config.Tty { - errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) - outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } - - for { - select { - case err := <-logs.Err: - logrus.Errorf("Error streaming logs: %v", err) - return nil - case <-ctx.Done(): - logs.Close() - return nil - case msg, ok := <-logs.Msg: - if !ok { - logrus.Debug("logs: end stream") - logs.Close() - if cLog != container.LogDriver { - // Since the logger isn't cached in the container, which occurs if it is running, it - // must get explicitly closed here to avoid leaking it and any file handles it has. - if err := cLog.Close(); err != nil { - logrus.Errorf("Error closing logger: %v", err) - } - } - return nil - } - logLine := msg.Line - if config.Details { - logLine = append([]byte(msg.Attrs.String()+" "), logLine...) - } - if config.Timestamps { - logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) 
- } - if msg.Source == "stdout" && config.ShowStdout { - outStream.Write(logLine) - } - if msg.Source == "stderr" && config.ShowStderr { - errStream.Write(logLine) - } - } - } -} - -func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) { - if container.LogDriver != nil && container.IsRunning() { - return container.LogDriver, nil - } - return container.StartLogger(container.HostConfig.LogConfig) -} - -// StartLogging initializes and starts the container logging stream. -func (daemon *Daemon) StartLogging(container *container.Container) error { - if container.HostConfig.LogConfig.Type == "none" { - return nil // do not start logging routines - } - - l, err := container.StartLogger(container.HostConfig.LogConfig) - if err != nil { - return fmt.Errorf("Failed to initialize logging driver: %v", err) - } - - copier := logger.NewCopier(map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) - container.LogCopier = copier - copier.Run() - container.LogDriver = l - - // set LogPath field only for json-file logdriver - if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { - container.LogPath = jl.LogPath() - } - - return nil -} - -// mergeLogConfig merges the daemon log config to the container's log config if the container's log driver is not specified. -func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) error { - if cfg.Type == "" { - cfg.Type = daemon.defaultLogConfig.Type - } - - if cfg.Config == nil { - cfg.Config = make(map[string]string) - } - - if cfg.Type == daemon.defaultLogConfig.Type { - for k, v := range daemon.defaultLogConfig.Config { - if _, ok := cfg.Config[k]; !ok { - cfg.Config[k] = v - } - } - } - - return logger.ValidateLogOpts(cfg.Type, cfg.Config) -} diff --git a/daemon/logs_test.go b/daemon/logs_test.go deleted file mode 100644 index f0c6f6e1e..000000000 --- a/daemon/logs_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package daemon - -import ( - "testing" - - containertypes "github.com/docker/engine-api/types/container" -) - -func TestMergeAndVerifyLogConfigNilConfig(t *testing.T) { - d := &Daemon{defaultLogConfig: containertypes.LogConfig{Type: "json-file", Config: map[string]string{"max-file": "1"}}} - cfg := containertypes.LogConfig{Type: d.defaultLogConfig.Type} - if err := d.mergeAndVerifyLogConfig(&cfg); err != nil { - t.Fatal(err) - } -} diff --git a/daemon/monitor.go b/daemon/monitor.go deleted file mode 100644 index f517aa20e..000000000 --- a/daemon/monitor.go +++ /dev/null @@ -1,165 +0,0 @@ -package daemon - -import ( - "errors" - "fmt" - "io" - "runtime" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/runconfig" -) - -// StateChanged updates daemon state changes from containerd -func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { - c := daemon.containers.Get(id) - if c == nil { - return fmt.Errorf("no such container: %s", id) - } - - switch e.State { - case libcontainerd.StateOOM: - // StateOOM is Linux specific and should never be hit on Windows - if runtime.GOOS == "windows" { - return errors.New("Received StateOOM from libcontainerd on Windows. 
This should never happen.") - } - daemon.updateHealthMonitor(c) - daemon.LogContainerEvent(c, "oom") - case libcontainerd.StateExit: - c.Lock() - defer c.Unlock() - c.Wait() - c.Reset(false) - c.SetStopped(platformConstructExitStatus(e)) - attributes := map[string]string{ - "exitCode": strconv.Itoa(int(e.ExitCode)), - } - daemon.updateHealthMonitor(c) - daemon.LogContainerEventWithAttributes(c, "die", attributes) - daemon.Cleanup(c) - // FIXME: here is race condition between two RUN instructions in Dockerfile - // because they share same runconfig and change image. Must be fixed - // in builder/builder.go - if err := c.ToDisk(); err != nil { - return err - } - return daemon.postRunProcessing(c, e) - case libcontainerd.StateRestart: - c.Lock() - defer c.Unlock() - c.Reset(false) - c.RestartCount++ - c.SetRestarting(platformConstructExitStatus(e)) - attributes := map[string]string{ - "exitCode": strconv.Itoa(int(e.ExitCode)), - } - daemon.LogContainerEventWithAttributes(c, "die", attributes) - daemon.updateHealthMonitor(c) - return c.ToDisk() - case libcontainerd.StateExitProcess: - c.Lock() - defer c.Unlock() - if execConfig := c.ExecCommands.Get(e.ProcessID); execConfig != nil { - ec := int(e.ExitCode) - execConfig.ExitCode = &ec - execConfig.Running = false - execConfig.Wait() - if err := execConfig.CloseStreams(); err != nil { - logrus.Errorf("%s: %s", c.ID, err) - } - - // remove the exec command from the container's store only and not the - // daemon's store so that the exec command can be inspected. - c.ExecCommands.Delete(execConfig.ID) - } else { - logrus.Warnf("Ignoring StateExitProcess for %v but no exec command found", e) - } - case libcontainerd.StateStart, libcontainerd.StateRestore: - // Container is already locked in this case - c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart) - c.HasBeenManuallyStopped = false - if err := c.ToDisk(); err != nil { - c.Reset(false) - return err - } - daemon.initHealthMonitor(c) - daemon.LogContainerEvent(c, "start") - case libcontainerd.StatePause: - // Container is already locked in this case - c.Paused = true - daemon.updateHealthMonitor(c) - daemon.LogContainerEvent(c, "pause") - case libcontainerd.StateResume: - // Container is already locked in this case - c.Paused = false - daemon.updateHealthMonitor(c) - daemon.LogContainerEvent(c, "unpause") - } - - return nil -} - -// AttachStreams is called by libcontainerd to connect the stdio. -func (daemon *Daemon) AttachStreams(id string, iop libcontainerd.IOPipe) error { - var ( - s *runconfig.StreamConfig - ec *exec.Config - ) - - c := daemon.containers.Get(id) - if c == nil { - var err error - ec, err = daemon.getExecConfig(id) - if err != nil { - return fmt.Errorf("no such exec/container: %s", id) - } - s = ec.StreamConfig - } else { - s = c.StreamConfig - if err := daemon.StartLogging(c); err != nil { - c.Reset(false) - return err - } - } - - if stdin := s.Stdin(); stdin != nil { - if iop.Stdin != nil { - go func() { - io.Copy(iop.Stdin, stdin) - iop.Stdin.Close() - }() - } - } else { - //TODO(swernli): On Windows, not closing stdin when no tty is requested by the exec Config - // results in a hang. We should re-evaluate generalizing this fix for all OSes if - // we can determine that is the right thing to do more generally. - if (c != nil && !c.Config.Tty) || (ec != nil && !ec.Tty && runtime.GOOS == "windows") { - // tty is enabled, so dont close containerd's iopipe stdin. 
-			if iop.Stdin != nil {
-				iop.Stdin.Close()
-			}
-		}
-	}
-
-	copyFunc := func(w io.Writer, r io.Reader) {
-		s.Add(1)
-		go func() {
-			if _, err := io.Copy(w, r); err != nil {
-				logrus.Errorf("%v stream copy error: %v", id, err)
-			}
-			s.Done()
-		}()
-	}
-
-	if iop.Stdout != nil {
-		copyFunc(s.Stdout(), iop.Stdout)
-	}
-	if iop.Stderr != nil {
-		copyFunc(s.Stderr(), iop.Stderr)
-	}
-
-	return nil
-}
diff --git a/daemon/monitor_linux.go b/daemon/monitor_linux.go
deleted file mode 100644
index 09f5af50c..000000000
--- a/daemon/monitor_linux.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package daemon
-
-import (
-	"github.com/docker/docker/container"
-	"github.com/docker/docker/libcontainerd"
-)
-
-// platformConstructExitStatus returns a platform-specific exit status structure
-func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
-	return &container.ExitStatus{
-		ExitCode:  int(e.ExitCode),
-		OOMKilled: e.OOMKilled,
-	}
-}
-
-// postRunProcessing performs any processing needed on the container after it has stopped.
-func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error {
-	return nil
-}
diff --git a/daemon/monitor_solaris.go b/daemon/monitor_solaris.go
deleted file mode 100644
index 5ccfada76..000000000
--- a/daemon/monitor_solaris.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package daemon
-
-import (
-	"github.com/docker/docker/container"
-	"github.com/docker/docker/libcontainerd"
-)
-
-// platformConstructExitStatus returns a platform-specific exit status structure
-func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
-	return &container.ExitStatus{
-		ExitCode: int(e.ExitCode),
-	}
-}
-
-// postRunProcessing performs any processing needed on the container after it has stopped.
-func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error {
-	return nil
-}
diff --git a/daemon/monitor_windows.go b/daemon/monitor_windows.go
deleted file mode 100644
index b500ee60b..000000000
--- a/daemon/monitor_windows.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package daemon
-
-import (
-	"fmt"
-
-	"github.com/docker/docker/container"
-	"github.com/docker/docker/libcontainerd"
-)
-
-// platformConstructExitStatus returns a platform-specific exit status structure
-func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
-	return &container.ExitStatus{
-		ExitCode: int(e.ExitCode),
-	}
-}
-
-// postRunProcessing performs any processing needed on the container after it has stopped.
-func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error {
-	if e.ExitCode == 0 && e.UpdatePending {
-		spec, err := daemon.createSpec(container)
-		if err != nil {
-			return err
-		}
-
-		servicingOption := &libcontainerd.ServicingOption{
-			IsServicing: true,
-		}
-
-		// Create a new servicing container, which will start, complete the update, and merge back the
-		// results if it succeeded, all as part of the below function call.
- if err := daemon.containerd.Create((container.ID + "_servicing"), *spec, servicingOption); err != nil { - container.SetExitCode(-1) - return fmt.Errorf("Post-run update servicing failed: %s", err) - } - } - return nil -} diff --git a/daemon/mounts.go b/daemon/mounts.go deleted file mode 100644 index d4f24b281..000000000 --- a/daemon/mounts.go +++ /dev/null @@ -1,48 +0,0 @@ -package daemon - -import ( - "fmt" - "strings" - - "github.com/docker/docker/container" - volumestore "github.com/docker/docker/volume/store" -) - -func (daemon *Daemon) prepareMountPoints(container *container.Container) error { - for _, config := range container.MountPoints { - if err := daemon.lazyInitializeVolume(container.ID, config); err != nil { - return err - } - } - return nil -} - -func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error { - var rmErrors []string - for _, m := range container.MountPoints { - if m.Volume == nil { - continue - } - daemon.volumes.Dereference(m.Volume, container.ID) - if rm { - // Do not remove named mountpoints - // these are mountpoints specified like `docker run -v :/foo` - if m.Named { - continue - } - err := daemon.volumes.Remove(m.Volume) - // Ignore volume in use errors because having this - // volume being referenced by other container is - // not an error, but an implementation detail. - // This prevents docker from logging "ERROR: Volume in use" - // where there is another container using the volume. - if err != nil && !volumestore.IsInUse(err) { - rmErrors = append(rmErrors, err.Error()) - } - } - } - if len(rmErrors) > 0 { - return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) - } - return nil -} diff --git a/daemon/names.go b/daemon/names.go deleted file mode 100644 index feb1323bb..000000000 --- a/daemon/names.go +++ /dev/null @@ -1,115 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/namesgenerator" - "github.com/docker/docker/pkg/registrar" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/utils" -) - -var ( - validContainerNameChars = utils.RestrictedNameChars - validContainerNamePattern = utils.RestrictedNamePattern -) - -func (daemon *Daemon) registerName(container *container.Container) error { - if daemon.Exists(container.ID) { - return fmt.Errorf("Container is already loaded") - } - if err := validateID(container.ID); err != nil { - return err - } - if container.Name == "" { - name, err := daemon.generateNewName(container.ID) - if err != nil { - return err - } - container.Name = name - - if err := container.ToDiskLocking(); err != nil { - logrus.Errorf("Error saving container name to disk: %v", err) - } - } - return daemon.nameIndex.Reserve(container.Name, container.ID) -} - -func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { - var ( - err error - id = stringid.GenerateNonCryptoID() - ) - - if name == "" { - if name, err = daemon.generateNewName(id); err != nil { - return "", "", err - } - return id, name, nil - } - - if name, err = daemon.reserveName(id, name); err != nil { - return "", "", err - } - - return id, name, nil -} - -func (daemon *Daemon) reserveName(id, name string) (string, error) { - if !validContainerNamePattern.MatchString(name) { - return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) - } - if name[0] != '/' { - name = "/" + name - } - - if err := daemon.nameIndex.Reserve(name, id); err != 
nil { - if err == registrar.ErrNameReserved { - id, err := daemon.nameIndex.Get(name) - if err != nil { - logrus.Errorf("got unexpected error while looking up reserved name: %v", err) - return "", err - } - return "", fmt.Errorf("Conflict. The name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", name, id) - } - return "", fmt.Errorf("error reserving name: %s, error: %v", name, err) - } - return name, nil -} - -func (daemon *Daemon) releaseName(name string) { - daemon.nameIndex.Release(name) -} - -func (daemon *Daemon) generateNewName(id string) (string, error) { - var name string - for i := 0; i < 6; i++ { - name = namesgenerator.GetRandomName(i) - if name[0] != '/' { - name = "/" + name - } - - if err := daemon.nameIndex.Reserve(name, id); err != nil { - if err == registrar.ErrNameReserved { - continue - } - return "", err - } - return name, nil - } - - name = "/" + stringid.TruncateID(id) - if err := daemon.nameIndex.Reserve(name, id); err != nil { - return "", err - } - return name, nil -} - -func validateID(id string) error { - if id == "" { - return fmt.Errorf("Invalid empty id") - } - return nil -} diff --git a/daemon/network.go b/daemon/network.go deleted file mode 100644 index 57ec395e2..000000000 --- a/daemon/network.go +++ /dev/null @@ -1,381 +0,0 @@ -package daemon - -import ( - "fmt" - "net" - "strings" - - "github.com/Sirupsen/logrus" - clustertypes "github.com/docker/docker/daemon/cluster/provider" - "github.com/docker/docker/errors" - "github.com/docker/docker/runconfig" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" - networktypes "github.com/docker/libnetwork/types" -) - -// NetworkControllerEnabled checks if the networking stack is enabled. -// This feature depends on OS primitives and it's disabled in systems like Windows. -func (daemon *Daemon) NetworkControllerEnabled() bool { - return daemon.netController != nil -} - -// FindNetwork function finds a network for a given string that can represent network name or id -func (daemon *Daemon) FindNetwork(idName string) (libnetwork.Network, error) { - // Find by Name - n, err := daemon.GetNetworkByName(idName) - if err != nil && !isNoSuchNetworkError(err) { - return nil, err - } - - if n != nil { - return n, nil - } - - // Find by id - return daemon.GetNetworkByID(idName) -} - -func isNoSuchNetworkError(err error) bool { - _, ok := err.(libnetwork.ErrNoSuchNetwork) - return ok -} - -// GetNetworkByID function returns a network whose ID begins with the given prefix. -// It fails with an error if no matching, or more than one matching, networks are found. -func (daemon *Daemon) GetNetworkByID(partialID string) (libnetwork.Network, error) { - list := daemon.GetNetworksByID(partialID) - - if len(list) == 0 { - return nil, libnetwork.ErrNoSuchNetwork(partialID) - } - if len(list) > 1 { - return nil, libnetwork.ErrInvalidID(partialID) - } - return list[0], nil -} - -// GetNetworkByName function returns a network for a given network name. 
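Backing up to generateNewName above: it tries up to six random names before giving up and deriving one from the container ID. A toy version with an in-memory registrar (registrar, newName, and the generator are illustrative stand-ins for pkg/registrar and pkg/namesgenerator):

package main

import (
	"errors"
	"fmt"
)

var errNameReserved = errors.New("name is reserved")

type registrar map[string]string

func (r registrar) reserve(name, id string) error {
	if _, ok := r[name]; ok {
		return errNameReserved
	}
	r[name] = id
	return nil
}

func newName(r registrar, id string, generate func(int) string) (string, error) {
	for i := 0; i < 6; i++ {
		name := "/" + generate(i)
		if err := r.reserve(name, id); err != nil {
			if err == errNameReserved {
				continue // collision: try another random name
			}
			return "", err
		}
		return name, nil
	}
	name := "/" + id[:12] // fall back to the truncated container ID
	return name, r.reserve(name, id)
}

func main() {
	r := registrar{}
	n, _ := newName(r, "a7317399f3f857173c6179d44823594f", func(int) string { return "boring_turing" })
	fmt.Println(n) // /boring_turing
}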
-func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) { - c := daemon.netController - if c == nil { - return nil, libnetwork.ErrNoSuchNetwork(name) - } - if name == "" { - name = c.Config().Daemon.DefaultNetwork - } - return c.NetworkByName(name) -} - -// GetNetworksByID returns the list of networks whose IDs begin with the given partial ID -func (daemon *Daemon) GetNetworksByID(partialID string) []libnetwork.Network { - c := daemon.netController - if c == nil { - return nil - } - list := []libnetwork.Network{} - l := func(nw libnetwork.Network) bool { - if strings.HasPrefix(nw.ID(), partialID) { - list = append(list, nw) - } - return false - } - c.WalkNetworks(l) - - return list -} - -// getAllNetworks returns a list containing all networks -func (daemon *Daemon) getAllNetworks() []libnetwork.Network { - c := daemon.netController - list := []libnetwork.Network{} - l := func(nw libnetwork.Network) bool { - list = append(list, nw) - return false - } - c.WalkNetworks(l) - - return list -} - -func isIngressNetwork(name string) bool { - return name == "ingress" -} - -var ingressChan = make(chan struct{}, 1) - -func ingressWait() func() { - ingressChan <- struct{}{} - return func() { <-ingressChan } -} - -// SetupIngress sets up ingress networking. -func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) error { - ip, _, err := net.ParseCIDR(nodeIP) - if err != nil { - return err - } - - go func() { - controller := daemon.netController - controller.AgentInitWait() - - if n, err := daemon.GetNetworkByName(create.Name); err == nil && n != nil && n.ID() != create.ID { - if err := controller.SandboxDestroy("ingress-sbox"); err != nil { - logrus.Errorf("Failed to delete stale ingress sandbox: %v", err) - return - } - - // Clean up any stale endpoints that might be left over from previous iterations - epList := n.Endpoints() - for _, ep := range epList { - if err := ep.Delete(true); err != nil { - logrus.Errorf("Failed to delete endpoint %s (%s): %v", ep.Name(), ep.ID(), err) - } - } - - if err := n.Delete(); err != nil { - logrus.Errorf("Failed to delete stale ingress network %s: %v", n.ID(), err) - return - } - } - - if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil { - // If it is any error other than an - // already-exists error, log it and return. - if _, ok := err.(libnetwork.NetworkNameError); !ok { - logrus.Errorf("Failed creating ingress network: %v", err) - return - } - - // Otherwise continue down the call to create or recreate sandbox. - } - - n, err := daemon.GetNetworkByID(create.ID) - if err != nil { - logrus.Errorf("Failed getting ingress network by id after creating: %v", err) - return - } - - sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress()) - if err != nil { - if _, ok := err.(networktypes.ForbiddenError); !ok { - logrus.Errorf("Failed creating ingress sandbox: %v", err) - } - return - } - - ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil)) - if err != nil { - logrus.Errorf("Failed creating ingress endpoint: %v", err) - return - } - - if err := ep.Join(sb, nil); err != nil { - logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err) - } - }() - - return nil -} - -// SetNetworkBootstrapKeys sets the bootstrap keys. 
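For context, ingressWait above serializes ingress network creation with a capacity-1 channel used as a lock; the idiom `defer ingressWait()()` acquires the slot immediately and releases it when the caller returns. A runnable sketch of the same idiom, with createIngress as an invented stand-in for the real work:

    package main

    import (
        "fmt"
        "sync"
    )

    var ingressChan = make(chan struct{}, 1)

    // ingressWait blocks until the slot is free, then returns the
    // release function, exactly as in the code above.
    func ingressWait() func() {
        ingressChan <- struct{}{}       // acquire
        return func() { <-ingressChan } // release
    }

    func createIngress(i int) {
        defer ingressWait()() // acquire now, release when we return
        fmt.Println("serialized ingress work", i)
    }

    func main() {
        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
            wg.Add(1)
            go func(i int) { defer wg.Done(); createIngress(i) }(i)
        }
        wg.Wait()
    }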
-func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error { - return daemon.netController.SetKeys(keys) -} - -// CreateManagedNetwork creates an agent network. -func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error { - _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true) - return err -} - -// CreateNetwork creates a network with the given name, driver and other optional parameters -func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) { - resp, err := daemon.createNetwork(create, "", false) - if err != nil { - return nil, err - } - return resp, err -} - -func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) { - // If there is a pending ingress network creation wait here - // since ingress network creation can happen via node download - // from manager or task download. - if isIngressNetwork(create.Name) { - defer ingressWait()() - } - - if runconfig.IsPreDefinedNetwork(create.Name) && !agent { - err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name) - return nil, errors.NewRequestForbiddenError(err) - } - - var warning string - nw, err := daemon.GetNetworkByName(create.Name) - if err != nil { - if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { - return nil, err - } - } - if nw != nil { - if create.CheckDuplicate { - return nil, libnetwork.NetworkNameError(create.Name) - } - warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) - } - - c := daemon.netController - driver := create.Driver - if driver == "" { - driver = c.Config().Daemon.DefaultDriver - } - - ipam := create.IPAM - v4Conf, v6Conf, err := getIpamConfig(ipam.Config) - if err != nil { - return nil, err - } - - nwOptions := []libnetwork.NetworkOption{ - libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options), - libnetwork.NetworkOptionEnableIPv6(create.EnableIPv6), - libnetwork.NetworkOptionDriverOpts(create.Options), - libnetwork.NetworkOptionLabels(create.Labels), - } - if create.Internal { - nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork()) - } - if agent { - nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic()) - nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false)) - } - - if isIngressNetwork(create.Name) { - nwOptions = append(nwOptions, libnetwork.NetworkOptionIngress()) - } - - n, err := c.NewNetwork(driver, create.Name, id, nwOptions...) - if err != nil { - return nil, err - } - - daemon.LogNetworkEvent(n, "create") - return &types.NetworkCreateResponse{ - ID: n.ID(), - Warning: warning, - }, nil -} - -func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) { - ipamV4Cfg := []*libnetwork.IpamConf{} - ipamV6Cfg := []*libnetwork.IpamConf{} - for _, d := range data { - iCfg := libnetwork.IpamConf{} - iCfg.PreferredPool = d.Subnet - iCfg.SubPool = d.IPRange - iCfg.Gateway = d.Gateway - iCfg.AuxAddresses = d.AuxAddress - ip, _, err := net.ParseCIDR(d.Subnet) - if err != nil { - return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err) - } - if ip.To4() != nil { - ipamV4Cfg = append(ipamV4Cfg, &iCfg) - } else { - ipamV6Cfg = append(ipamV6Cfg, &iCfg) - } - } - return ipamV4Cfg, ipamV6Cfg, nil -} - -// UpdateContainerServiceConfig updates a service configuration. 
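For context, getIpamConfig above splits IPAM entries into IPv4 and IPv6 pools by parsing each subnet and checking ip.To4(). A self-contained sketch of that classification (classify is an invented name, not part of the patch):

    package main

    import (
        "fmt"
        "net"
    )

    // classify splits CIDR subnets the same way getIpamConfig does:
    // net.ParseCIDR to validate, then ip.To4() != nil to pick the family.
    func classify(subnets []string) ([]string, []string, error) {
        var v4, v6 []string
        for _, s := range subnets {
            ip, _, err := net.ParseCIDR(s)
            if err != nil {
                return nil, nil, fmt.Errorf("invalid subnet %s: %v", s, err)
            }
            if ip.To4() != nil {
                v4 = append(v4, s)
            } else {
                v6 = append(v6, s)
            }
        }
        return v4, v6, nil
    }

    func main() {
        v4, v6, err := classify([]string{"10.0.0.0/24", "fd00::/64"})
        fmt.Println(v4, v6, err) // [10.0.0.0/24] [fd00::/64] <nil>
    }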
-func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error { - container, err := daemon.GetContainer(containerName) - if err != nil { - return err - } - - container.NetworkSettings.Service = serviceConfig - return nil -} - -// ConnectContainerToNetwork connects the given container to the given -// network. If either cannot be found, an error is returned. If the -// network cannot be set up, an error is returned. -func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error { - container, err := daemon.GetContainer(containerName) - if err != nil { - return err - } - return daemon.ConnectToNetwork(container, networkName, endpointConfig) -} - -// DisconnectContainerFromNetwork disconnects the given container from -// the given network. If either cannot be found, an error is returned. -func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, network libnetwork.Network, force bool) error { - container, err := daemon.GetContainer(containerName) - if err != nil { - if force { - return daemon.ForceEndpointDelete(containerName, network) - } - return err - } - return daemon.DisconnectFromNetwork(container, network, force) -} - -// GetNetworkDriverList returns the list of plugin drivers -// registered for networks. -func (daemon *Daemon) GetNetworkDriverList() map[string]bool { - pluginList := make(map[string]bool) - - if !daemon.NetworkControllerEnabled() { - return nil - } - c := daemon.netController - networks := c.Networks() - - for _, network := range networks { - driver := network.Type() - pluginList[driver] = true - } - // TODO: Replace this with proper libnetwork API - pluginList["overlay"] = true - - return pluginList -} - -// DeleteManagedNetwork deletes an agent network. -func (daemon *Daemon) DeleteManagedNetwork(networkID string) error { - return daemon.deleteNetwork(networkID, true) -} - -// DeleteNetwork destroys a network unless it's one of docker's predefined networks. -func (daemon *Daemon) DeleteNetwork(networkID string) error { - return daemon.deleteNetwork(networkID, false) -} - -func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error { - nw, err := daemon.FindNetwork(networkID) - if err != nil { - return err - } - - if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic { - err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name()) - return errors.NewRequestForbiddenError(err) - } - - if err := nw.Delete(); err != nil { - return err - } - daemon.LogNetworkEvent(nw, "destroy") - return nil -} - -// GetNetworks returns a list of all networks -func (daemon *Daemon) GetNetworks() []libnetwork.Network { - return daemon.getAllNetworks() -} diff --git a/daemon/network/settings.go b/daemon/network/settings.go deleted file mode 100644 index ff27cb0bb..000000000 --- a/daemon/network/settings.go +++ /dev/null @@ -1,24 +0,0 @@ -package network - -import ( - clustertypes "github.com/docker/docker/daemon/cluster/provider" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/go-connections/nat" -) - -// Settings stores configuration details about the daemon network config -// TODO Windows. 
Many of these fields can be factored out. -type Settings struct { - Bridge string - SandboxID string - HairpinMode bool - LinkLocalIPv6Address string - LinkLocalIPv6PrefixLen int - Networks map[string]*networktypes.EndpointSettings - Service *clustertypes.ServiceConfig - Ports nat.PortMap - SandboxKey string - SecondaryIPAddresses []networktypes.Address - SecondaryIPv6Addresses []networktypes.Address - IsAnonymousEndpoint bool -} diff --git a/daemon/oci_linux.go b/daemon/oci_linux.go deleted file mode 100644 index 4459d02fc..000000000 --- a/daemon/oci_linux.go +++ /dev/null @@ -1,711 +0,0 @@ -package daemon - -import ( - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/caps" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/oci" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/volume" - containertypes "github.com/docker/engine-api/types/container" - "github.com/opencontainers/runc/libcontainer/apparmor" - "github.com/opencontainers/runc/libcontainer/devices" - "github.com/opencontainers/runc/libcontainer/user" - "github.com/opencontainers/specs/specs-go" -) - -func setResources(s *specs.Spec, r containertypes.Resources) error { - weightDevices, err := getBlkioWeightDevices(r) - if err != nil { - return err - } - readBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadBps) - if err != nil { - return err - } - writeBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteBps) - if err != nil { - return err - } - readIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadIOps) - if err != nil { - return err - } - writeIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteIOps) - if err != nil { - return err - } - - memoryRes := getMemoryResources(r) - cpuRes := getCPUResources(r) - blkioWeight := r.BlkioWeight - - specResources := &specs.Resources{ - Memory: memoryRes, - CPU: cpuRes, - BlockIO: &specs.BlockIO{ - Weight: &blkioWeight, - WeightDevice: weightDevices, - ThrottleReadBpsDevice: readBpsDevice, - ThrottleWriteBpsDevice: writeBpsDevice, - ThrottleReadIOPSDevice: readIOpsDevice, - ThrottleWriteIOPSDevice: writeIOpsDevice, - }, - DisableOOMKiller: r.OomKillDisable, - Pids: &specs.Pids{ - Limit: &r.PidsLimit, - }, - } - - if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 { - specResources.Devices = s.Linux.Resources.Devices - } - - s.Linux.Resources = specResources - return nil -} - -func setDevices(s *specs.Spec, c *container.Container) error { - // Build lists of devices allowed and created within the container. - var devs []specs.Device - devPermissions := s.Linux.Resources.Devices - if c.HostConfig.Privileged { - hostDevices, err := devices.HostDevices() - if err != nil { - return err - } - for _, d := range hostDevices { - devs = append(devs, specDevice(d)) - } - rwm := "rwm" - devPermissions = []specs.DeviceCgroup{ - { - Allow: true, - Access: &rwm, - }, - } - } else { - for _, deviceMapping := range c.HostConfig.Devices { - d, dPermissions, err := getDevicesFromPath(deviceMapping) - if err != nil { - return err - } - devs = append(devs, d...) - devPermissions = append(devPermissions, dPermissions...) - } - } - - s.Linux.Devices = append(s.Linux.Devices, devs...) 
- s.Linux.Resources.Devices = devPermissions - return nil -} - -func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error { - var rlimits []specs.Rlimit - - ulimits := c.HostConfig.Ulimits - // Merge ulimits with daemon defaults - ulIdx := make(map[string]struct{}) - for _, ul := range ulimits { - ulIdx[ul.Name] = struct{}{} - } - for name, ul := range daemon.configStore.Ulimits { - if _, exists := ulIdx[name]; !exists { - ulimits = append(ulimits, ul) - } - } - - for _, ul := range ulimits { - rlimits = append(rlimits, specs.Rlimit{ - Type: "RLIMIT_" + strings.ToUpper(ul.Name), - Soft: uint64(ul.Soft), - Hard: uint64(ul.Hard), - }) - } - - s.Process.Rlimits = rlimits - return nil -} - -func setUser(s *specs.Spec, c *container.Container) error { - uid, gid, additionalGids, err := getUser(c, c.Config.User) - if err != nil { - return err - } - s.Process.User.UID = uid - s.Process.User.GID = gid - s.Process.User.AdditionalGids = additionalGids - return nil -} - -func readUserFile(c *container.Container, p string) (io.ReadCloser, error) { - fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS) - if err != nil { - return nil, err - } - return os.Open(fp) -} - -func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { - passwdPath, err := user.GetPasswdPath() - if err != nil { - return 0, 0, nil, err - } - groupPath, err := user.GetGroupPath() - if err != nil { - return 0, 0, nil, err - } - passwdFile, err := readUserFile(c, passwdPath) - if err == nil { - defer passwdFile.Close() - } - groupFile, err := readUserFile(c, groupPath) - if err == nil { - defer groupFile.Close() - } - - execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) - if err != nil { - return 0, 0, nil, err - } - - // todo: fix this double read by a change to libcontainer/user pkg - groupFile, err = readUserFile(c, groupPath) - if err == nil { - defer groupFile.Close() - } - var addGroups []int - if len(c.HostConfig.GroupAdd) > 0 { - addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile) - if err != nil { - return 0, 0, nil, err - } - } - uid := uint32(execUser.Uid) - gid := uint32(execUser.Gid) - sgids := append(execUser.Sgids, addGroups...) - var additionalGids []uint32 - for _, g := range sgids { - additionalGids = append(additionalGids, uint32(g)) - } - return uid, gid, additionalGids, nil -} - -func setNamespace(s *specs.Spec, ns specs.Namespace) { - for i, n := range s.Linux.Namespaces { - if n.Type == ns.Type { - s.Linux.Namespaces[i] = ns - return - } - } - s.Linux.Namespaces = append(s.Linux.Namespaces, ns) -} - -func setCapabilities(s *specs.Spec, c *container.Container) error { - var caplist []string - var err error - if c.HostConfig.Privileged { - caplist = caps.GetAllCapabilities() - } else { - caplist, err = caps.TweakCapabilities(s.Process.Capabilities, c.HostConfig.CapAdd, c.HostConfig.CapDrop) - if err != nil { - return err - } - } - s.Process.Capabilities = caplist - return nil -} - -func delNamespace(s *specs.Spec, nsType specs.NamespaceType) { - idx := -1 - for i, n := range s.Linux.Namespaces { - if n.Type == nsType { - idx = i - } - } - if idx >= 0 { - s.Linux.Namespaces = append(s.Linux.Namespaces[:idx], s.Linux.Namespaces[idx+1:]...) 
- } -} - -func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error { - userNS := false - // user - if c.HostConfig.UsernsMode.IsPrivate() { - uidMap, gidMap := daemon.GetUIDGIDMaps() - if uidMap != nil { - userNS = true - ns := specs.Namespace{Type: "user"} - setNamespace(s, ns) - s.Linux.UIDMappings = specMapping(uidMap) - s.Linux.GIDMappings = specMapping(gidMap) - } - } - // network - if !c.Config.NetworkDisabled { - ns := specs.Namespace{Type: "network"} - parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) - if parts[0] == "container" { - nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) - if err != nil { - return err - } - ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()) - if userNS { - // to share a net namespace, they must also share a user namespace - nsUser := specs.Namespace{Type: "user"} - nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()) - setNamespace(s, nsUser) - } - } else if c.HostConfig.NetworkMode.IsHost() { - ns.Path = c.NetworkSettings.SandboxKey - } - setNamespace(s, ns) - } - // ipc - if c.HostConfig.IpcMode.IsContainer() { - ns := specs.Namespace{Type: "ipc"} - ic, err := daemon.getIpcContainer(c) - if err != nil { - return err - } - ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID()) - setNamespace(s, ns) - if userNS { - // to share an IPC namespace, they must also share a user namespace - nsUser := specs.Namespace{Type: "user"} - nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()) - setNamespace(s, nsUser) - } - } else if c.HostConfig.IpcMode.IsHost() { - delNamespace(s, specs.NamespaceType("ipc")) - } else { - ns := specs.Namespace{Type: "ipc"} - setNamespace(s, ns) - } - // pid - if c.HostConfig.PidMode.IsContainer() { - ns := specs.Namespace{Type: "pid"} - pc, err := daemon.getPidContainer(c) - if err != nil { - return err - } - ns.Path = fmt.Sprintf("/proc/%d/ns/pid", pc.State.GetPID()) - setNamespace(s, ns) - if userNS { - // to share a PID namespace, they must also share a user namespace - nsUser := specs.Namespace{Type: "user"} - nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID()) - setNamespace(s, nsUser) - } - } else if c.HostConfig.PidMode.IsHost() { - delNamespace(s, specs.NamespaceType("pid")) - } else { - ns := specs.Namespace{Type: "pid"} - setNamespace(s, ns) - } - // uts - if c.HostConfig.UTSMode.IsHost() { - delNamespace(s, specs.NamespaceType("uts")) - s.Hostname = "" - } - - return nil -} - -func specMapping(s []idtools.IDMap) []specs.IDMapping { - var ids []specs.IDMapping - for _, item := range s { - ids = append(ids, specs.IDMapping{ - HostID: uint32(item.HostID), - ContainerID: uint32(item.ContainerID), - Size: uint32(item.Size), - }) - } - return ids -} - -func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { - for _, m := range mountinfo { - if m.Mountpoint == dir { - return m - } - } - return nil -} - -// Get the source mount point of directory passed in as argument. Also return -// optional fields. -func getSourceMount(source string) (string, string, error) { - // Ensure any symlinks are resolved. 
- sourcePath, err := filepath.EvalSymlinks(source) - if err != nil { - return "", "", err - } - - mountinfos, err := mount.GetMounts() - if err != nil { - return "", "", err - } - - mountinfo := getMountInfo(mountinfos, sourcePath) - if mountinfo != nil { - return sourcePath, mountinfo.Optional, nil - } - - path := sourcePath - for { - path = filepath.Dir(path) - - mountinfo = getMountInfo(mountinfos, path) - if mountinfo != nil { - return path, mountinfo.Optional, nil - } - - if path == "/" { - break - } - } - - // If we are here, we did not find parent mount. Something is wrong. - return "", "", fmt.Errorf("Could not find source mount of %s", source) -} - -// Ensure mount point on which path is mounted, is shared. -func ensureShared(path string) error { - sharedMount := false - - sourceMount, optionalOpts, err := getSourceMount(path) - if err != nil { - return err - } - // Make sure source mount point is shared. - optsSplit := strings.Split(optionalOpts, " ") - for _, opt := range optsSplit { - if strings.HasPrefix(opt, "shared:") { - sharedMount = true - break - } - } - - if !sharedMount { - return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount) - } - return nil -} - -// Ensure mount point on which path is mounted, is either shared or slave. -func ensureSharedOrSlave(path string) error { - sharedMount := false - slaveMount := false - - sourceMount, optionalOpts, err := getSourceMount(path) - if err != nil { - return err - } - // Make sure source mount point is shared. - optsSplit := strings.Split(optionalOpts, " ") - for _, opt := range optsSplit { - if strings.HasPrefix(opt, "shared:") { - sharedMount = true - break - } else if strings.HasPrefix(opt, "master:") { - slaveMount = true - break - } - } - - if !sharedMount && !slaveMount { - return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount) - } - return nil -} - -var ( - mountPropagationMap = map[string]int{ - "private": mount.PRIVATE, - "rprivate": mount.RPRIVATE, - "shared": mount.SHARED, - "rshared": mount.RSHARED, - "slave": mount.SLAVE, - "rslave": mount.RSLAVE, - } - - mountPropagationReverseMap = map[int]string{ - mount.PRIVATE: "private", - mount.RPRIVATE: "rprivate", - mount.SHARED: "shared", - mount.RSHARED: "rshared", - mount.SLAVE: "slave", - mount.RSLAVE: "rslave", - } -) - -func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error { - userMounts := make(map[string]struct{}) - for _, m := range mounts { - userMounts[m.Destination] = struct{}{} - } - - // Filter out mounts that are overridden by user supplied mounts - var defaultMounts []specs.Mount - _, mountDev := userMounts["/dev"] - for _, m := range s.Mounts { - if _, ok := userMounts[m.Destination]; !ok { - if mountDev && strings.HasPrefix(m.Destination, "/dev/") { - continue - } - defaultMounts = append(defaultMounts, m) - } - } - - s.Mounts = defaultMounts - for _, m := range mounts { - for _, cm := range s.Mounts { - if cm.Destination == m.Destination { - return fmt.Errorf("Duplicate mount point '%s'", m.Destination) - } - } - - if m.Source == "tmpfs" { - data := c.HostConfig.Tmpfs[m.Destination] - options := []string{"noexec", "nosuid", "nodev", volume.DefaultPropagationMode} - if data != "" { - options = append(options, strings.Split(data, ",")...) 
- } - - merged, err := mount.MergeTmpfsOptions(options) - if err != nil { - return err - } - - s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: merged}) - continue - } - - mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"} - - // Determine property of RootPropagation based on volume - // properties. If a volume is shared, then keep root propagation - // shared. This should work for slave and private volumes too. - // - // For slave volumes, it can be either [r]shared/[r]slave. - // - // For private volumes any root propagation value should work. - pFlag := mountPropagationMap[m.Propagation] - if pFlag == mount.SHARED || pFlag == mount.RSHARED { - if err := ensureShared(m.Source); err != nil { - return err - } - rootpg := mountPropagationMap[s.Linux.RootfsPropagation] - if rootpg != mount.SHARED && rootpg != mount.RSHARED { - s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED] - } - } else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE { - if err := ensureSharedOrSlave(m.Source); err != nil { - return err - } - rootpg := mountPropagationMap[s.Linux.RootfsPropagation] - if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { - s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] - } - } - - opts := []string{"rbind"} - if !m.Writable { - opts = append(opts, "ro") - } - if pFlag != 0 { - opts = append(opts, mountPropagationReverseMap[pFlag]) - } - - mt.Options = opts - s.Mounts = append(s.Mounts, mt) - } - - if s.Root.Readonly { - for i, m := range s.Mounts { - switch m.Destination { - case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc - continue - } - if _, ok := userMounts[m.Destination]; !ok { - if !stringutils.InSlice(m.Options, "ro") { - s.Mounts[i].Options = append(s.Mounts[i].Options, "ro") - } - } - } - } - - if c.HostConfig.Privileged { - if !s.Root.Readonly { - // clear readonly for /sys - for i := range s.Mounts { - if s.Mounts[i].Destination == "/sys" { - clearReadOnly(&s.Mounts[i]) - } - } - } - s.Linux.ReadonlyPaths = nil - s.Linux.MaskedPaths = nil - } - - // TODO: until a kernel/mount solution exists for handling remount in a user namespace, - // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) - if uidMap, _ := daemon.GetUIDGIDMaps(); uidMap != nil || c.HostConfig.Privileged { - for i, m := range s.Mounts { - if m.Type == "cgroup" { - clearReadOnly(&s.Mounts[i]) - } - } - } - - return nil -} - -func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { - linkedEnv, err := daemon.setupLinkedContainers(c) - if err != nil { - return err - } - s.Root = specs.Root{ - Path: c.BaseFS, - Readonly: c.HostConfig.ReadonlyRootfs, - } - rootUID, rootGID := daemon.GetRemappedUIDGID() - if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { - return err - } - cwd := c.Config.WorkingDir - if len(cwd) == 0 { - cwd = "/" - } - s.Process.Args = append([]string{c.Path}, c.Args...) 
- s.Process.Cwd = cwd - s.Process.Env = c.CreateDaemonEnvironment(linkedEnv) - s.Process.Terminal = c.Config.Tty - s.Hostname = c.FullHostname() - - return nil -} - -func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) { - s := oci.DefaultSpec() - if err := daemon.populateCommonSpec(&s, c); err != nil { - return nil, err - } - - var cgroupsPath string - scopePrefix := "docker" - parent := "/docker" - useSystemd := UsingSystemd(daemon.configStore) - if useSystemd { - parent = "system.slice" - } - - if c.HostConfig.CgroupParent != "" { - parent = c.HostConfig.CgroupParent - } else if daemon.configStore.CgroupParent != "" { - parent = daemon.configStore.CgroupParent - } - - if useSystemd { - cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID - logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath) - } else { - cgroupsPath = filepath.Join(parent, c.ID) - } - s.Linux.CgroupsPath = &cgroupsPath - - if err := setResources(&s, c.HostConfig.Resources); err != nil { - return nil, fmt.Errorf("linux runtime spec resources: %v", err) - } - s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj - s.Linux.Sysctl = c.HostConfig.Sysctls - if err := setDevices(&s, c); err != nil { - return nil, fmt.Errorf("linux runtime spec devices: %v", err) - } - if err := setRlimits(daemon, &s, c); err != nil { - return nil, fmt.Errorf("linux runtime spec rlimits: %v", err) - } - if err := setUser(&s, c); err != nil { - return nil, fmt.Errorf("linux spec user: %v", err) - } - if err := setNamespaces(daemon, &s, c); err != nil { - return nil, fmt.Errorf("linux spec namespaces: %v", err) - } - if err := setCapabilities(&s, c); err != nil { - return nil, fmt.Errorf("linux spec capabilities: %v", err) - } - if err := setSeccomp(daemon, &s, c); err != nil { - return nil, fmt.Errorf("linux seccomp: %v", err) - } - - if err := daemon.setupIpcDirs(c); err != nil { - return nil, err - } - - ms, err := daemon.setupMounts(c) - if err != nil { - return nil, err - } - ms = append(ms, c.IpcMounts()...) - ms = append(ms, c.TmpfsMounts()...) 
- sort.Sort(mounts(ms)) - if err := setMounts(daemon, &s, c, ms); err != nil { - return nil, fmt.Errorf("linux mounts: %v", err) - } - - for _, ns := range s.Linux.Namespaces { - if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled { - target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe")) - if err != nil { - return nil, err - } - - s.Hooks = specs.Hooks{ - Prestart: []specs.Hook{{ - Path: target, // FIXME: cross-platform - Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()}, - }}, - } - } - } - - if apparmor.IsEnabled() { - appArmorProfile := "docker-default" - if len(c.AppArmorProfile) > 0 { - appArmorProfile = c.AppArmorProfile - } else if c.HostConfig.Privileged { - appArmorProfile = "unconfined" - } - s.Process.ApparmorProfile = appArmorProfile - } - s.Process.SelinuxLabel = c.GetProcessLabel() - s.Process.NoNewPrivileges = c.NoNewPrivileges - s.Linux.MountLabel = c.MountLabel - - return (*libcontainerd.Spec)(&s), nil -} - -func clearReadOnly(m *specs.Mount) { - var opt []string - for _, o := range m.Options { - if o != "ro" { - opt = append(opt, o) - } - } - m.Options = opt -} diff --git a/daemon/oci_solaris.go b/daemon/oci_solaris.go deleted file mode 100644 index 05eca2116..000000000 --- a/daemon/oci_solaris.go +++ /dev/null @@ -1,12 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/oci" -) - -func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) { - s := oci.DefaultSpec() - return (*libcontainerd.Spec)(&s), nil -} diff --git a/daemon/oci_windows.go b/daemon/oci_windows.go deleted file mode 100644 index 571f27391..000000000 --- a/daemon/oci_windows.go +++ /dev/null @@ -1,200 +0,0 @@ -package daemon - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "syscall" - - "github.com/docker/docker/container" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/libcontainerd/windowsoci" - "github.com/docker/docker/oci" -) - -func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) { - s := oci.DefaultSpec() - - linkedEnv, err := daemon.setupLinkedContainers(c) - if err != nil { - return nil, err - } - - // TODO Windows - this can be removed. Not used (UID/GID) - rootUID, rootGID := daemon.GetRemappedUIDGID() - if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { - return nil, err - } - - img, err := daemon.imageStore.Get(c.ImageID) - if err != nil { - return nil, fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err) - } - - s.Platform.OSVersion = img.OSVersion - - // In base spec - s.Hostname = c.FullHostname() - - // In s.Mounts - mounts, err := daemon.setupMounts(c) - if err != nil { - return nil, err - } - for _, mount := range mounts { - s.Mounts = append(s.Mounts, windowsoci.Mount{ - Source: mount.Source, - Destination: mount.Destination, - Readonly: !mount.Writable, - }) - } - - // In s.Process - s.Process.Args = append([]string{c.Path}, c.Args...) - if !c.Config.ArgsEscaped { - s.Process.Args = escapeArgs(s.Process.Args) - } - s.Process.Cwd = c.Config.WorkingDir - if len(s.Process.Cwd) == 0 { - // We default to C:\ to workaround the oddity of the case that the - // default directory for cmd running as LocalSystem (or - // ContainerAdministrator) is c:\windows\system32. 
Hence docker run - // cmd will by default end in c:\windows\system32, rather - // than 'root' (/) on Linux. The oddity is that if you have a dockerfile - // which has no WORKDIR and has a COPY file ., . will be interpreted - // as c:\. Hence, setting it to default of c:\ makes for consistency. - s.Process.Cwd = `C:\` - } - s.Process.Env = c.CreateDaemonEnvironment(linkedEnv) - s.Process.InitialConsoleSize = c.HostConfig.ConsoleSize - s.Process.Terminal = c.Config.Tty - s.Process.User.User = c.Config.User - - // In spec.Root - s.Root.Path = c.BaseFS - s.Root.Readonly = c.HostConfig.ReadonlyRootfs - - // In s.Windows - s.Windows.FirstStart = !c.HasBeenStartedBefore - - // s.Windows.LayerFolder. - m, err := c.RWLayer.Metadata() - if err != nil { - return nil, fmt.Errorf("Failed to get layer metadata - %s", err) - } - s.Windows.LayerFolder = m["dir"] - - // s.Windows.LayerPaths - var layerPaths []string - if img.RootFS != nil && (img.RootFS.Type == image.TypeLayers || img.RootFS.Type == image.TypeLayersWithBase) { - // Get the layer path for each layer. - start := 1 - if img.RootFS.Type == image.TypeLayersWithBase { - // Include an empty slice to get the base layer ID. - start = 0 - } - max := len(img.RootFS.DiffIDs) - for i := start; i <= max; i++ { - img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] - path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) - if err != nil { - return nil, fmt.Errorf("Failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err) - } - // Reverse order, expecting parent most first - layerPaths = append([]string{path}, layerPaths...) - } - } - s.Windows.LayerPaths = layerPaths - - // Are we going to run as a Hyper-V container? - hv := false - if c.HostConfig.Isolation.IsDefault() { - // Container is set to use the default, so take the default from the daemon configuration - hv = daemon.defaultIsolation.IsHyperV() - } else { - // Container is requesting an isolation mode. Honour it. - hv = c.HostConfig.Isolation.IsHyperV() - } - if hv { - hvr := &windowsoci.HvRuntime{} - if img.RootFS != nil && img.RootFS.Type == image.TypeLayers { - // For TP5, the utility VM is part of the base layer. - // TODO-jstarks: Add support for separate utility VM images - // once it is decided how they can be stored. 
- uvmpath := filepath.Join(layerPaths[len(layerPaths)-1], "UtilityVM") - _, err = os.Stat(uvmpath) - if err != nil { - if os.IsNotExist(err) { - err = errors.New("container image does not contain a utility VM") - } - return nil, err - } - - hvr.ImagePath = uvmpath - } - - s.Windows.HvRuntime = hvr - } - - // In s.Windows.Networking - // Connect all the libnetwork allocated networks to the container - var epList []string - if c.NetworkSettings != nil { - for n := range c.NetworkSettings.Networks { - sn, err := daemon.FindNetwork(n) - if err != nil { - continue - } - - ep, err := c.GetEndpointInNetwork(sn) - if err != nil { - continue - } - - data, err := ep.DriverInfo() - if err != nil { - continue - } - if data["hnsid"] != nil { - epList = append(epList, data["hnsid"].(string)) - } - } - } - s.Windows.Networking = &windowsoci.Networking{ - EndpointList: epList, - } - - // In s.Windows.Resources - // @darrenstahlmsft implement these resources - cpuShares := uint64(c.HostConfig.CPUShares) - s.Windows.Resources = &windowsoci.Resources{ - CPU: &windowsoci.CPU{ - Percent: &c.HostConfig.CPUPercent, - Shares: &cpuShares, - }, - Memory: &windowsoci.Memory{ - Limit: &c.HostConfig.Memory, - //TODO Reservation: ..., - }, - Network: &windowsoci.Network{ - //TODO Bandwidth: ..., - }, - Storage: &windowsoci.Storage{ - Bps: &c.HostConfig.IOMaximumBandwidth, - Iops: &c.HostConfig.IOMaximumIOps, - //TODO SandboxSize: ..., - }, - } - return (*libcontainerd.Spec)(&s), nil -} - -func escapeArgs(args []string) []string { - escapedArgs := make([]string, len(args)) - for i, a := range args { - escapedArgs[i] = syscall.EscapeArg(a) - } - return escapedArgs -} diff --git a/daemon/pause.go b/daemon/pause.go deleted file mode 100644 index dbfafbc5f..000000000 --- a/daemon/pause.go +++ /dev/null @@ -1,49 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" -) - -// ContainerPause pauses a container -func (daemon *Daemon) ContainerPause(name string) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - if err := daemon.containerPause(container); err != nil { - return err - } - - return nil -} - -// containerPause pauses the container execution without stopping the process. -// The execution can be resumed by calling containerUnpause. -func (daemon *Daemon) containerPause(container *container.Container) error { - container.Lock() - defer container.Unlock() - - // We cannot Pause the container which is not running - if !container.Running { - return errNotRunning{container.ID} - } - - // We cannot Pause the container which is already paused - if container.Paused { - return fmt.Errorf("Container %s is already paused", container.ID) - } - - // We cannot Pause the container which is restarting - if container.Restarting { - return errContainerIsRestarting(container.ID) - } - - if err := daemon.containerd.Pause(container.ID); err != nil { - return fmt.Errorf("Cannot pause container %s: %s", container.ID, err) - } - - return nil -} diff --git a/daemon/rename.go b/daemon/rename.go deleted file mode 100644 index e92dfda86..000000000 --- a/daemon/rename.go +++ /dev/null @@ -1,98 +0,0 @@ -package daemon - -import ( - "fmt" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/libnetwork" -) - -// ContainerRename changes the name of a container, using the oldName -// to find the container. An error is returned if newName is already -// reserved. 
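For context, containerPause above guards the state transition: pausing is rejected unless the container is running, not already paused, and not restarting. A compilable sketch of those guards (state and canPause are invented names, not part of the patch):

    package main

    import "fmt"

    type state struct {
        Running, Paused, Restarting bool
    }

    // canPause mirrors the three guards in containerPause above.
    func canPause(id string, s state) error {
        if !s.Running {
            return fmt.Errorf("Container %s is not running", id)
        }
        if s.Paused {
            return fmt.Errorf("Container %s is already paused", id)
        }
        if s.Restarting {
            return fmt.Errorf("Container %s is restarting", id)
        }
        return nil
    }

    func main() {
        fmt.Println(canPause("abc123", state{Running: true, Paused: true}))
        // Container abc123 is already paused
    }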
-func (daemon *Daemon) ContainerRename(oldName, newName string) error { - var ( - sid string - sb libnetwork.Sandbox - ) - - if oldName == "" || newName == "" { - return fmt.Errorf("Neither old nor new names may be empty") - } - - if newName[0] != '/' { - newName = "/" + newName - } - - container, err := daemon.GetContainer(oldName) - if err != nil { - return err - } - - oldName = container.Name - oldIsAnonymousEndpoint := container.NetworkSettings.IsAnonymousEndpoint - - if oldName == newName { - return fmt.Errorf("Renaming a container with the same name as its current name") - } - - container.Lock() - defer container.Unlock() - - if newName, err = daemon.reserveName(container.ID, newName); err != nil { - return fmt.Errorf("Error when allocating new name: %v", err) - } - - container.Name = newName - container.NetworkSettings.IsAnonymousEndpoint = false - - defer func() { - if err != nil { - container.Name = oldName - container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint - daemon.reserveName(container.ID, oldName) - daemon.releaseName(newName) - } - }() - - daemon.releaseName(oldName) - if err = container.ToDisk(); err != nil { - return err - } - - attributes := map[string]string{ - "oldName": oldName, - } - - if !container.Running { - daemon.LogContainerEventWithAttributes(container, "rename", attributes) - return nil - } - - defer func() { - if err != nil { - container.Name = oldName - container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint - if e := container.ToDisk(); e != nil { - logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) - } - } - }() - - sid = container.NetworkSettings.SandboxID - if daemon.netController != nil { - sb, err = daemon.netController.SandboxByID(sid) - if err != nil { - return err - } - - err = sb.Rename(strings.TrimPrefix(container.Name, "/")) - if err != nil { - return err - } - } - - daemon.LogContainerEventWithAttributes(container, "rename", attributes) - return nil -} diff --git a/daemon/resize.go b/daemon/resize.go deleted file mode 100644 index 747353852..000000000 --- a/daemon/resize.go +++ /dev/null @@ -1,40 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/libcontainerd" -) - -// ContainerResize changes the size of the TTY of the process running -// in the container with the given name to the given height and width. -func (daemon *Daemon) ContainerResize(name string, height, width int) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - if !container.IsRunning() { - return errNotRunning{container.ID} - } - - if err = daemon.containerd.Resize(container.ID, libcontainerd.InitFriendlyName, width, height); err == nil { - attributes := map[string]string{ - "height": fmt.Sprintf("%d", height), - "width": fmt.Sprintf("%d", width), - } - daemon.LogContainerEventWithAttributes(container, "resize", attributes) - } - return err -} - -// ContainerExecResize changes the size of the TTY of the process -// running in the exec with the given name to the given height and -// width. 
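For context, ContainerRename above depends on Go's named error return: the deferred closures restore the old name and settings only if err is non-nil when the function exits. A stripped-down sketch of that rollback pattern over a hypothetical in-memory name index (names, rename, and failLater are invented for illustration):

    package main

    import "fmt"

    var names = map[string]string{"/old": "cid123"} // name -> container ID

    // rename reserves newName, releases oldName, and relies on the named
    // return err: the deferred closure rolls both back on failure.
    func rename(id, oldName, newName string, failLater bool) (err error) {
        if _, taken := names[newName]; taken {
            return fmt.Errorf("Error when allocating new name: %s is already in use", newName)
        }
        names[newName] = id
        delete(names, oldName)

        defer func() {
            if err != nil { // sees the final value of err
                names[oldName] = id
                delete(names, newName)
            }
        }()

        if failLater { // stand-in for the sandbox rename / disk write steps
            return fmt.Errorf("simulated failure")
        }
        return nil
    }

    func main() {
        fmt.Println(rename("cid123", "/old", "/new", true), names)  // rolled back
        fmt.Println(rename("cid123", "/old", "/new", false), names) // committed
    }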
-func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { - ec, err := daemon.getExecConfig(name) - if err != nil { - return err - } - return daemon.containerd.Resize(ec.ContainerID, ec.ID, width, height) -} diff --git a/daemon/restart.go b/daemon/restart.go deleted file mode 100644 index 3779116cf..000000000 --- a/daemon/restart.go +++ /dev/null @@ -1,48 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" -) - -// ContainerRestart stops and starts a container. It attempts to -// gracefully stop the container within the given timeout, forcefully -// stopping it if the timeout is exceeded. If given a negative -// timeout, ContainerRestart will wait forever until a graceful -// stop. Returns an error if the container cannot be found, or if -// there is an underlying error at any stage of the restart. -func (daemon *Daemon) ContainerRestart(name string, seconds int) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - if err := daemon.containerRestart(container, seconds); err != nil { - return fmt.Errorf("Cannot restart container %s: %v", name, err) - } - return nil -} - -// containerRestart attempts to gracefully stop and then start the -// container. When stopping, wait for the given duration in seconds to -// gracefully stop, before forcefully terminating the container. If -// given a negative duration, wait forever for a graceful stop. -func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error { - // Avoid unnecessarily unmounting and then directly mounting - // the container when the container stops and then starts - // again - if err := daemon.Mount(container); err == nil { - defer daemon.Unmount(container) - } - - if err := daemon.containerStop(container, seconds); err != nil { - return err - } - - if err := daemon.containerStart(container); err != nil { - return err - } - - daemon.LogContainerEvent(container, "restart") - return nil -} diff --git a/daemon/search.go b/daemon/search.go deleted file mode 100644 index 09c6ae49b..000000000 --- a/daemon/search.go +++ /dev/null @@ -1,94 +0,0 @@ -package daemon - -import ( - "fmt" - "strconv" - - "golang.org/x/net/context" - - "github.com/docker/docker/dockerversion" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - registrytypes "github.com/docker/engine-api/types/registry" -) - -var acceptedSearchFilterTags = map[string]bool{ - "is-automated": true, - "is-official": true, - "stars": true, -} - -// SearchRegistryForImages queries the registry for images matching -// term. authConfig is used to login. 
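For context, when the "stars" filter carries several values, the loop in SearchRegistryForImages below keeps only the largest threshold. A tiny self-contained illustration (maxStars is an invented name, not part of the patch):

    package main

    import (
        "fmt"
        "strconv"
    )

    // maxStars mirrors the loop in SearchRegistryForImages below: every
    // value must parse as an integer, and the highest one becomes the
    // effective filter.
    func maxStars(values []string) (int, error) {
        threshold := 0
        for _, v := range values {
            n, err := strconv.Atoi(v)
            if err != nil {
                return 0, fmt.Errorf("Invalid filter 'stars=%s'", v)
            }
            if n > threshold {
                threshold = n
            }
        }
        return threshold, nil
    }

    func main() {
        fmt.Println(maxStars([]string{"3", "10", "5"})) // 10 <nil>
    }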
-func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, - authConfig *types.AuthConfig, - headers map[string][]string) (*registrytypes.SearchResults, error) { - - searchFilters, err := filters.FromParam(filtersArgs) - if err != nil { - return nil, err - } - if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { - return nil, err - } - - var isAutomated, isOfficial bool - var hasStarFilter = 0 - if searchFilters.Include("is-automated") { - if searchFilters.UniqueExactMatch("is-automated", "true") { - isAutomated = true - } else if !searchFilters.UniqueExactMatch("is-automated", "false") { - return nil, fmt.Errorf("Invalid filter 'is-automated=%s'", searchFilters.Get("is-automated")) - } - } - if searchFilters.Include("is-official") { - if searchFilters.UniqueExactMatch("is-official", "true") { - isOfficial = true - } else if !searchFilters.UniqueExactMatch("is-official", "false") { - return nil, fmt.Errorf("Invalid filter 'is-official=%s'", searchFilters.Get("is-official")) - } - } - if searchFilters.Include("stars") { - hasStars := searchFilters.Get("stars") - for _, hasStar := range hasStars { - iHasStar, err := strconv.Atoi(hasStar) - if err != nil { - return nil, fmt.Errorf("Invalid filter 'stars=%s'", hasStar) - } - if iHasStar > hasStarFilter { - hasStarFilter = iHasStar - } - } - } - - unfilteredResult, err := daemon.RegistryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers) - if err != nil { - return nil, err - } - - filteredResults := []registrytypes.SearchResult{} - for _, result := range unfilteredResult.Results { - if searchFilters.Include("is-automated") { - if isAutomated != result.IsAutomated { - continue - } - } - if searchFilters.Include("is-official") { - if isOfficial != result.IsOfficial { - continue - } - } - if searchFilters.Include("stars") { - if result.StarCount < hasStarFilter { - continue - } - } - filteredResults = append(filteredResults, result) - } - - return ®istrytypes.SearchResults{ - Query: unfilteredResult.Query, - NumResults: len(filteredResults), - Results: filteredResults, - }, nil -} diff --git a/daemon/search_test.go b/daemon/search_test.go deleted file mode 100644 index bffa84e19..000000000 --- a/daemon/search_test.go +++ /dev/null @@ -1,357 +0,0 @@ -package daemon - -import ( - "fmt" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" -) - -type FakeService struct { - registry.DefaultService - - shouldReturnError bool - - term string - results []registrytypes.SearchResult -} - -func (s *FakeService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { - if s.shouldReturnError { - return nil, fmt.Errorf("Search unknown error") - } - return ®istrytypes.SearchResults{ - Query: s.term, - NumResults: len(s.results), - Results: s.results, - }, nil -} - -func TestSearchRegistryForImagesErrors(t *testing.T) { - errorCases := []struct { - filtersArgs string - shouldReturnError bool - expectedError string - }{ - { - expectedError: "Search unknown error", - shouldReturnError: true, - }, - { - filtersArgs: "invalid json", - expectedError: "invalid character 'i' looking for beginning of value", - }, - { - filtersArgs: `{"type":{"custom":true}}`, - expectedError: "Invalid filter 'type'", - }, - { 
- filtersArgs: `{"is-automated":{"invalid":true}}`, - expectedError: "Invalid filter 'is-automated=[invalid]'", - }, - { - filtersArgs: `{"is-automated":{"true":true,"false":true}}`, - expectedError: "Invalid filter 'is-automated", - }, - { - filtersArgs: `{"is-official":{"invalid":true}}`, - expectedError: "Invalid filter 'is-official=[invalid]'", - }, - { - filtersArgs: `{"is-official":{"true":true,"false":true}}`, - expectedError: "Invalid filter 'is-official", - }, - { - filtersArgs: `{"stars":{"invalid":true}}`, - expectedError: "Invalid filter 'stars=invalid'", - }, - { - filtersArgs: `{"stars":{"1":true,"invalid":true}}`, - expectedError: "Invalid filter 'stars=invalid'", - }, - } - for index, e := range errorCases { - daemon := &Daemon{ - RegistryService: &FakeService{ - shouldReturnError: e.shouldReturnError, - }, - } - _, err := daemon.SearchRegistryForImages(context.Background(), e.filtersArgs, "term", 25, nil, map[string][]string{}) - if err == nil { - t.Errorf("%d: expected an error, got nothing", index) - } - if !strings.Contains(err.Error(), e.expectedError) { - t.Errorf("%d: expected error to contain %s, got %s", index, e.expectedError, err.Error()) - } - } -} - -func TestSearchRegistryForImages(t *testing.T) { - term := "term" - successCases := []struct { - filtersArgs string - registryResults []registrytypes.SearchResult - expectedResults []registrytypes.SearchResult - }{ - { - filtersArgs: "", - registryResults: []registrytypes.SearchResult{}, - expectedResults: []registrytypes.SearchResult{}, - }, - { - filtersArgs: "", - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - }, - }, - }, - { - filtersArgs: `{"is-automated":{"true":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - }, - }, - expectedResults: []registrytypes.SearchResult{}, - }, - { - filtersArgs: `{"is-automated":{"true":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsAutomated: true, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsAutomated: true, - }, - }, - }, - { - filtersArgs: `{"is-automated":{"false":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsAutomated: true, - }, - }, - expectedResults: []registrytypes.SearchResult{}, - }, - { - filtersArgs: `{"is-automated":{"false":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsAutomated: false, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsAutomated: false, - }, - }, - }, - { - filtersArgs: `{"is-official":{"true":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - }, - }, - expectedResults: []registrytypes.SearchResult{}, - }, - { - filtersArgs: `{"is-official":{"true":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsOfficial: true, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsOfficial: true, - }, - }, - }, - { - filtersArgs: `{"is-official":{"false":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: 
"description", - IsOfficial: true, - }, - }, - expectedResults: []registrytypes.SearchResult{}, - }, - { - filtersArgs: `{"is-official":{"false":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsOfficial: false, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsOfficial: false, - }, - }, - }, - { - filtersArgs: `{"stars":{"0":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - StarCount: 0, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - StarCount: 0, - }, - }, - }, - { - filtersArgs: `{"stars":{"1":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - StarCount: 0, - }, - }, - expectedResults: []registrytypes.SearchResult{}, - }, - { - filtersArgs: `{"stars":{"1":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name0", - Description: "description0", - StarCount: 0, - }, - { - Name: "name1", - Description: "description1", - StarCount: 1, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name1", - Description: "description1", - StarCount: 1, - }, - }, - }, - { - filtersArgs: `{"stars":{"1":true}, "is-official":{"true":true}, "is-automated":{"true":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name0", - Description: "description0", - StarCount: 0, - IsOfficial: true, - IsAutomated: true, - }, - { - Name: "name1", - Description: "description1", - StarCount: 1, - IsOfficial: false, - IsAutomated: true, - }, - { - Name: "name2", - Description: "description2", - StarCount: 1, - IsOfficial: true, - IsAutomated: false, - }, - { - Name: "name3", - Description: "description3", - StarCount: 2, - IsOfficial: true, - IsAutomated: true, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name3", - Description: "description3", - StarCount: 2, - IsOfficial: true, - IsAutomated: true, - }, - }, - }, - } - for index, s := range successCases { - daemon := &Daemon{ - RegistryService: &FakeService{ - term: term, - results: s.registryResults, - }, - } - results, err := daemon.SearchRegistryForImages(context.Background(), s.filtersArgs, term, 25, nil, map[string][]string{}) - if err != nil { - t.Errorf("%d: %v", index, err) - } - if results.Query != term { - t.Errorf("%d: expected Query to be %s, got %s", index, term, results.Query) - } - if results.NumResults != len(s.expectedResults) { - t.Errorf("%d: expected NumResults to be %d, got %d", index, len(s.expectedResults), results.NumResults) - } - for _, result := range results.Results { - found := false - for _, expectedResult := range s.expectedResults { - if expectedResult.Name == result.Name && - expectedResult.Description == result.Description && - expectedResult.IsAutomated == result.IsAutomated && - expectedResult.IsOfficial == result.IsOfficial && - expectedResult.StarCount == result.StarCount { - found = true - } - } - if !found { - t.Errorf("%d: expected results %v, got %v", index, s.expectedResults, results.Results) - } - } - } -} diff --git a/daemon/seccomp_disabled.go b/daemon/seccomp_disabled.go deleted file mode 100644 index 4ad1b7c53..000000000 --- a/daemon/seccomp_disabled.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build linux,!seccomp - -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" - "github.com/opencontainers/specs/specs-go" -) - 
-var supportsSeccomp = false - -func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { - if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { - return fmt.Errorf("seccomp profiles are not supported on this daemon, you cannot specify a custom seccomp profile") - } - return nil -} diff --git a/daemon/seccomp_linux.go b/daemon/seccomp_linux.go deleted file mode 100644 index e9622787e..000000000 --- a/daemon/seccomp_linux.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build linux,seccomp - -package daemon - -import ( - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/profiles/seccomp" - "github.com/opencontainers/specs/specs-go" -) - -var supportsSeccomp = true - -func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { - var profile *specs.Seccomp - var err error - - if c.HostConfig.Privileged { - return nil - } - - if !daemon.seccompEnabled { - if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { - return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.") - } - logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.") - c.SeccompProfile = "unconfined" - } - if c.SeccompProfile == "unconfined" { - return nil - } - if c.SeccompProfile != "" { - profile, err = seccomp.LoadProfile(c.SeccompProfile) - if err != nil { - return err - } - } else { - profile, err = seccomp.GetDefaultProfile(rs) - if err != nil { - return err - } - } - - rs.Linux.Seccomp = profile - return nil -} diff --git a/daemon/seccomp_unsupported.go b/daemon/seccomp_unsupported.go deleted file mode 100644 index b3691e96a..000000000 --- a/daemon/seccomp_unsupported.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !linux - -package daemon - -var supportsSeccomp = false diff --git a/daemon/selinux_linux.go b/daemon/selinux_linux.go deleted file mode 100644 index 83a344711..000000000 --- a/daemon/selinux_linux.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build linux - -package daemon - -import "github.com/opencontainers/runc/libcontainer/selinux" - -func selinuxSetDisabled() { - selinux.SetDisabled() -} - -func selinuxFreeLxcContexts(label string) { - selinux.FreeLxcContexts(label) -} - -func selinuxEnabled() bool { - return selinux.SelinuxEnabled() -} diff --git a/daemon/selinux_unsupported.go b/daemon/selinux_unsupported.go deleted file mode 100644 index 25a56ad15..000000000 --- a/daemon/selinux_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package daemon - -func selinuxSetDisabled() { -} - -func selinuxFreeLxcContexts(label string) { -} - -func selinuxEnabled() bool { - return false -} diff --git a/daemon/start.go b/daemon/start.go deleted file mode 100644 index 7a0bc2121..000000000 --- a/daemon/start.go +++ /dev/null @@ -1,194 +0,0 @@ -package daemon - -import ( - "fmt" - "net/http" - "runtime" - "strings" - "syscall" - - "google.golang.org/grpc" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/errors" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/runconfig" - containertypes "github.com/docker/engine-api/types/container" -) - -// ContainerStart starts a container. 
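For context, the seccomp and SELinux files just above select an implementation per platform with build constraints: one symbol, several mutually exclusive files. A minimal two-file sketch of the same layout for a hypothetical featurex flag (both file names and the flag are invented); note the blank line required between the +build comment and the package clause:

    // feature_enabled.go
    // +build linux,featurex

    package daemon

    var supportsFeatureX = true

    // feature_disabled.go
    // +build !linux !featurex

    package daemon

    var supportsFeatureX = false

Within a +build line, comma-separated terms are ANDed and space-separated terms are ORed, so the second file's constraint is the exact complement of the first's.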
-func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, validateHostname bool) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - if container.IsPaused() { - return fmt.Errorf("Cannot start a paused container, try unpause instead.") - } - - if container.IsRunning() { - err := fmt.Errorf("Container already started") - return errors.NewErrorWithStatusCode(err, http.StatusNotModified) - } - - // Windows does not have the backwards compatibility issue here. - if runtime.GOOS != "windows" { - // This is kept for backward compatibility - hostconfig should be passed when - // creating a container, not during start. - if hostConfig != nil { - logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and will be removed in Docker 1.12") - oldNetworkMode := container.HostConfig.NetworkMode - if err := daemon.setSecurityOptions(container, hostConfig); err != nil { - return err - } - if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil { - return err - } - if err := daemon.setHostConfig(container, hostConfig); err != nil { - return err - } - newNetworkMode := container.HostConfig.NetworkMode - if string(oldNetworkMode) != string(newNetworkMode) { - // if user has change the network mode on starting, clean up the - // old networks. It is a deprecated feature and will be removed in Docker 1.12 - container.NetworkSettings.Networks = nil - if err := container.ToDisk(); err != nil { - return err - } - } - container.InitDNSHostConfig() - } - } else { - if hostConfig != nil { - return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create") - } - } - - // check if hostConfig is in line with the current system settings. - // It may happen cgroups are umounted or the like. - if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false, validateHostname); err != nil { - return err - } - // Adapt for old containers in case we have updates in this function and - // old containers never have chance to call the new function in create stage. - if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil { - return err - } - - return daemon.containerStart(container) -} - -// Start starts a container -func (daemon *Daemon) Start(container *container.Container) error { - return daemon.containerStart(container) -} - -// containerStart prepares the container to run by setting up everything the -// container needs, such as storage and networking, as well as links -// between containers. The container is left waiting for a signal to -// begin running. -func (daemon *Daemon) containerStart(container *container.Container) (err error) { - container.Lock() - defer container.Unlock() - - if container.Running { - return nil - } - - if container.RemovalInProgress || container.Dead { - return fmt.Errorf("Container is marked for removal and cannot be started.") - } - - // if we encounter an error during start we need to ensure that any other - // setup has been cleaned up properly - defer func() { - if err != nil { - container.SetError(err) - // if no one else has set it, make sure we don't leave it at zero - if container.ExitCode() == 0 { - container.SetExitCode(128) - } - container.ToDisk() - daemon.Cleanup(container) - } - }() - - if err := daemon.conditionalMountOnStart(container); err != nil { - return err - } - - // Make sure NetworkMode has an acceptable value. 
We do this to ensure - // backwards API compatibility. - container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) - - if err := daemon.initializeNetworking(container); err != nil { - return err - } - - spec, err := daemon.createSpec(container) - if err != nil { - return err - } - - createOptions := []libcontainerd.CreateOption{libcontainerd.WithRestartManager(container.RestartManager(true))} - copts, err := daemon.getLibcontainerdCreateOptions(container) - if err != nil { - return err - } - if copts != nil { - createOptions = append(createOptions, *copts...) - } - - if err := daemon.containerd.Create(container.ID, *spec, createOptions...); err != nil { - errDesc := grpc.ErrorDesc(err) - logrus.Errorf("Create container failed with error: %s", errDesc) - // if we receive an internal error from the initial start of a container then lets - // return it instead of entering the restart loop - // set to 127 for container cmd not found/does not exist) - if strings.Contains(errDesc, container.Path) && - (strings.Contains(errDesc, "executable file not found") || - strings.Contains(errDesc, "no such file or directory") || - strings.Contains(errDesc, "system cannot find the file specified")) { - container.SetExitCode(127) - } - // set to 126 for container cmd can't be invoked errors - if strings.Contains(errDesc, syscall.EACCES.Error()) { - container.SetExitCode(126) - } - - container.Reset(false) - - return fmt.Errorf("%s", errDesc) - } - - return nil -} - -// Cleanup releases any network resources allocated to the container along with any rules -// around how containers are linked together. It also unmounts the container's root filesystem. -func (daemon *Daemon) Cleanup(container *container.Container) { - daemon.releaseNetwork(container) - - container.UnmountIpcMounts(detachMounted) - - if err := daemon.conditionalUnmountOnCleanup(container); err != nil { - // FIXME: remove once reference counting for graphdrivers has been refactored - // Ensure that all the mounts are gone - if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil { - daemon.cleanupMountsByID(mountid) - } - } - - for _, eConfig := range container.ExecCommands.Commands() { - daemon.unregisterExecCommand(container, eConfig) - } - - if container.BaseFS != "" { - if err := container.UnmountVolumes(false, daemon.LogVolumeEvent); err != nil { - logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) - } - } - container.CancelAttachContext() -} diff --git a/daemon/start_linux.go b/daemon/start_linux.go deleted file mode 100644 index c6ad22af8..000000000 --- a/daemon/start_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" -) - -func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (*[]libcontainerd.CreateOption, error) { - createOptions := []libcontainerd.CreateOption{} - - // Ensure a runtime has been assigned to this container - if container.HostConfig.Runtime == "" { - container.HostConfig.Runtime = stockRuntimeName - container.ToDisk() - } - - rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime) - if rt == nil { - return nil, fmt.Errorf("no such runtime '%s'", container.HostConfig.Runtime) - } - createOptions = append(createOptions, libcontainerd.WithRuntime(rt.Path, rt.Args)) - - return &createOptions, nil -} diff --git a/daemon/start_windows.go b/daemon/start_windows.go deleted file mode 100644 index af3fe7602..000000000 --- 
a/daemon/start_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" -) - -func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (*[]libcontainerd.CreateOption, error) { - return &[]libcontainerd.CreateOption{}, nil -} diff --git a/daemon/stats.go b/daemon/stats.go deleted file mode 100644 index f76a68977..000000000 --- a/daemon/stats.go +++ /dev/null @@ -1,194 +0,0 @@ -package daemon - -import ( - "encoding/json" - "errors" - "fmt" - "runtime" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/versions" - "github.com/docker/engine-api/types/versions/v1p20" -) - -// ContainerStats writes information about the container to the stream -// given in the config object. -func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *backend.ContainerStatsConfig) error { - if runtime.GOOS == "windows" { - return errors.New("Windows does not support stats") - } - // Remote API version (used for backwards compatibility) - apiVersion := config.Version - - container, err := daemon.GetContainer(prefixOrName) - if err != nil { - return err - } - - // If the container is not running and requires no stream, return an empty stats. - if !container.IsRunning() && !config.Stream { - return json.NewEncoder(config.OutStream).Encode(&types.Stats{}) - } - - outStream := config.OutStream - if config.Stream { - wf := ioutils.NewWriteFlusher(outStream) - defer wf.Close() - wf.Flush() - outStream = wf - } - - var preCPUStats types.CPUStats - getStatJSON := func(v interface{}) *types.StatsJSON { - ss := v.(types.StatsJSON) - ss.PreCPUStats = preCPUStats - preCPUStats = ss.CPUStats - return &ss - } - - enc := json.NewEncoder(outStream) - - updates := daemon.subscribeToContainerStats(container) - defer daemon.unsubscribeToContainerStats(container, updates) - - noStreamFirstFrame := true - for { - select { - case v, ok := <-updates: - if !ok { - return nil - } - - var statsJSON interface{} - statsJSONPost120 := getStatJSON(v) - if versions.LessThan(apiVersion, "1.21") { - var ( - rxBytes uint64 - rxPackets uint64 - rxErrors uint64 - rxDropped uint64 - txBytes uint64 - txPackets uint64 - txErrors uint64 - txDropped uint64 - ) - for _, v := range statsJSONPost120.Networks { - rxBytes += v.RxBytes - rxPackets += v.RxPackets - rxErrors += v.RxErrors - rxDropped += v.RxDropped - txBytes += v.TxBytes - txPackets += v.TxPackets - txErrors += v.TxErrors - txDropped += v.TxDropped - } - statsJSON = &v1p20.StatsJSON{ - Stats: statsJSONPost120.Stats, - Network: types.NetworkStats{ - RxBytes: rxBytes, - RxPackets: rxPackets, - RxErrors: rxErrors, - RxDropped: rxDropped, - TxBytes: txBytes, - TxPackets: txPackets, - TxErrors: txErrors, - TxDropped: txDropped, - }, - } - } else { - statsJSON = statsJSONPost120 - } - - if !config.Stream && noStreamFirstFrame { - // prime the cpu stats so they aren't 0 in the final output - noStreamFirstFrame = false - continue - } - - if err := enc.Encode(statsJSON); err != nil { - return err - } - - if !config.Stream { - return nil - } - case <-ctx.Done(): - return nil - } - } -} - -func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} { - return daemon.statsCollector.collect(c) -} - -func (daemon *Daemon) 
unsubscribeToContainerStats(c *container.Container, ch chan interface{}) { - daemon.statsCollector.unsubscribe(c, ch) -} - -// GetContainerStats collects all the stats published by a container -func (daemon *Daemon) GetContainerStats(container *container.Container) (*types.StatsJSON, error) { - stats, err := daemon.stats(container) - if err != nil { - return nil, err - } - - if stats.Networks, err = daemon.getNetworkStats(container); err != nil { - return nil, err - } - - return stats, nil -} - -// Resolve Network SandboxID in case the container reuse another container's network stack -func (daemon *Daemon) getNetworkSandboxID(c *container.Container) (string, error) { - curr := c - for curr.HostConfig.NetworkMode.IsContainer() { - containerID := curr.HostConfig.NetworkMode.ConnectedContainer() - connected, err := daemon.GetContainer(containerID) - if err != nil { - return "", fmt.Errorf("Could not get container for %s", containerID) - } - curr = connected - } - return curr.NetworkSettings.SandboxID, nil -} - -func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { - sandboxID, err := daemon.getNetworkSandboxID(c) - if err != nil { - return nil, err - } - - sb, err := daemon.netController.SandboxByID(sandboxID) - if err != nil { - return nil, err - } - - lnstats, err := sb.Statistics() - if err != nil { - return nil, err - } - - stats := make(map[string]types.NetworkStats) - // Convert libnetwork nw stats into engine-api stats - for ifName, ifStats := range lnstats { - stats[ifName] = types.NetworkStats{ - RxBytes: ifStats.RxBytes, - RxPackets: ifStats.RxPackets, - RxErrors: ifStats.RxErrors, - RxDropped: ifStats.RxDropped, - TxBytes: ifStats.TxBytes, - TxPackets: ifStats.TxPackets, - TxErrors: ifStats.TxErrors, - TxDropped: ifStats.TxDropped, - } - } - - return stats, nil -} diff --git a/daemon/stats_collector_solaris.go b/daemon/stats_collector_solaris.go deleted file mode 100644 index 9cf9f0a94..000000000 --- a/daemon/stats_collector_solaris.go +++ /dev/null @@ -1,34 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "time" -) - -// newStatsCollector returns a new statsCollector for collection stats -// for a registered container at the specified interval. The collector allows -// non-running containers to be added and will start processing stats when -// they are started. -func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { - return &statsCollector{} -} - -// statsCollector manages and provides container resource stats -type statsCollector struct { -} - -// collect registers the container with the collector and adds it to -// the event loop for collection on the specified interval returning -// a channel for the subscriber to receive on. -func (s *statsCollector) collect(c *container.Container) chan interface{} { - return nil -} - -// stopCollection closes the channels for all subscribers and removes -// the container from metrics collection. -func (s *statsCollector) stopCollection(c *container.Container) { -} - -// unsubscribe removes a specific subscriber from receiving updates for a container's stats. 
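The PreCPUStats bookkeeping above, and the noStreamFirstFrame skip that primes the first sample, exist so a client can compute a usage delta between two consecutive frames; with a single frame both counters would read zero. A sketch of the percentage calculation a consumer typically performs on the encoded frames — essentially the math the removed api/client/container/stats_helpers.go carried out, with field names from engine-api's types:

import "github.com/docker/engine-api/types"

// cpuPercent derives CPU utilization from one streamed frame, using the
// previous sample that the stats stream folded into PreCPUStats.
func cpuPercent(v *types.StatsJSON) float64 {
	cpuDelta := float64(v.CPUStats.CPUUsage.TotalUsage) - float64(v.PreCPUStats.CPUUsage.TotalUsage)
	sysDelta := float64(v.CPUStats.SystemUsage) - float64(v.PreCPUStats.SystemUsage)
	if sysDelta <= 0.0 || cpuDelta <= 0.0 {
		return 0.0
	}
	// scale by the number of cores reporting per-CPU usage
	return (cpuDelta / sysDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0
}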
-func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { -} diff --git a/daemon/stats_collector_unix.go b/daemon/stats_collector_unix.go deleted file mode 100644 index f66dc2c3c..000000000 --- a/daemon/stats_collector_unix.go +++ /dev/null @@ -1,189 +0,0 @@ -// +build !windows,!solaris - -package daemon - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/pubsub" - sysinfo "github.com/docker/docker/pkg/system" - "github.com/docker/engine-api/types" - "github.com/opencontainers/runc/libcontainer/system" -) - -type statsSupervisor interface { - // GetContainerStats collects all the stats related to a container - GetContainerStats(container *container.Container) (*types.StatsJSON, error) -} - -// newStatsCollector returns a new statsCollector that collections -// network and cgroup stats for a registered container at the specified -// interval. The collector allows non-running containers to be added -// and will start processing stats when they are started. -func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { - s := &statsCollector{ - interval: interval, - supervisor: daemon, - publishers: make(map[*container.Container]*pubsub.Publisher), - clockTicksPerSecond: uint64(system.GetClockTicks()), - bufReader: bufio.NewReaderSize(nil, 128), - } - meminfo, err := sysinfo.ReadMemInfo() - if err == nil && meminfo.MemTotal > 0 { - s.machineMemory = uint64(meminfo.MemTotal) - } - - go s.run() - return s -} - -// statsCollector manages and provides container resource stats -type statsCollector struct { - m sync.Mutex - supervisor statsSupervisor - interval time.Duration - clockTicksPerSecond uint64 - publishers map[*container.Container]*pubsub.Publisher - bufReader *bufio.Reader - machineMemory uint64 -} - -// collect registers the container with the collector and adds it to -// the event loop for collection on the specified interval returning -// a channel for the subscriber to receive on. -func (s *statsCollector) collect(c *container.Container) chan interface{} { - s.m.Lock() - defer s.m.Unlock() - publisher, exists := s.publishers[c] - if !exists { - publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) - s.publishers[c] = publisher - } - return publisher.Subscribe() -} - -// stopCollection closes the channels for all subscribers and removes -// the container from metrics collection. -func (s *statsCollector) stopCollection(c *container.Container) { - s.m.Lock() - if publisher, exists := s.publishers[c]; exists { - publisher.Close() - delete(s.publishers, c) - } - s.m.Unlock() -} - -// unsubscribe removes a specific subscriber from receiving updates for a container's stats. -func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { - s.m.Lock() - publisher := s.publishers[c] - if publisher != nil { - publisher.Evict(ch) - if publisher.Len() == 0 { - delete(s.publishers, c) - } - } - s.m.Unlock() -} - -func (s *statsCollector) run() { - type publishersPair struct { - container *container.Container - publisher *pubsub.Publisher - } - // we cannot determine the capacity here. 
- // it will grow enough in first iteration - var pairs []publishersPair - - for range time.Tick(s.interval) { - // it does not make sense in the first iteration, - // but saves allocations in further iterations - pairs = pairs[:0] - - s.m.Lock() - for container, publisher := range s.publishers { - // copy pointers here to release the lock ASAP - pairs = append(pairs, publishersPair{container, publisher}) - } - s.m.Unlock() - if len(pairs) == 0 { - continue - } - - systemUsage, err := s.getSystemCPUUsage() - if err != nil { - logrus.Errorf("collecting system cpu usage: %v", err) - continue - } - - for _, pair := range pairs { - stats, err := s.supervisor.GetContainerStats(pair.container) - if err != nil { - if _, ok := err.(errNotRunning); !ok { - logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) - } - continue - } - // FIXME: move to containerd - stats.CPUStats.SystemUsage = systemUsage - - pair.publisher.Publish(*stats) - } - } -} - -const nanoSecondsPerSecond = 1e9 - -// getSystemCPUUsage returns the host system's cpu usage in -// nanoseconds. An error is returned if the format of the underlying -// file does not match. -// -// Uses /proc/stat defined by POSIX. Looks for the cpu -// statistics line and then sums up the first seven fields -// provided. See `man 5 proc` for details on specific field -// information. -func (s *statsCollector) getSystemCPUUsage() (uint64, error) { - var line string - f, err := os.Open("/proc/stat") - if err != nil { - return 0, err - } - defer func() { - s.bufReader.Reset(nil) - f.Close() - }() - s.bufReader.Reset(f) - err = nil - for err == nil { - line, err = s.bufReader.ReadString('\n') - if err != nil { - break - } - parts := strings.Fields(line) - switch parts[0] { - case "cpu": - if len(parts) < 8 { - return 0, fmt.Errorf("invalid number of cpu fields") - } - var totalClockTicks uint64 - for _, i := range parts[1:8] { - v, err := strconv.ParseUint(i, 10, 64) - if err != nil { - return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) - } - totalClockTicks += v - } - return (totalClockTicks * nanoSecondsPerSecond) / - s.clockTicksPerSecond, nil - } - } - return 0, fmt.Errorf("invalid stat format. Error trying to parse the '/proc/stat' file") -} diff --git a/daemon/stats_collector_windows.go b/daemon/stats_collector_windows.go deleted file mode 100644 index b6cb24cd7..000000000 --- a/daemon/stats_collector_windows.go +++ /dev/null @@ -1,35 +0,0 @@ -package daemon - -import ( - "time" - - "github.com/docker/docker/container" -) - -// newStatsCollector returns a new statsCollector for collection stats -// for a registered container at the specified interval. The collector allows -// non-running containers to be added and will start processing stats when -// they are started. -func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { - return &statsCollector{} -} - -// statsCollector manages and provides container resource stats -type statsCollector struct { -} - -// collect registers the container with the collector and adds it to -// the event loop for collection on the specified interval returning -// a channel for the subscriber to receive on. -func (s *statsCollector) collect(c *container.Container) chan interface{} { - return nil -} - -// stopCollection closes the channels for all subscribers and removes -// the container from metrics collection. 
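Tying the halves together: collect hands back a pubsub subscription, the run loop publishes a fresh types.StatsJSON per container each interval, and it is the subscriber's job to evict itself when done. A sketch of a consumer in the shape ContainerStats uses — watchStats is a hypothetical helper, not code from this tree:

// watchStats drains one container's samples until the publisher closes
// the channel (stopCollection) or we stop listening.
func watchStats(s *statsCollector, c *container.Container) {
	ch := s.collect(c)         // subscribe; registers a publisher on first use
	defer s.unsubscribe(c, ch) // evict this channel so the publisher can be dropped
	for v := range ch {
		stats := v.(types.StatsJSON) // run() publishes StatsJSON values
		_ = stats                    // encode, aggregate, etc.
	}
}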
-func (s *statsCollector) stopCollection(c *container.Container) { -} - -// unsubscribe removes a specific subscriber from receiving updates for a container's stats. -func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { -} diff --git a/daemon/stop.go b/daemon/stop.go deleted file mode 100644 index 4bbdbbd74..000000000 --- a/daemon/stop.go +++ /dev/null @@ -1,67 +0,0 @@ -package daemon - -import ( - "fmt" - "net/http" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/errors" -) - -// ContainerStop looks for the given container and terminates it, -// waiting the given number of seconds before forcefully killing the -// container. If a negative number of seconds is given, ContainerStop -// will wait for a graceful termination. An error is returned if the -// container is not found, is already stopped, or if there is a -// problem stopping the container. -func (daemon *Daemon) ContainerStop(name string, seconds int) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - if !container.IsRunning() { - err := fmt.Errorf("Container %s is already stopped", name) - return errors.NewErrorWithStatusCode(err, http.StatusNotModified) - } - if err := daemon.containerStop(container, seconds); err != nil { - return fmt.Errorf("Cannot stop container %s: %v", name, err) - } - return nil -} - -// containerStop halts a container by sending a stop signal, waiting for the given -// duration in seconds, and then calling SIGKILL and waiting for the -// process to exit. If a negative duration is given, Stop will wait -// for the initial signal forever. If the container is not running Stop returns -// immediately. -func (daemon *Daemon) containerStop(container *container.Container, seconds int) error { - if !container.IsRunning() { - return nil - } - - daemon.stopHealthchecks(container) - - stopSignal := container.StopSignal() - // 1. Send a stop signal - if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil { - logrus.Infof("Failed to send signal %d to the process, force killing", stopSignal) - if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { - return err - } - } - - // 2. Wait for the process to exit on its own - if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { - logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal) - // 3. If it doesn't, then send SIGKILL - if err := daemon.Kill(container); err != nil { - container.WaitStop(-1 * time.Second) - logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it - } - } - - daemon.LogContainerEvent(container, "stop") - return nil -} diff --git a/daemon/top_unix.go b/daemon/top_unix.go deleted file mode 100644 index 935f38f29..000000000 --- a/daemon/top_unix.go +++ /dev/null @@ -1,126 +0,0 @@ -//+build !windows - -package daemon - -import ( - "fmt" - "os/exec" - "regexp" - "strconv" - "strings" - - "github.com/docker/engine-api/types" -) - -func validatePSArgs(psArgs string) error { - // NOTE: \\s does not detect unicode whitespaces. - // So we use fieldsASCII instead of strings.Fields in parsePSOutput. 
- // See https://github.com/docker/docker/pull/24358 - re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)") - for _, group := range re.FindAllStringSubmatch(psArgs, -1) { - if len(group) >= 3 { - k := group[1] - v := group[2] - if k != "pid" { - return fmt.Errorf("specifying \"%s=%s\" is not allowed", k, v) - } - } - } - return nil -} - -// fieldsASCII is similar to strings.Fields but only allows ASCII whitespaces -func fieldsASCII(s string) []string { - fn := func(r rune) bool { - switch r { - case '\t', '\n', '\f', '\r', ' ': - return true - } - return false - } - return strings.FieldsFunc(s, fn) -} - -func parsePSOutput(output []byte, pids []int) (*types.ContainerProcessList, error) { - procList := &types.ContainerProcessList{} - - lines := strings.Split(string(output), "\n") - procList.Titles = fieldsASCII(lines[0]) - - pidIndex := -1 - for i, name := range procList.Titles { - if name == "PID" { - pidIndex = i - } - } - if pidIndex == -1 { - return nil, fmt.Errorf("Couldn't find PID field in ps output") - } - - // loop through the output and extract the PID from each line - for _, line := range lines[1:] { - if len(line) == 0 { - continue - } - fields := fieldsASCII(line) - p, err := strconv.Atoi(fields[pidIndex]) - if err != nil { - return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) - } - - for _, pid := range pids { - if pid == p { - // Make sure number of fields equals number of header titles - // merging "overhanging" fields - process := fields[:len(procList.Titles)-1] - process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) - procList.Processes = append(procList.Processes, process) - } - } - } - return procList, nil -} - -// ContainerTop lists the processes running inside of the given -// container by calling ps with the given args, or with the flags -// "-ef" if no args are given. An error is returned if the container -// is not found, or is not running, or if there are any problems -// running ps, or parsing the output. 
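The ASCII-only splitter deserves a concrete example: strings.Fields breaks on any Unicode space, so a crafted ps format string can hide a PID-spoofing column behind U+2003 (em space) where \s-based validation never sees it, while fieldsASCII keeps such a token glued together and the later PID parse rejects it. A runnable sketch, with fieldsASCII duplicated from the helper above:

package main

import (
	"fmt"
	"strings"
)

// fieldsASCII mirrors the helper above: split on ASCII whitespace only.
func fieldsASCII(s string) []string {
	return strings.FieldsFunc(s, func(r rune) bool {
		switch r {
		case '\t', '\n', '\f', '\r', ' ':
			return true
		}
		return false
	})
}

func main() {
	token := "uid=\u2003PID" // U+2003 em space in the middle
	fmt.Println(len(strings.Fields(token))) // 2 — Unicode-aware split separates them
	fmt.Println(len(fieldsASCII(token)))    // 1 — stays one suspicious token
}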
-func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { - if psArgs == "" { - psArgs = "-ef" - } - - if err := validatePSArgs(psArgs); err != nil { - return nil, err - } - - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - if !container.IsRunning() { - return nil, errNotRunning{container.ID} - } - - if container.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - - pids, err := daemon.containerd.GetPidsForContainer(container.ID) - if err != nil { - return nil, err - } - - output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() - if err != nil { - return nil, fmt.Errorf("Error running ps: %v", err) - } - procList, err := parsePSOutput(output, pids) - if err != nil { - return nil, err - } - daemon.LogContainerEvent(container, "top") - return procList, nil -} diff --git a/daemon/top_unix_test.go b/daemon/top_unix_test.go deleted file mode 100644 index 269ab6e94..000000000 --- a/daemon/top_unix_test.go +++ /dev/null @@ -1,76 +0,0 @@ -//+build !windows - -package daemon - -import ( - "testing" -) - -func TestContainerTopValidatePSArgs(t *testing.T) { - tests := map[string]bool{ - "ae -o uid=PID": true, - "ae -o \"uid= PID\"": true, // ascii space (0x20) - "ae -o \"uid= PID\"": false, // unicode space (U+2003, 0xe2 0x80 0x83) - "ae o uid=PID": true, - "aeo uid=PID": true, - "ae -O uid=PID": true, - "ae -o pid=PID2 -o uid=PID": true, - "ae -o pid=PID": false, - "ae -o pid=PID -o uid=PIDX": true, // FIXME: we do not need to prohibit this - "aeo pid=PID": false, - "ae": false, - "": false, - } - for psArgs, errExpected := range tests { - err := validatePSArgs(psArgs) - t.Logf("tested %q, got err=%v", psArgs, err) - if errExpected && err == nil { - t.Fatalf("expected error, got %v (%q)", err, psArgs) - } - if !errExpected && err != nil { - t.Fatalf("expected nil, got %v (%q)", err, psArgs) - } - } -} - -func TestContainerTopParsePSOutput(t *testing.T) { - tests := []struct { - output []byte - pids []int - errExpected bool - }{ - {[]byte(` PID COMMAND - 42 foo - 43 bar - 100 baz -`), []int{42, 43}, false}, - {[]byte(` UID COMMAND - 42 foo - 43 bar - 100 baz -`), []int{42, 43}, true}, - // unicode space (U+2003, 0xe2 0x80 0x83) - {[]byte(` PID COMMAND - 42 foo - 43 bar - 100 baz -`), []int{42, 43}, true}, - // the first space is U+2003, the second one is ascii. - {[]byte(` PID COMMAND - 42 foo - 43 bar - 100 baz -`), []int{42, 43}, true}, - } - - for _, f := range tests { - _, err := parsePSOutput(f.output, f.pids) - t.Logf("tested %q, got err=%v", string(f.output), err) - if f.errExpected && err == nil { - t.Fatalf("expected error, got %v (%q)", err, string(f.output)) - } - if !f.errExpected && err != nil { - t.Fatalf("expected nil, got %v (%q)", err, string(f.output)) - } - } -} diff --git a/daemon/top_windows.go b/daemon/top_windows.go deleted file mode 100644 index ea79ac86a..000000000 --- a/daemon/top_windows.go +++ /dev/null @@ -1,32 +0,0 @@ -package daemon - -import ( - "errors" - "strconv" - - "github.com/docker/engine-api/types" -) - -// ContainerTop is a minimal implementation on Windows currently. -// TODO Windows: This needs more work, but needs platform API support. -// All we can currently return (particularly in the case of Hyper-V containers) -// is a PID and the command. 
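One more subtlety in the Unix parser above: ps pads columns with spaces, so a COMMAND containing spaces yields more fields than there are titles, and the loop deliberately merges the overhang back into the final column. For instance, with made-up values:

out := []byte("PID COMMAND\n42 nginx -g daemon off;\n")
list, _ := parsePSOutput(out, []int{42})
// list.Titles    == ["PID", "COMMAND"]
// list.Processes == [["42", "nginx -g daemon off;"]]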
-func (daemon *Daemon) ContainerTop(containerID string, psArgs string) (*types.ContainerProcessList, error) { - - // It's really not an equivalent to linux 'ps' on Windows - if psArgs != "" { - return nil, errors.New("Windows does not support arguments to top") - } - - s, err := daemon.containerd.Summary(containerID) - if err != nil { - return nil, err - } - - procList := &types.ContainerProcessList{} - - for _, v := range s { - procList.Titles = append(procList.Titles, strconv.Itoa(int(v.Pid))+" "+v.Command) - } - return procList, nil -} diff --git a/daemon/unpause.go b/daemon/unpause.go deleted file mode 100644 index c1ab74b0b..000000000 --- a/daemon/unpause.go +++ /dev/null @@ -1,43 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" -) - -// ContainerUnpause unpauses a container -func (daemon *Daemon) ContainerUnpause(name string) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - if err := daemon.containerUnpause(container); err != nil { - return err - } - - return nil -} - -// containerUnpause resumes the container execution after the container is paused. -func (daemon *Daemon) containerUnpause(container *container.Container) error { - container.Lock() - defer container.Unlock() - - // We cannot unpause the container which is not running - if !container.Running { - return errNotRunning{container.ID} - } - - // We cannot unpause the container which is not paused - if !container.Paused { - return fmt.Errorf("Container %s is not paused", container.ID) - } - - if err := daemon.containerd.Resume(container.ID); err != nil { - return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err) - } - - return nil -} diff --git a/daemon/update.go b/daemon/update.go deleted file mode 100644 index 73ae16a0c..000000000 --- a/daemon/update.go +++ /dev/null @@ -1,90 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/engine-api/types/container" -) - -// ContainerUpdate updates configuration of the container -func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig, validateHostname bool) ([]string, error) { - var warnings []string - - warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true, validateHostname) - if err != nil { - return warnings, err - } - - if err := daemon.update(name, hostConfig); err != nil { - return warnings, err - } - - return warnings, nil -} - -// ContainerUpdateCmdOnBuild updates Path and Args for the container with ID cID. 
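The update path below guards against a half-applied configuration with a snapshot-and-deferred-rollback: copy HostConfig by value, flag restoreConfig on any failure, and let the deferred closure write the snapshot back under the container lock. The idiom in isolation, using toy types rather than this package's:

type config struct{ Memory int64 } // stand-in for HostConfig

func applyUpdate(current *config, apply func(*config) error) (err error) {
	backup := *current // value copy is cheap to restore
	defer func() {
		if err != nil {
			*current = backup // undo any partial mutation
		}
	}()
	return apply(current)
}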
-func (daemon *Daemon) ContainerUpdateCmdOnBuild(cID string, cmd []string) error { - if len(cmd) == 0 { - return nil - } - c, err := daemon.GetContainer(cID) - if err != nil { - return err - } - c.Path = cmd[0] - c.Args = cmd[1:] - return nil -} - -func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { - if hostConfig == nil { - return nil - } - - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - restoreConfig := false - backupHostConfig := *container.HostConfig - defer func() { - if restoreConfig { - container.Lock() - container.HostConfig = &backupHostConfig - container.ToDisk() - container.Unlock() - } - }() - - if container.RemovalInProgress || container.Dead { - return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be \"update\".")) - } - - if err := container.UpdateContainer(hostConfig); err != nil { - restoreConfig = true - return errCannotUpdate(container.ID, err) - } - - // if Restart Policy changed, we need to update container monitor - container.UpdateMonitor(hostConfig.RestartPolicy) - - // If container is not running, update hostConfig struct is enough, - // resources will be updated when the container is started again. - // If container is running (including paused), we need to update configs - // to the real world. - if container.IsRunning() && !container.IsRestarting() { - if err := daemon.containerd.UpdateResources(container.ID, toContainerdResources(hostConfig.Resources)); err != nil { - restoreConfig = true - return errCannotUpdate(container.ID, err) - } - } - - daemon.LogContainerEvent(container, "update") - - return nil -} - -func errCannotUpdate(containerID string, err error) error { - return fmt.Errorf("Cannot update container %s: %v", containerID, err) -} diff --git a/daemon/update_linux.go b/daemon/update_linux.go deleted file mode 100644 index 69cc0840a..000000000 --- a/daemon/update_linux.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux - -package daemon - -import ( - "github.com/docker/docker/libcontainerd" - "github.com/docker/engine-api/types/container" -) - -func toContainerdResources(resources container.Resources) libcontainerd.Resources { - var r libcontainerd.Resources - r.BlkioWeight = uint64(resources.BlkioWeight) - r.CpuShares = uint64(resources.CPUShares) - r.CpuPeriod = uint64(resources.CPUPeriod) - r.CpuQuota = uint64(resources.CPUQuota) - r.CpusetCpus = resources.CpusetCpus - r.CpusetMems = resources.CpusetMems - r.MemoryLimit = uint64(resources.Memory) - if resources.MemorySwap > 0 { - r.MemorySwap = uint64(resources.MemorySwap) - } - r.MemoryReservation = uint64(resources.MemoryReservation) - r.KernelMemoryLimit = uint64(resources.KernelMemory) - return r -} diff --git a/daemon/update_solaris.go b/daemon/update_solaris.go deleted file mode 100644 index 848adae9d..000000000 --- a/daemon/update_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/libcontainerd" - "github.com/docker/engine-api/types/container" -) - -func toContainerdResources(resources container.Resources) libcontainerd.Resources { - var r libcontainerd.Resources - return r -} diff --git a/daemon/update_windows.go b/daemon/update_windows.go deleted file mode 100644 index 2cd0ff261..000000000 --- a/daemon/update_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package daemon - -import ( - "github.com/docker/docker/libcontainerd" - "github.com/docker/engine-api/types/container" -) - -func toContainerdResources(resources 
container.Resources) libcontainerd.Resources { - var r libcontainerd.Resources - return r -} diff --git a/daemon/volumes.go b/daemon/volumes.go deleted file mode 100644 index d178d410a..000000000 --- a/daemon/volumes.go +++ /dev/null @@ -1,185 +0,0 @@ -package daemon - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/container" - "github.com/docker/docker/volume" - "github.com/docker/engine-api/types" - containertypes "github.com/docker/engine-api/types/container" -) - -var ( - // ErrVolumeReadonly is used to signal an error when trying to copy data into - // a volume mount that is not writable. - ErrVolumeReadonly = errors.New("mounted volume is marked read-only") -) - -type mounts []container.Mount - -// volumeToAPIType converts a volume.Volume to the type used by the remote API -func volumeToAPIType(v volume.Volume) *types.Volume { - tv := &types.Volume{ - Name: v.Name(), - Driver: v.DriverName(), - } - if v, ok := v.(volume.LabeledVolume); ok { - tv.Labels = v.Labels() - } - - if v, ok := v.(volume.ScopedVolume); ok { - tv.Scope = v.Scope() - } - return tv -} - -// Len returns the number of mounts. Used in sorting. -func (m mounts) Len() int { - return len(m) -} - -// Less returns true if the number of parts (a/b/c would be 3 parts) in the -// mount indexed by parameter 1 is less than that of the mount indexed by -// parameter 2. Used in sorting. -func (m mounts) Less(i, j int) bool { - return m.parts(i) < m.parts(j) -} - -// Swap swaps two items in an array of mounts. Used in sorting -func (m mounts) Swap(i, j int) { - m[i], m[j] = m[j], m[i] -} - -// parts returns the number of parts in the destination of a mount. Used in sorting. -func (m mounts) parts(i int) int { - return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) -} - -// registerMountPoints initializes the container mount points with the configured volumes and bind mounts. -// It follows the next sequence to decide what to mount in each final destination: -// -// 1. Select the previously configured mount points for the containers, if any. -// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination. -// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations. -// 4. Cleanup old volumes that are about to be reassigned. -func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) { - binds := map[string]bool{} - mountPoints := map[string]*volume.MountPoint{} - defer func() { - // clean up the container mountpoints once return with error - if retErr != nil { - for _, m := range mountPoints { - if m.Volume == nil { - continue - } - daemon.volumes.Dereference(m.Volume, container.ID) - } - } - }() - - // 1. Read already configured mount points. - for name, point := range container.MountPoints { - mountPoints[name] = point - } - - // 2. Read volumes from other containers. 
- for _, v := range hostConfig.VolumesFrom { - containerID, mode, err := volume.ParseVolumesFrom(v) - if err != nil { - return err - } - - c, err := daemon.GetContainer(containerID) - if err != nil { - return err - } - - for _, m := range c.MountPoints { - cp := &volume.MountPoint{ - Name: m.Name, - Source: m.Source, - RW: m.RW && volume.ReadWrite(mode), - Driver: m.Driver, - Destination: m.Destination, - Propagation: m.Propagation, - Named: m.Named, - } - - if len(cp.Source) == 0 { - v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID) - if err != nil { - return err - } - cp.Volume = v - } - - mountPoints[cp.Destination] = cp - } - } - - // 3. Read bind mounts - for _, b := range hostConfig.Binds { - // #10618 - bind, err := volume.ParseMountSpec(b, hostConfig.VolumeDriver) - if err != nil { - return err - } - - _, tmpfsExists := hostConfig.Tmpfs[bind.Destination] - if binds[bind.Destination] || tmpfsExists { - return fmt.Errorf("Duplicate mount point '%s'", bind.Destination) - } - - if len(bind.Name) > 0 { - // create the volume - v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil) - if err != nil { - return err - } - bind.Volume = v - bind.Source = v.Path() - // bind.Name is an already existing volume, we need to use that here - bind.Driver = v.DriverName() - bind.Named = true - if bind.Driver == "local" { - bind = setBindModeIfNull(bind) - } - } - - binds[bind.Destination] = true - mountPoints[bind.Destination] = bind - } - - container.Lock() - - // 4. Cleanup old volumes that are about to be reassigned. - for _, m := range mountPoints { - if m.BackwardsCompatible() { - if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil { - daemon.volumes.Dereference(mp.Volume, container.ID) - } - } - } - container.MountPoints = mountPoints - - container.Unlock() - - return nil -} - -// lazyInitializeVolume initializes a mountpoint's volume if needed. -// This happens after a daemon restart. 
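Note how registerMountPoints above resolves collisions purely through its destination-keyed map: each step writes mountPoints[destination], so a later source silently overrides an earlier one for the same path — binds beat volumes-from, which beat previously configured points — and only a duplicate among the binds (or a tmpfs clash) is rejected outright. In miniature:

mountPoints := map[string]string{}
mountPoints["/data"] = "configured"   // step 1: already-configured point
mountPoints["/data"] = "volumes-from" // step 2: overrides step 1
mountPoints["/data"] = "bind"         // step 3: overrides step 2
fmt.Println(mountPoints["/data"])     // "bind"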
-func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error { - if len(m.Driver) > 0 && m.Volume == nil { - v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID) - if err != nil { - return err - } - m.Volume = v - } - return nil -} diff --git a/daemon/volumes_unit_test.go b/daemon/volumes_unit_test.go deleted file mode 100644 index 450d17f97..000000000 --- a/daemon/volumes_unit_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package daemon - -import ( - "testing" - - "github.com/docker/docker/volume" -) - -func TestParseVolumesFrom(t *testing.T) { - cases := []struct { - spec string - expID string - expMode string - fail bool - }{ - {"", "", "", true}, - {"foobar", "foobar", "rw", false}, - {"foobar:rw", "foobar", "rw", false}, - {"foobar:ro", "foobar", "ro", false}, - {"foobar:baz", "", "", true}, - } - - for _, c := range cases { - id, mode, err := volume.ParseVolumesFrom(c.spec) - if c.fail { - if err == nil { - t.Fatalf("Expected error, was nil, for spec %s\n", c.spec) - } - continue - } - - if id != c.expID { - t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expID, id, c.spec) - } - if mode != c.expMode { - t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec) - } - } -} diff --git a/daemon/volumes_unix.go b/daemon/volumes_unix.go deleted file mode 100644 index ca0628c89..000000000 --- a/daemon/volumes_unix.go +++ /dev/null @@ -1,86 +0,0 @@ -// +build !windows - -package daemon - -import ( - "os" - "sort" - "strconv" - - "github.com/docker/docker/container" - "github.com/docker/docker/volume" -) - -// setupMounts iterates through each of the mount points for a container and -// calls Setup() on each. It also looks to see if is a network mount such as -// /etc/resolv.conf, and if it is not, appends it to the array of mounts. -func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) { - var mounts []container.Mount - // TODO: tmpfs mounts should be part of Mountpoints - tmpfsMounts := make(map[string]bool) - for _, m := range c.TmpfsMounts() { - tmpfsMounts[m.Destination] = true - } - for _, m := range c.MountPoints { - if tmpfsMounts[m.Destination] { - continue - } - if err := daemon.lazyInitializeVolume(c.ID, m); err != nil { - return nil, err - } - path, err := m.Setup(c.MountLabel) - if err != nil { - return nil, err - } - if !c.TrySetNetworkMount(m.Destination, path) { - mnt := container.Mount{ - Source: path, - Destination: m.Destination, - Writable: m.RW, - Propagation: m.Propagation, - } - if m.Volume != nil { - attributes := map[string]string{ - "driver": m.Volume.DriverName(), - "container": c.ID, - "destination": m.Destination, - "read/write": strconv.FormatBool(m.RW), - "propagation": m.Propagation, - } - daemon.LogVolumeEvent(m.Volume.Name(), "mount", attributes) - } - mounts = append(mounts, mnt) - } - } - - mounts = sortMounts(mounts) - netMounts := c.NetworkMounts() - // if we are going to mount any of the network files from container - // metadata, the ownership must be set properly for potential container - // remapped root (user namespaces) - rootUID, rootGID := daemon.GetRemappedUIDGID() - for _, mount := range netMounts { - if err := os.Chown(mount.Source, rootUID, rootGID); err != nil { - return nil, err - } - } - return append(mounts, netMounts...), nil -} - -// sortMounts sorts an array of mounts in lexicographic order. This ensure that -// when mounting, the mounts don't shadow other mounts. 
For example, if mounting -// /etc and /etc/resolv.conf, /etc/resolv.conf must not be mounted first. -func sortMounts(m []container.Mount) []container.Mount { - sort.Sort(mounts(m)) - return m -} - -// setBindModeIfNull is platform specific processing to ensure the -// shared mode is set to 'z' if it is null. This is called in the case -// of processing a named volume and not a typical bind. -func setBindModeIfNull(bind *volume.MountPoint) *volume.MountPoint { - if bind.Mode == "" { - bind.Mode = "z" - } - return bind -} diff --git a/daemon/volumes_windows.go b/daemon/volumes_windows.go deleted file mode 100644 index e7f9c098d..000000000 --- a/daemon/volumes_windows.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build windows - -package daemon - -import ( - "fmt" - "sort" - - "github.com/docker/docker/container" - "github.com/docker/docker/volume" -) - -// setupMounts configures the mount points for a container by appending each -// of the configured mounts on the container to the OCI mount structure -// which will ultimately be passed into the oci runtime during container creation. -// It also ensures each of the mounts are lexographically sorted. - -// BUGBUG TODO Windows containerd. This would be much better if it returned -// an array of windowsoci mounts, not container mounts. Then no need to -// do multiple transitions. - -func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) { - var mnts []container.Mount - for _, mount := range c.MountPoints { // type is volume.MountPoint - if err := daemon.lazyInitializeVolume(c.ID, mount); err != nil { - return nil, err - } - // If there is no source, take it from the volume path - s := mount.Source - if s == "" && mount.Volume != nil { - s = mount.Volume.Path() - } - if s == "" { - return nil, fmt.Errorf("No source for mount name '%s' driver %q destination '%s'", mount.Name, mount.Driver, mount.Destination) - } - mnts = append(mnts, container.Mount{ - Source: s, - Destination: mount.Destination, - Writable: mount.RW, - }) - } - - sort.Sort(mounts(mnts)) - return mnts, nil -} - -// setBindModeIfNull is platform specific processing which is a no-op on -// Windows. -func setBindModeIfNull(bind *volume.MountPoint) *volume.MountPoint { - return bind -} diff --git a/daemon/wait.go b/daemon/wait.go deleted file mode 100644 index 2dab22e99..000000000 --- a/daemon/wait.go +++ /dev/null @@ -1,32 +0,0 @@ -package daemon - -import ( - "time" - - "golang.org/x/net/context" -) - -// ContainerWait stops processing until the given container is -// stopped. If the container is not found, an error is returned. On a -// successful stop, the exit code of the container is returned. On a -// timeout, an error is returned. If you want to wait forever, supply -// a negative duration for the timeout. -func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return -1, err - } - - return container.WaitStop(timeout) -} - -// ContainerWaitWithContext returns a channel where exit code is sent -// when container stops. Channel can be cancelled with a context. 
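Strictly speaking, mounts.Less above orders by path depth rather than lexicographically: it compares strings.Count(filepath.Clean(dest), string(os.PathSeparator)), so shallower destinations mount first and a parent can never shadow a child mounted after it. A quick check of the ordering key:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// depth is the same key mounts.Less compares.
func depth(dest string) int {
	return strings.Count(filepath.Clean(dest), string(os.PathSeparator))
}

func main() {
	fmt.Println(depth("/etc"))             // 1 — mounts first
	fmt.Println(depth("/etc/resolv.conf")) // 2 — sorts after /etc, mounts on top
}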
-func (daemon *Daemon) ContainerWaitWithContext(ctx context.Context, name string) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - return container.WaitWithContext(ctx) -} diff --git a/distribution/errors.go b/distribution/errors.go deleted file mode 100644 index a9a17bd26..000000000 --- a/distribution/errors.go +++ /dev/null @@ -1,113 +0,0 @@ -package distribution - -import ( - "net/url" - "strings" - "syscall" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/docker/distribution/xfer" -) - -// ErrNoSupport is an error type used for errors indicating that an operation -// is not supported. It encapsulates a more specific error. -type ErrNoSupport struct{ Err error } - -func (e ErrNoSupport) Error() string { - if e.Err == nil { - return "not supported" - } - return e.Err.Error() -} - -// fallbackError wraps an error that can possibly allow fallback to a different -// endpoint. -type fallbackError struct { - // err is the error being wrapped. - err error - // confirmedV2 is set to true if it was confirmed that the registry - // supports the v2 protocol. This is used to limit fallbacks to the v1 - // protocol. - confirmedV2 bool - // transportOK is set to true if we managed to speak HTTP with the - // registry. This confirms that we're using appropriate TLS settings - // (or lack of TLS). - transportOK bool -} - -// Error renders the FallbackError as a string. -func (f fallbackError) Error() string { - return f.err.Error() -} - -// shouldV2Fallback returns true if this error is a reason to fall back to v1. -func shouldV2Fallback(err errcode.Error) bool { - switch err.Code { - case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: - return true - } - return false -} - -// continueOnError returns true if we should fallback to the next endpoint -// as a result of this error. -func continueOnError(err error) bool { - switch v := err.(type) { - case errcode.Errors: - if len(v) == 0 { - return true - } - return continueOnError(v[0]) - case ErrNoSupport: - return continueOnError(v.Err) - case errcode.Error: - return shouldV2Fallback(v) - case *client.UnexpectedHTTPResponseError: - return true - case ImageConfigPullError: - return false - case error: - return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) - } - // let's be nice and fallback if the error is a completely - // unexpected one. - // If new errors have to be handled in some way, please - // add them to the switch above. - return true -} - -// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the -// operation after this error. 
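continueOnError above and retryOnError below split one policy across two questions: is the next endpoint worth trying at all, and should the transfer layer re-attempt the current one. A hypothetical driver loop showing how a caller might combine them — a sketch, not the actual pull code:

// trySequence walks candidate attempts, falling back only when the
// classifiers above say it is worthwhile.
func trySequence(attempts []func() error) error {
	var lastErr error
	for _, attempt := range attempts {
		err := attempt()
		if err == nil {
			return nil
		}
		lastErr = retryOnError(err) // may come back wrapped in xfer.DoNotRetry
		if !continueOnError(err) {  // e.g. ImageConfigPullError: stop falling back
			break
		}
	}
	return lastErr
}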
-func retryOnError(err error) error { - switch v := err.(type) { - case errcode.Errors: - if len(v) != 0 { - return retryOnError(v[0]) - } - case errcode.Error: - switch v.Code { - case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests, v2.ErrorCodeNameUnknown: - return xfer.DoNotRetry{Err: err} - } - case *url.Error: - switch v.Err { - case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken: - return xfer.DoNotRetry{Err: v.Err} - } - return retryOnError(v.Err) - case *client.UnexpectedHTTPResponseError: - return xfer.DoNotRetry{Err: err} - case error: - if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { - return xfer.DoNotRetry{Err: err} - } - } - // let's be nice and fallback if the error is a completely - // unexpected one. - // If new errors have to be handled in some way, please - // add them to the switch above. - return err -} diff --git a/distribution/fixtures/validate_manifest/bad_manifest b/distribution/fixtures/validate_manifest/bad_manifest deleted file mode 100644 index a1f02a62a..000000000 --- a/distribution/fixtures/validate_manifest/bad_manifest +++ /dev/null @@ -1,38 +0,0 @@ -{ - "schemaVersion": 2, - "name": "library/hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY 
file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" - } - ], - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", - "kty": "EC", - "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", - "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" - }, - "alg": "ES256" - }, - "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", - "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" - } - ] -} diff --git a/distribution/fixtures/validate_manifest/extra_data_manifest b/distribution/fixtures/validate_manifest/extra_data_manifest deleted file mode 100644 index beec19a80..000000000 --- a/distribution/fixtures/validate_manifest/extra_data_manifest +++ /dev/null @@ -1,46 +0,0 @@ -{ - "schemaVersion": 1, - "name": "library/hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": 
"{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" - } - ], - "fsLayers": [ - { - "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", - "kty": "EC", - "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", - "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" - }, - "alg": "ES256" - }, - "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", - "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" - } - ] -} diff --git a/distribution/fixtures/validate_manifest/good_manifest b/distribution/fixtures/validate_manifest/good_manifest deleted file mode 100644 index b107de322..000000000 --- a/distribution/fixtures/validate_manifest/good_manifest +++ /dev/null @@ -1,38 +0,0 @@ -{ - "schemaVersion": 1, - "name": "library/hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" - } - ], - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", - "kty": "EC", - "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", - "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" - }, - "alg": "ES256" - }, - "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", - "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" - } - ] -} \ No newline at end of file diff --git a/distribution/metadata/metadata.go b/distribution/metadata/metadata.go deleted file mode 100644 index 05ba4f817..000000000 --- a/distribution/metadata/metadata.go +++ /dev/null @@ -1,75 +0,0 @@ -package metadata - -import ( - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/docker/docker/pkg/ioutils" -) - -// Store implements a K/V store for mapping distribution-related IDs -// to on-disk layer IDs and image IDs. The namespace identifies the type of -// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. -type Store interface { - // Get retrieves data by namespace and key. - Get(namespace string, key string) ([]byte, error) - // Set writes data indexed by namespace and key. 
- Set(namespace, key string, value []byte) error - // Delete removes data indexed by namespace and key. - Delete(namespace, key string) error -} - -// FSMetadataStore uses the filesystem to associate metadata with layer and -// image IDs. -type FSMetadataStore struct { - sync.RWMutex - basePath string -} - -// NewFSMetadataStore creates a new filesystem-based metadata store. -func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { - if err := os.MkdirAll(basePath, 0700); err != nil { - return nil, err - } - return &FSMetadataStore{ - basePath: basePath, - }, nil -} - -func (store *FSMetadataStore) path(namespace, key string) string { - return filepath.Join(store.basePath, namespace, key) -} - -// Get retrieves data by namespace and key. The data is read from a file named -// after the key, stored in the namespace's directory. -func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { - store.RLock() - defer store.RUnlock() - - return ioutil.ReadFile(store.path(namespace, key)) -} - -// Set writes data indexed by namespace and key. The data is written to a file -// named after the key, stored in the namespace's directory. -func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { - store.Lock() - defer store.Unlock() - - path := store.path(namespace, key) - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - return ioutils.AtomicWriteFile(path, value, 0644) -} - -// Delete removes data indexed by namespace and key. The data file named after -// the key, stored in the namespace's directory is deleted. -func (store *FSMetadataStore) Delete(namespace, key string) error { - store.Lock() - defer store.Unlock() - - path := store.path(namespace, key) - return os.Remove(path) -} diff --git a/distribution/metadata/v1_id_service.go b/distribution/metadata/v1_id_service.go deleted file mode 100644 index f6e458924..000000000 --- a/distribution/metadata/v1_id_service.go +++ /dev/null @@ -1,44 +0,0 @@ -package metadata - -import ( - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" -) - -// V1IDService maps v1 IDs to layers on disk. -type V1IDService struct { - store Store -} - -// NewV1IDService creates a new V1 ID mapping service. -func NewV1IDService(store Store) *V1IDService { - return &V1IDService{ - store: store, - } -} - -// namespace returns the namespace used by this service. -func (idserv *V1IDService) namespace() string { - return "v1id" -} - -// Get finds a layer by its V1 ID. -func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { - if err := v1.ValidateID(v1ID); err != nil { - return layer.DiffID(""), err - } - - idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) - if err != nil { - return layer.DiffID(""), err - } - return layer.DiffID(idBytes), nil -} - -// Set associates an image with a V1 ID. 
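
The Store interface removed here keys everything by a (namespace, key) pair, and FSMetadataStore realizes that as one file per key under basePath/namespace/. The V1IDService that follows builds its keys as registry + "," + v1ID inside the "v1id" namespace. A minimal round-trip sketch against this API, assuming the pre-removal import path github.com/docker/docker/distribution/metadata (paths and values here are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/distribution/metadata"
)

func main() {
	// Creates /tmp/dist-metadata (mode 0700) if it does not exist yet.
	store, err := metadata.NewFSMetadataStore("/tmp/dist-metadata")
	if err != nil {
		log.Fatal(err)
	}
	// Written to /tmp/dist-metadata/v1id/index.docker.io,<v1 ID>.
	key := "index.docker.io,f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"
	if err := store.Set("v1id", key, []byte("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")); err != nil {
		log.Fatal(err)
	}
	val, err := store.Get("v1id", key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", val)
}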
-func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { - if err := v1.ValidateID(v1ID); err != nil { - return err - } - return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) -} diff --git a/distribution/metadata/v1_id_service_test.go b/distribution/metadata/v1_id_service_test.go deleted file mode 100644 index 556886581..000000000 --- a/distribution/metadata/v1_id_service_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package metadata - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/docker/docker/layer" -) - -func TestV1IDService(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "v1-id-service-test") - if err != nil { - t.Fatalf("could not create temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - metadataStore, err := NewFSMetadataStore(tmpDir) - if err != nil { - t.Fatalf("could not create metadata store: %v", err) - } - v1IDService := NewV1IDService(metadataStore) - - testVectors := []struct { - registry string - v1ID string - layerID layer.DiffID - }{ - { - registry: "registry1", - v1ID: "f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937", - layerID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), - }, - { - registry: "registry2", - v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", - layerID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), - }, - { - registry: "registry1", - v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", - layerID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), - }, - } - - // Set some associations - for _, vec := range testVectors { - err := v1IDService.Set(vec.v1ID, vec.registry, vec.layerID) - if err != nil { - t.Fatalf("error calling Set: %v", err) - } - } - - // Check the correct values are read back - for _, vec := range testVectors { - layerID, err := v1IDService.Get(vec.v1ID, vec.registry) - if err != nil { - t.Fatalf("error calling Get: %v", err) - } - if layerID != vec.layerID { - t.Fatal("Get returned incorrect layer ID") - } - } - - // Test Get on a nonexistent entry - _, err = v1IDService.Get("82379823067823853223359023576437723560923756b03560378f4497753917", "registry1") - if err == nil { - t.Fatal("expected error looking up nonexistent entry") - } - - // Overwrite one of the entries and read it back - err = v1IDService.Set(testVectors[0].v1ID, testVectors[0].registry, testVectors[1].layerID) - if err != nil { - t.Fatalf("error calling Set: %v", err) - } - layerID, err := v1IDService.Get(testVectors[0].v1ID, testVectors[0].registry) - if err != nil { - t.Fatalf("error calling Get: %v", err) - } - if layerID != testVectors[1].layerID { - t.Fatal("Get returned incorrect layer ID") - } -} diff --git a/distribution/metadata/v2_metadata_service.go b/distribution/metadata/v2_metadata_service.go deleted file mode 100644 index 239cd1f45..000000000 --- a/distribution/metadata/v2_metadata_service.go +++ /dev/null @@ -1,137 +0,0 @@ -package metadata - -import ( - "encoding/json" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/layer" -) - -// V2MetadataService maps layer IDs to a set of known metadata for -// the layer. -type V2MetadataService struct { - store Store -} - -// V2Metadata contains the digest and source repository information for a layer. 
-type V2Metadata struct { - Digest digest.Digest - SourceRepository string -} - -// maxMetadata is the number of metadata entries to keep per layer DiffID. -const maxMetadata = 50 - -// NewV2MetadataService creates a new diff ID to v2 metadata mapping service. -func NewV2MetadataService(store Store) *V2MetadataService { - return &V2MetadataService{ - store: store, - } -} - -func (serv *V2MetadataService) diffIDNamespace() string { - return "v2metadata-by-diffid" -} - -func (serv *V2MetadataService) digestNamespace() string { - return "diffid-by-digest" -} - -func (serv *V2MetadataService) diffIDKey(diffID layer.DiffID) string { - return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() -} - -func (serv *V2MetadataService) digestKey(dgst digest.Digest) string { - return string(dgst.Algorithm()) + "/" + dgst.Hex() -} - -// GetMetadata finds the metadata associated with a layer DiffID. -func (serv *V2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { - jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) - if err != nil { - return nil, err - } - - var metadata []V2Metadata - if err := json.Unmarshal(jsonBytes, &metadata); err != nil { - return nil, err - } - - return metadata, nil -} - -// GetDiffID finds a layer DiffID from a digest. -func (serv *V2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { - diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) - if err != nil { - return layer.DiffID(""), err - } - - return layer.DiffID(diffIDBytes), nil -} - -// Add associates metadata with a layer DiffID. If too many metadata entries are -// present, the oldest one is dropped. -func (serv *V2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { - oldMetadata, err := serv.GetMetadata(diffID) - if err != nil { - oldMetadata = nil - } - newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) - - // Copy all other metadata to new slice - for _, oldMeta := range oldMetadata { - if oldMeta != metadata { - newMetadata = append(newMetadata, oldMeta) - } - } - - newMetadata = append(newMetadata, metadata) - - if len(newMetadata) > maxMetadata { - newMetadata = newMetadata[len(newMetadata)-maxMetadata:] - } - - jsonBytes, err := json.Marshal(newMetadata) - if err != nil { - return err - } - - err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) - if err != nil { - return err - } - - return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) -} - -// Remove unassociates a metadata entry from a layer DiffID. 
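
V2MetadataService layers two namespaces over the same Store: v2metadata-by-diffid maps a layer DiffID to its known registry metadata (capped at maxMetadata entries), and diffid-by-digest maps back the other way. A self-contained sketch of Add followed by the reverse lookup, using the same assumed import path as above (the digest values and repository name are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution/digest"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/layer"
)

func main() {
	store, err := metadata.NewFSMetadataStore("/tmp/dist-metadata")
	if err != nil {
		log.Fatal(err)
	}
	svc := metadata.NewV2MetadataService(store)

	diffID := layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")
	meta := metadata.V2Metadata{
		Digest:           digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"),
		SourceRepository: "docker.io/library/hello-world",
	}
	// Writes both directions: diffID -> [meta] and meta.Digest -> diffID.
	if err := svc.Add(diffID, meta); err != nil {
		log.Fatal(err)
	}
	back, err := svc.GetDiffID(meta.Digest)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back == diffID) // true
}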
-func (serv *V2MetadataService) Remove(metadata V2Metadata) error { - diffID, err := serv.GetDiffID(metadata.Digest) - if err != nil { - return err - } - oldMetadata, err := serv.GetMetadata(diffID) - if err != nil { - oldMetadata = nil - } - newMetadata := make([]V2Metadata, 0, len(oldMetadata)) - - // Copy all other metadata to new slice - for _, oldMeta := range oldMetadata { - if oldMeta != metadata { - newMetadata = append(newMetadata, oldMeta) - } - } - - if len(newMetadata) == 0 { - return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) - } - - jsonBytes, err := json.Marshal(newMetadata) - if err != nil { - return err - } - - return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) -} diff --git a/distribution/metadata/v2_metadata_service_test.go b/distribution/metadata/v2_metadata_service_test.go deleted file mode 100644 index 7b0ecb157..000000000 --- a/distribution/metadata/v2_metadata_service_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package metadata - -import ( - "encoding/hex" - "io/ioutil" - "math/rand" - "os" - "reflect" - "testing" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/layer" -) - -func TestV2MetadataService(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test") - if err != nil { - t.Fatalf("could not create temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - metadataStore, err := NewFSMetadataStore(tmpDir) - if err != nil { - t.Fatalf("could not create metadata store: %v", err) - } - V2MetadataService := NewV2MetadataService(metadataStore) - - tooManyBlobSums := make([]V2Metadata, 100) - for i := range tooManyBlobSums { - randDigest := randomDigest() - tooManyBlobSums[i] = V2Metadata{Digest: randDigest} - } - - testVectors := []struct { - diffID layer.DiffID - metadata []V2Metadata - }{ - { - diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), - metadata: []V2Metadata{ - {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, - }, - }, - { - diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), - metadata: []V2Metadata{ - {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, - {Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")}, - }, - }, - { - diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), - metadata: tooManyBlobSums, - }, - } - - // Set some associations - for _, vec := range testVectors { - for _, blobsum := range vec.metadata { - err := V2MetadataService.Add(vec.diffID, blobsum) - if err != nil { - t.Fatalf("error calling Set: %v", err) - } - } - } - - // Check the correct values are read back - for _, vec := range testVectors { - metadata, err := V2MetadataService.GetMetadata(vec.diffID) - if err != nil { - t.Fatalf("error calling Get: %v", err) - } - expectedMetadataEntries := len(vec.metadata) - if expectedMetadataEntries > 50 { - expectedMetadataEntries = 50 - } - if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) { - t.Fatal("Get returned incorrect layer ID") - } - } - - // Test GetMetadata on a nonexistent entry - _, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) - if err == nil { - t.Fatal("expected error looking up nonexistent entry") - } - - // Test 
GetDiffID on a nonexistent entry - _, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) - if err == nil { - t.Fatal("expected error looking up nonexistent entry") - } - - // Overwrite one of the entries and read it back - err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0]) - if err != nil { - t.Fatalf("error calling Add: %v", err) - } - diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest) - if err != nil { - t.Fatalf("error calling GetDiffID: %v", err) - } - if diffID != testVectors[1].diffID { - t.Fatal("GetDiffID returned incorrect diffID") - } -} - -func randomDigest() digest.Digest { - b := [32]byte{} - for i := 0; i < len(b); i++ { - b[i] = byte(rand.Intn(256)) - } - d := hex.EncodeToString(b[:]) - return digest.Digest("sha256:" + d) -} diff --git a/distribution/pull.go b/distribution/pull.go deleted file mode 100644 index dad93b656..000000000 --- a/distribution/pull.go +++ /dev/null @@ -1,225 +0,0 @@ -package distribution - -import ( - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/api" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ImagePullConfig stores pull configuration. -type ImagePullConfig struct { - // MetaHeaders stores HTTP headers with metadata about the image - MetaHeaders map[string][]string - // AuthConfig holds authentication credentials for authenticating with - // the registry. - AuthConfig *types.AuthConfig - // ProgressOutput is the interface for showing the status of the pull - // operation. - ProgressOutput progress.Output - // RegistryService is the registry service to use for TLS configuration - // and endpoint lookup. - RegistryService registry.Service - // ImageEventLogger notifies events for a given image - ImageEventLogger func(id, name, action string) - // MetadataStore is the storage backend for distribution-specific - // metadata. - MetadataStore metadata.Store - // ImageStore manages images. - ImageStore image.Store - // ReferenceStore manages tags. - ReferenceStore reference.Store - // DownloadManager manages concurrent pulls. - DownloadManager *xfer.LayerDownloadManager -} - -// Puller is an interface that abstracts pulling for different API versions. -type Puller interface { - // Pull tries to pull the image referenced by `tag` - // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. - // - Pull(ctx context.Context, ref reference.Named) error -} - -// newPuller returns a Puller interface that will pull from either a v1 or v2 -// registry. The endpoint argument contains a Version field that determines -// whether a v1 or v2 puller will be created. The other parameters are passed -// through to the underlying puller implementation for use during the actual -// pull operation. 
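
The Pull function below walks the configured endpoints in order and only moves on when a puller signals, by wrapping its error in a fallbackError, that trying the next endpoint is safe; any other error aborts the pull immediately. A self-contained sketch of that contract; the fallback type, pullFrom helper, and endpoint URLs here are stand-ins, not the removed types:

package main

import (
	"errors"
	"fmt"
)

// fallback wraps an error that permits trying the next endpoint.
type fallback struct{ err error }

func (f fallback) Error() string { return f.err.Error() }

func pullFrom(endpoint string) error {
	if endpoint == "https://mirror.example.com" {
		// Retryable: the next endpoint may still succeed.
		return fallback{err: errors.New("mirror unreachable")}
	}
	return nil
}

func main() {
	var lastErr error
	for _, ep := range []string{"https://mirror.example.com", "https://registry.example.com"} {
		err := pullFrom(ep)
		if err == nil {
			fmt.Println("pulled from", ep)
			return
		}
		if f, ok := err.(fallback); ok {
			lastErr = f.err
			continue // try the next endpoint
		}
		fmt.Println("fatal:", err) // not retryable, stop here
		return
	}
	fmt.Println("all endpoints failed:", lastErr)
}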
-func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { - switch endpoint.Version { - case registry.APIVersion2: - return &v2Puller{ - V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), - endpoint: endpoint, - config: imagePullConfig, - repoInfo: repoInfo, - }, nil - case registry.APIVersion1: - return &v1Puller{ - v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore), - endpoint: endpoint, - config: imagePullConfig, - repoInfo: repoInfo, - }, nil - } - return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) -} - -// Pull initiates a pull operation. image is the repository name to pull, and -// tag may be either empty, or indicate a specific tag to pull. -func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error { - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref) - if err != nil { - return err - } - - // makes sure name is not empty or `scratch` - if err := ValidateRepoName(repoInfo.Name()); err != nil { - return err - } - - endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo.Hostname()) - if err != nil { - return err - } - - var ( - lastErr error - - // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport - // By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr. - // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of - // any subsequent ErrNoSupport errors in lastErr. - // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be - // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant - // error is the ones from v2 endpoints not v1. - discardNoSupportErrors bool - - // confirmedV2 is set to true if a pull attempt managed to - // confirm that it was talking to a v2 registry. This will - // prevent fallback to the v1 protocol. - confirmedV2 bool - - // confirmedTLSRegistries is a map indicating which registries - // are known to be using TLS. There should never be a plaintext - // retry for any of these. - confirmedTLSRegistries = make(map[string]struct{}) - ) - for _, endpoint := range endpoints { - if confirmedV2 && endpoint.Version == registry.APIVersion1 { - logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) - continue - } - - if endpoint.URL.Scheme != "https" { - if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { - logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) - continue - } - } - - logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) - - puller, err := newPuller(endpoint, repoInfo, imagePullConfig) - if err != nil { - lastErr = err - continue - } - if err := puller.Pull(ctx, ref); err != nil { - // Was this pull cancelled? If so, don't try to fall - // back. 
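
The select with an empty default just below is a non-blocking probe: when the context has already been cancelled, fallback stays false, so a user-initiated cancel never triggers an endpoint retry. The same idiom in isolation, as a runnable sketch:

package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate a user-initiated cancellation

	// Non-blocking probe, as in Pull: the empty default keeps the
	// select from waiting when the context is still live.
	select {
	case <-ctx.Done():
		fmt.Println("cancelled, not falling back:", ctx.Err())
	default:
		fmt.Println("still live, may fall back to the next endpoint")
	}
}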
- fallback := false - select { - case <-ctx.Done(): - default: - if fallbackErr, ok := err.(fallbackError); ok { - fallback = true - confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 - if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { - confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} - } - err = fallbackErr.err - } - } - if fallback { - if _, ok := err.(ErrNoSupport); !ok { - // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. - discardNoSupportErrors = true - // append subsequent errors - lastErr = err - } else if !discardNoSupportErrors { - // Save the ErrNoSupport error, because it's either the first error or all encountered errors - // were also ErrNoSupport errors. - // append subsequent errors - lastErr = err - } - logrus.Errorf("Attempting next endpoint for pull after error: %v", err) - continue - } - logrus.Errorf("Not continuing with pull after error: %v", err) - return err - } - - imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull") - return nil - } - - if lastErr == nil { - lastErr = fmt.Errorf("no endpoints found for %s", ref.String()) - } - - return lastErr -} - -// writeStatus writes a status message to out. If layersDownloaded is true, the -// status message indicates that a newer image was downloaded. Otherwise, it -// indicates that the image is up to date. requestedTag is the tag the message -// will refer to. -func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { - if layersDownloaded { - progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) - } else { - progress.Message(out, "", "Status: Image is up to date for "+requestedTag) - } -} - -// ValidateRepoName validates the name of a repository. 
-func ValidateRepoName(name string) error { - if name == "" { - return fmt.Errorf("Repository name can't be empty") - } - if name == api.NoBaseImageSpecifier { - return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) - } - return nil -} - -func addDigestReference(store reference.Store, ref reference.Named, dgst digest.Digest, imageID image.ID) error { - dgstRef, err := reference.WithDigest(ref, dgst) - if err != nil { - return err - } - - if oldTagImageID, err := store.Get(dgstRef); err == nil { - if oldTagImageID != imageID { - // Updating digests not supported by reference store - logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagImageID, imageID) - } - return nil - } else if err != reference.ErrDoesNotExist { - return err - } - - return store.AddDigest(dgstRef, imageID, true) -} diff --git a/distribution/pull_v1.go b/distribution/pull_v1.go deleted file mode 100644 index 3b7175e91..000000000 --- a/distribution/pull_v1.go +++ /dev/null @@ -1,366 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/url" - "os" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "golang.org/x/net/context" -) - -type v1Puller struct { - v1IDService *metadata.V1IDService - endpoint registry.APIEndpoint - config *ImagePullConfig - repoInfo *registry.RepositoryInfo - session *registry.Session -} - -func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { - if _, isCanonical := ref.(reference.Canonical); isCanonical { - // Allowing fallback, because HTTPS v1 is before HTTP v2 - return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} - } - - tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) - if err != nil { - return err - } - // Adds Docker-specific headers as well as user-specified headers (metaHeaders) - tr := transport.NewTransport( - // TODO(tiborvass): was ReceiveTimeout - registry.NewTransport(tlsConfig), - registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., - ) - client := registry.HTTPClient(tr) - v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) - if err != nil { - logrus.Debugf("Could not get v1 endpoint: %v", err) - return fallbackError{err: err} - } - p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) - if err != nil { - // TODO(dmcgowan): Check if should fallback - logrus.Debugf("Fallback from error: %s", err) - return fallbackError{err: err} - } - if err := p.pullRepository(ctx, ref); err != nil { - // TODO(dmcgowan): Check if should fallback - return err - } - progress.Message(p.config.ProgressOutput, "", p.repoInfo.FullName()+": this image was pulled from a legacy registry. 
Important: This registry version will not be supported in future versions of docker.") - - return nil -} - -func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { - progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.FullName()) - - tagged, isTagged := ref.(reference.NamedTagged) - - repoData, err := p.session.GetRepositoryData(p.repoInfo) - if err != nil { - if strings.Contains(err.Error(), "HTTP code: 404") { - if isTagged { - return fmt.Errorf("Error: image %s:%s not found", p.repoInfo.RemoteName(), tagged.Tag()) - } - return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName()) - } - // Unexpected HTTP error - return err - } - - logrus.Debug("Retrieving the tag list") - var tagsList map[string]string - if !isTagged { - tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo) - } else { - var tagID string - tagsList = make(map[string]string) - tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo, tagged.Tag()) - if err == registry.ErrRepoNotFound { - return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.FullName()) - } - tagsList[tagged.Tag()] = tagID - } - if err != nil { - logrus.Errorf("unable to get remote tags: %s", err) - return err - } - - for tag, id := range tagsList { - repoData.ImgList[id] = ®istry.ImgData{ - ID: id, - Tag: tag, - Checksum: "", - } - } - - layersDownloaded := false - for _, imgData := range repoData.ImgList { - if isTagged && imgData.Tag != tagged.Tag() { - continue - } - - err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) - if err != nil { - return err - } - } - - writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) - return nil -} - -func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { - if img.Tag == "" { - logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) - return nil - } - - localNameRef, err := reference.WithTag(p.repoInfo, img.Tag) - if err != nil { - retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) - logrus.Debug(retErr.Error()) - return retErr - } - - if err := v1.ValidateID(img.ID); err != nil { - return err - } - - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.FullName()) - success := false - var lastErr error - for _, ep := range p.repoInfo.Index.Mirrors { - ep += "v1/" - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.FullName(), ep)) - if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { - // Don't report errors when pulling from mirrors. - logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) - continue - } - success = true - break - } - if !success { - for _, ep := range repoData.Endpoints { - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.FullName(), ep) - if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { - // It's not ideal that only the last error is returned, it would be better to concatenate the errors. - // As the error is also given to the output stream the user will see the error. 
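
downloadImage above tries the index's configured mirrors first and deliberately demotes their failures to debug logging; only the registry's own endpoints contribute to lastErr and thus to the error the user eventually sees. The shape of that two-tier loop, reduced to a standalone sketch (fetch and the URLs are stand-ins for pullImage and the real endpoint lists):

package main

import (
	"errors"
	"fmt"
	"log"
)

func fetch(ep string) error { return errors.New("unreachable: " + ep) }

func main() {
	mirrors := []string{"https://mirror-1.example.com/v1/"}
	endpoints := []string{"https://registry-1.example.com/v1/"}

	for _, ep := range mirrors {
		if err := fetch(ep); err != nil {
			log.Printf("debug: mirror %s failed: %v", ep, err) // never fatal
			continue
		}
		fmt.Println("pulled via mirror", ep)
		return
	}
	var lastErr error
	for _, ep := range endpoints {
		if err := fetch(ep); err != nil {
			lastErr = err // only real endpoints count toward the final error
			continue
		}
		fmt.Println("pulled via", ep)
		return
	}
	fmt.Println("pull failed:", lastErr)
}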
- lastErr = err - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) - continue - } - success = true - break - } - } - if !success { - err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.FullName(), lastErr) - progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) - return err - } - return nil -} - -func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { - var history []string - history, err = p.session.GetRemoteHistory(v1ID, endpoint) - if err != nil { - return err - } - if len(history) < 1 { - return fmt.Errorf("empty history for image %s", v1ID) - } - progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") - - var ( - descriptors []xfer.DownloadDescriptor - newHistory []image.History - imgJSON []byte - imgSize int64 - ) - - // Iterate over layers, in order from bottom-most to top-most. Download - // config for all layers and create descriptors. - for i := len(history) - 1; i >= 0; i-- { - v1LayerID := history[i] - imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) - if err != nil { - return err - } - - // Create a new-style config from the legacy configs - h, err := v1.HistoryFromConfig(imgJSON, false) - if err != nil { - return err - } - newHistory = append(newHistory, h) - - layerDescriptor := &v1LayerDescriptor{ - v1LayerID: v1LayerID, - indexName: p.repoInfo.Index.Name, - endpoint: endpoint, - v1IDService: p.v1IDService, - layersDownloaded: layersDownloaded, - layerSize: imgSize, - session: p.session, - } - - descriptors = append(descriptors, layerDescriptor) - } - - rootFS := image.NewRootFS() - resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) - if err != nil { - return err - } - defer release() - - config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) - if err != nil { - return err - } - - imageID, err := p.config.ImageStore.Create(config) - if err != nil { - return err - } - - if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { - return err - } - - return nil -} - -func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { - progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") - - retries := 5 - for j := 1; j <= retries; j++ { - imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) - if err != nil && j == retries { - progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") - return nil, 0, err - } else if err != nil { - time.Sleep(time.Duration(j) * 500 * time.Millisecond) - continue - } - - return imgJSON, imgSize, nil - } - - // not reached - return nil, 0, nil -} - -type v1LayerDescriptor struct { - v1LayerID string - indexName string - endpoint string - v1IDService *metadata.V1IDService - layersDownloaded *bool - layerSize int64 - session *registry.Session - tmpFile *os.File -} - -func (ld *v1LayerDescriptor) Key() string { - return "v1:" + ld.v1LayerID -} - -func (ld *v1LayerDescriptor) ID() string { - return stringid.TruncateID(ld.v1LayerID) -} - -func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { - return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) -} - -func (ld 
*v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { - progress.Update(progressOutput, ld.ID(), "Pulling fs layer") - layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) - if err != nil { - progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") - if uerr, ok := err.(*url.Error); ok { - err = uerr.Err - } - if terr, ok := err.(net.Error); ok && terr.Timeout() { - return nil, 0, err - } - return nil, 0, xfer.DoNotRetry{Err: err} - } - *ld.layersDownloaded = true - - ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob") - if err != nil { - layerReader.Close() - return nil, 0, err - } - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") - defer reader.Close() - - _, err = io.Copy(ld.tmpFile, reader) - if err != nil { - ld.Close() - return nil, 0, err - } - - progress.Update(progressOutput, ld.ID(), "Download complete") - - logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name()) - - ld.tmpFile.Seek(0, 0) - - // hand off the temporary file to the download manager, so it will only - // be closed once - tmpFile := ld.tmpFile - ld.tmpFile = nil - - return ioutils.NewReadCloserWrapper(tmpFile, func() error { - tmpFile.Close() - err := os.RemoveAll(tmpFile.Name()) - if err != nil { - logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) - } - return err - }), ld.layerSize, nil -} - -func (ld *v1LayerDescriptor) Close() { - if ld.tmpFile != nil { - ld.tmpFile.Close() - if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) - } - ld.tmpFile = nil - } -} - -func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { - // Cache mapping from this layer's DiffID to the blobsum - ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) -} diff --git a/distribution/pull_v2.go b/distribution/pull_v2.go deleted file mode 100644 index c78e221f0..000000000 --- a/distribution/pull_v2.go +++ /dev/null @@ -1,845 +0,0 @@ -package distribution - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/url" - "os" - "runtime" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "golang.org/x/net/context" -) - -var errRootFSMismatch = errors.New("layers from manifest don't match image configuration") - -// ImageConfigPullError is an error pulling the image config blob -// (only applies to schema2). -type ImageConfigPullError struct { - Err error -} - -// Error returns the error string for ImageConfigPullError. 
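
Download above hands its temp file to the download manager wrapped so that a single Close both closes the descriptor and unlinks the file, and it clears ld.tmpFile first so the descriptor's own Close becomes a no-op; that is what "closed once" means here. A standard-library-only sketch of such a self-cleaning wrapper, assuming nothing beyond os, io, and ioutil:

package main

import (
	"io"
	"io/ioutil"
	"log"
	"os"
)

// selfCleaningFile removes the underlying temp file when closed.
type selfCleaningFile struct {
	*os.File
}

func (f selfCleaningFile) Close() error {
	name := f.File.Name()
	f.File.Close()
	if err := os.Remove(name); err != nil {
		log.Printf("failed to remove temp file %s: %v", name, err)
		return err
	}
	return nil
}

func main() {
	tmp, err := ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		log.Fatal(err)
	}
	tmp.WriteString("layer bytes")
	tmp.Seek(0, os.SEEK_SET)

	var rc io.ReadCloser = selfCleaningFile{tmp}
	io.Copy(os.Stdout, rc)
	rc.Close() // closes and unlinks in one step
}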
-func (e ImageConfigPullError) Error() string { - return "error pulling image configuration: " + e.Err.Error() -} - -type v2Puller struct { - V2MetadataService *metadata.V2MetadataService - endpoint registry.APIEndpoint - config *ImagePullConfig - repoInfo *registry.RepositoryInfo - repo distribution.Repository - // confirmedV2 is set to true if we confirm we're talking to a v2 - // registry. This is used to limit fallbacks to the v1 protocol. - confirmedV2 bool -} - -func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { - // TODO(tiborvass): was ReceiveTimeout - p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") - if err != nil { - logrus.Warnf("Error getting v2 registry: %v", err) - return err - } - - if err = p.pullV2Repository(ctx, ref); err != nil { - if _, ok := err.(fallbackError); ok { - return err - } - if continueOnError(err) { - logrus.Errorf("Error trying v2 registry: %v", err) - return fallbackError{ - err: err, - confirmedV2: p.confirmedV2, - transportOK: true, - } - } - } - return err -} - -func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { - var layersDownloaded bool - if !reference.IsNameOnly(ref) { - layersDownloaded, err = p.pullV2Tag(ctx, ref) - if err != nil { - return err - } - } else { - tags, err := p.repo.Tags(ctx).All(ctx) - if err != nil { - // If this repository doesn't exist on V2, we should - // permit a fallback to V1. - return allowV1Fallback(err) - } - - // The v2 registry knows about this repository, so we will not - // allow fallback to the v1 protocol even if we encounter an - // error later on. - p.confirmedV2 = true - - for _, tag := range tags { - tagRef, err := reference.WithTag(ref, tag) - if err != nil { - return err - } - pulledNew, err := p.pullV2Tag(ctx, tagRef) - if err != nil { - // Since this is the pull-all-tags case, don't - // allow an error pulling a particular tag to - // make the whole pull fall back to v1. - if fallbackErr, ok := err.(fallbackError); ok { - return fallbackErr.err - } - return err - } - // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged - // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
- layersDownloaded = layersDownloaded || pulledNew - } - } - - writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) - - return nil -} - -type v2LayerDescriptor struct { - digest digest.Digest - repoInfo *registry.RepositoryInfo - repo distribution.Repository - V2MetadataService *metadata.V2MetadataService - tmpFile *os.File - verifier digest.Verifier - src distribution.Descriptor -} - -func (ld *v2LayerDescriptor) Key() string { - return "v2:" + ld.digest.String() -} - -func (ld *v2LayerDescriptor) ID() string { - return stringid.TruncateID(ld.digest.String()) -} - -func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { - return ld.V2MetadataService.GetDiffID(ld.digest) -} - -func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { - logrus.Debugf("pulling blob %q", ld.digest) - - var ( - err error - offset int64 - ) - - if ld.tmpFile == nil { - ld.tmpFile, err = createDownloadFile() - if err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - } else { - offset, err = ld.tmpFile.Seek(0, os.SEEK_END) - if err != nil { - logrus.Debugf("error seeking to end of download file: %v", err) - offset = 0 - - ld.tmpFile.Close() - if err := os.Remove(ld.tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) - } - ld.tmpFile, err = createDownloadFile() - if err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - } else if offset != 0 { - logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) - } - } - - tmpFile := ld.tmpFile - - layerDownload, err := ld.open(ctx) - if err != nil { - logrus.Errorf("Error initiating layer download: %v", err) - if err == distribution.ErrBlobUnknown { - return nil, 0, xfer.DoNotRetry{Err: err} - } - return nil, 0, retryOnError(err) - } - - if offset != 0 { - _, err := layerDownload.Seek(offset, os.SEEK_SET) - if err != nil { - if err := ld.truncateDownloadFile(); err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - return nil, 0, err - } - } - size, err := layerDownload.Seek(0, os.SEEK_END) - if err != nil { - // Seek failed, perhaps because there was no Content-Length - // header. This shouldn't fail the download, because we can - // still continue without a progress bar. - size = 0 - } else { - if size != 0 && offset > size { - logrus.Debug("Partial download is larger than full blob. Starting over") - offset = 0 - if err := ld.truncateDownloadFile(); err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - } - - // Restore the seek offset either at the beginning of the - // stream, or just after the last byte we have from previous - // attempts. 
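
Resumable downloads in the v2 Download method hinge on two seeks: SEEK_END on the partial temp file yields the number of bytes already on disk, and the same offset is then applied to the remote blob reader before copying resumes. A sketch of that arithmetic with an in-memory reader standing in for the registry stream:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"
)

func main() {
	blob := strings.NewReader("0123456789") // stands in for the remote layer

	partial, err := ioutil.TempFile("", "partial")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(partial.Name())
	partial.WriteString("0123") // bytes left behind by an interrupted attempt

	// Offset = size of what is already on disk.
	offset, err := partial.Seek(0, os.SEEK_END)
	if err != nil {
		log.Fatal(err)
	}
	// Skip the same amount on the source before resuming the copy.
	if _, err := blob.Seek(offset, os.SEEK_SET); err != nil {
		log.Fatal(err)
	}
	n, err := io.Copy(partial, blob)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("resumed at %d, copied %d more bytes\n", offset, n)
}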
- _, err = layerDownload.Seek(offset, os.SEEK_SET) - if err != nil { - return nil, 0, err - } - } - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") - defer reader.Close() - - if ld.verifier == nil { - ld.verifier, err = digest.NewDigestVerifier(ld.digest) - if err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - } - - _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) - if err != nil { - if err == transport.ErrWrongCodeForByteRange { - if err := ld.truncateDownloadFile(); err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - return nil, 0, err - } - return nil, 0, retryOnError(err) - } - - progress.Update(progressOutput, ld.ID(), "Verifying Checksum") - - if !ld.verifier.Verified() { - err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) - logrus.Error(err) - - // Allow a retry if this digest verification error happened - // after a resumed download. - if offset != 0 { - if err := ld.truncateDownloadFile(); err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - - return nil, 0, err - } - return nil, 0, xfer.DoNotRetry{Err: err} - } - - progress.Update(progressOutput, ld.ID(), "Download complete") - - logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) - - _, err = tmpFile.Seek(0, os.SEEK_SET) - if err != nil { - tmpFile.Close() - if err := os.Remove(tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) - } - ld.tmpFile = nil - ld.verifier = nil - return nil, 0, xfer.DoNotRetry{Err: err} - } - - // hand off the temporary file to the download manager, so it will only - // be closed once - ld.tmpFile = nil - - return ioutils.NewReadCloserWrapper(tmpFile, func() error { - tmpFile.Close() - err := os.RemoveAll(tmpFile.Name()) - if err != nil { - logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) - } - return err - }), size, nil -} - -func (ld *v2LayerDescriptor) Close() { - if ld.tmpFile != nil { - ld.tmpFile.Close() - if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) - } - } -} - -func (ld *v2LayerDescriptor) truncateDownloadFile() error { - // Need a new hash context since we will be redoing the download - ld.verifier = nil - - if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil { - logrus.Errorf("error seeking to beginning of download file: %v", err) - return err - } - - if err := ld.tmpFile.Truncate(0); err != nil { - logrus.Errorf("error truncating download file: %v", err) - return err - } - - return nil -} - -func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { - // Cache mapping from this layer's DiffID to the blobsum - ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()}) -} - -func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { - manSvc, err := p.repo.Manifests(ctx) - if err != nil { - return false, err - } - - var ( - manifest distribution.Manifest - tagOrDigest string // Used for logging/progress only - ) - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - // NOTE: not using TagService.Get, since it uses HEAD requests - // against the manifests endpoint, which are not supported by - // all registry versions. 
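
Once pullV2Tag below has a manifest in hand, it type-switches on the concrete wire format (schema1 signed manifest, schema2 manifest, or manifest list) and routes to the matching pull path. A sketch of that dispatch shape with local stand-in types, not the real distribution manifest types:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the three wire formats pullV2Tag dispatches on.
type signedManifest struct{}       // schema1
type deserializedManifest struct{} // schema2
type manifestList struct{}         // manifest list

func pull(manifest interface{}) error {
	switch manifest.(type) {
	case *signedManifest:
		fmt.Println("schema1 path: build config from v1Compatibility history")
	case *deserializedManifest:
		fmt.Println("schema2 path: fetch the config blob by digest")
	case *manifestList:
		fmt.Println("manifest list path: pick the platform, then recurse")
	default:
		return errors.New("unsupported manifest format")
	}
	return nil
}

func main() {
	if err := pull(&manifestList{}); err != nil {
		fmt.Println(err)
	}
}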
- manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) - if err != nil { - return false, allowV1Fallback(err) - } - tagOrDigest = tagged.Tag() - } else if digested, isDigested := ref.(reference.Canonical); isDigested { - manifest, err = manSvc.Get(ctx, digested.Digest()) - if err != nil { - return false, err - } - tagOrDigest = digested.Digest().String() - } else { - return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String()) - } - - if manifest == nil { - return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) - } - - // If manSvc.Get succeeded, we can be confident that the registry on - // the other side speaks the v2 protocol. - p.confirmedV2 = true - - logrus.Debugf("Pulling ref from V2 registry: %s", ref.String()) - progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name()) - - var ( - imageID image.ID - manifestDigest digest.Digest - ) - - switch v := manifest.(type) { - case *schema1.SignedManifest: - imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v) - if err != nil { - return false, err - } - case *schema2.DeserializedManifest: - imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v) - if err != nil { - return false, err - } - case *manifestlist.DeserializedManifestList: - imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v) - if err != nil { - return false, err - } - default: - return false, errors.New("unsupported manifest format") - } - - progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) - - oldTagImageID, err := p.config.ReferenceStore.Get(ref) - if err == nil { - if oldTagImageID == imageID { - return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID) - } - } else if err != reference.ErrDoesNotExist { - return false, err - } - - if canonical, ok := ref.(reference.Canonical); ok { - if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil { - return false, err - } - } else { - if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID); err != nil { - return false, err - } - if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil { - return false, err - } - } - return true, nil -} - -func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { - var verifiedManifest *schema1.Manifest - verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) - if err != nil { - return "", "", err - } - - rootFS := image.NewRootFS() - - if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil { - return "", "", err - } - - // remove duplicate layers and check parent chain validity - err = fixManifestLayers(verifiedManifest) - if err != nil { - return "", "", err - } - - var descriptors []xfer.DownloadDescriptor - - // Image history converted to the new format - var history []image.History - - // Note that the order of this loop is in the direction of bottom-most - // to top-most, so that the downloads slice gets ordered correctly. 
- for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { - blobSum := verifiedManifest.FSLayers[i].BlobSum - - var throwAway struct { - ThrowAway bool `json:"throwaway,omitempty"` - } - if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { - return "", "", err - } - - h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) - if err != nil { - return "", "", err - } - history = append(history, h) - - if throwAway.ThrowAway { - continue - } - - layerDescriptor := &v2LayerDescriptor{ - digest: blobSum, - repoInfo: p.repoInfo, - repo: p.repo, - V2MetadataService: p.V2MetadataService, - } - - descriptors = append(descriptors, layerDescriptor) - } - - resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) - if err != nil { - return "", "", err - } - defer release() - - config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) - if err != nil { - return "", "", err - } - - imageID, err = p.config.ImageStore.Create(config) - if err != nil { - return "", "", err - } - - manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) - - return imageID, manifestDigest, nil -} - -func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { - manifestDigest, err = schema2ManifestDigest(ref, mfst) - if err != nil { - return "", "", err - } - - target := mfst.Target() - imageID = image.ID(target.Digest) - if _, err := p.config.ImageStore.Get(imageID); err == nil { - // If the image already exists locally, no need to pull - // anything. - return imageID, manifestDigest, nil - } - - var descriptors []xfer.DownloadDescriptor - - // Note that the order of this loop is in the direction of bottom-most - // to top-most, so that the downloads slice gets ordered correctly. - for _, d := range mfst.Layers { - layerDescriptor := &v2LayerDescriptor{ - digest: d.Digest, - repo: p.repo, - repoInfo: p.repoInfo, - V2MetadataService: p.V2MetadataService, - src: d, - } - - descriptors = append(descriptors, layerDescriptor) - } - - configChan := make(chan []byte, 1) - errChan := make(chan error, 1) - var cancel func() - ctx, cancel = context.WithCancel(ctx) - - // Pull the image config - go func() { - configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest) - if err != nil { - errChan <- ImageConfigPullError{Err: err} - cancel() - return - } - configChan <- configJSON - }() - - var ( - configJSON []byte // raw serialized image config - unmarshalledConfig image.Image // deserialized image config - downloadRootFS image.RootFS // rootFS to use for registering layers. 
- ) - if runtime.GOOS == "windows" { - configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan) - if err != nil { - return "", "", err - } - if unmarshalledConfig.RootFS == nil { - return "", "", errors.New("image config has no rootfs section") - } - downloadRootFS = *unmarshalledConfig.RootFS - downloadRootFS.DiffIDs = []layer.DiffID{} - } else { - downloadRootFS = *image.NewRootFS() - } - - rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput) - if err != nil { - if configJSON != nil { - // Already received the config - return "", "", err - } - select { - case err = <-errChan: - return "", "", err - default: - cancel() - select { - case <-configChan: - case <-errChan: - } - return "", "", err - } - } - defer release() - - if configJSON == nil { - configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan) - if err != nil { - return "", "", err - } - } - - // The DiffIDs returned in rootFS MUST match those in the config. - // Otherwise the image config could be referencing layers that aren't - // included in the manifest. - if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) { - return "", "", errRootFSMismatch - } - - for i := range rootFS.DiffIDs { - if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] { - return "", "", errRootFSMismatch - } - } - - imageID, err = p.config.ImageStore.Create(configJSON) - if err != nil { - return "", "", err - } - - return imageID, manifestDigest, nil -} - -func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) { - select { - case configJSON := <-configChan: - var unmarshalledConfig image.Image - if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil { - return nil, image.Image{}, err - } - return configJSON, unmarshalledConfig, nil - case err := <-errChan: - return nil, image.Image{}, err - // Don't need a case for ctx.Done in the select because cancellation - // will trigger an error in p.pullSchema2ImageConfig. - } -} - -// pullManifestList handles "manifest lists" which point to various -// platform-specifc manifests. -func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) { - manifestListDigest, err = schema2ManifestDigest(ref, mfstList) - if err != nil { - return "", "", err - } - - var manifestDigest digest.Digest - for _, manifestDescriptor := range mfstList.Manifests { - // TODO(aaronl): The manifest list spec supports optional - // "features" and "variant" fields. These are not yet used. - // Once they are, their values should be interpreted here. 
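
pullManifestList's platform match, just below, is a linear scan for the first entry whose architecture and OS equal the running daemon's runtime.GOARCH and runtime.GOOS. The same selection in isolation; the platform struct here is a local stand-in for the manifestlist descriptor's Platform fields:

package main

import (
	"errors"
	"fmt"
	"runtime"
)

type platform struct{ Architecture, OS string }

func selectManifest(entries []platform) (platform, error) {
	for _, p := range entries {
		if p.Architecture == runtime.GOARCH && p.OS == runtime.GOOS {
			return p, nil
		}
	}
	return platform{}, errors.New("no supported platform found in manifest list")
}

func main() {
	entries := []platform{
		{Architecture: "amd64", OS: "linux"},
		{Architecture: "arm64", OS: "linux"},
		{Architecture: "amd64", OS: "windows"},
	}
	m, err := selectManifest(entries)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("selected %s/%s\n", m.OS, m.Architecture)
}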
- if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { - manifestDigest = manifestDescriptor.Digest - break - } - } - - if manifestDigest == "" { - return "", "", errors.New("no supported platform found in manifest list") - } - - manSvc, err := p.repo.Manifests(ctx) - if err != nil { - return "", "", err - } - - manifest, err := manSvc.Get(ctx, manifestDigest) - if err != nil { - return "", "", err - } - - manifestRef, err := reference.WithDigest(ref, manifestDigest) - if err != nil { - return "", "", err - } - - switch v := manifest.(type) { - case *schema1.SignedManifest: - imageID, _, err = p.pullSchema1(ctx, manifestRef, v) - if err != nil { - return "", "", err - } - case *schema2.DeserializedManifest: - imageID, _, err = p.pullSchema2(ctx, manifestRef, v) - if err != nil { - return "", "", err - } - default: - return "", "", errors.New("unsupported manifest format") - } - - return imageID, manifestListDigest, err -} - -func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { - blobs := p.repo.Blobs(ctx) - configJSON, err = blobs.Get(ctx, dgst) - if err != nil { - return nil, err - } - - // Verify image config digest - verifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - return nil, err - } - if _, err := verifier.Write(configJSON); err != nil { - return nil, err - } - if !verifier.Verified() { - err := fmt.Errorf("image config verification failed for digest %s", dgst) - logrus.Error(err) - return nil, err - } - - return configJSON, nil -} - -// schema2ManifestDigest computes the manifest digest, and, if pulling by -// digest, ensures that it matches the requested digest. -func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { - _, canonical, err := mfst.Payload() - if err != nil { - return "", err - } - - // If pull by digest, then verify the manifest digest. - if digested, isDigested := ref.(reference.Canonical); isDigested { - verifier, err := digest.NewDigestVerifier(digested.Digest()) - if err != nil { - return "", err - } - if _, err := verifier.Write(canonical); err != nil { - return "", err - } - if !verifier.Verified() { - err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) - logrus.Error(err) - return "", err - } - return digested.Digest(), nil - } - - return digest.FromBytes(canonical), nil -} - -// allowV1Fallback checks if the error is a possible reason to fallback to v1 -// (even if confirmedV2 has been set already), and if so, wraps the error in -// a fallbackError with confirmedV2 set to false. Otherwise, it returns the -// error unmodified. -func allowV1Fallback(err error) error { - switch v := err.(type) { - case errcode.Errors: - if len(v) != 0 { - if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) { - return fallbackError{ - err: err, - confirmedV2: false, - transportOK: true, - } - } - } - case errcode.Error: - if shouldV2Fallback(v) { - return fallbackError{ - err: err, - confirmedV2: false, - transportOK: true, - } - } - case *url.Error: - if v.Err == auth.ErrNoBasicAuthCredentials { - return fallbackError{err: err, confirmedV2: false} - } - } - - return err -} - -func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { - // If pull by digest, then verify the manifest digest. NOTE: It is - // important to do this first, before any other content validation. 
If the - // digest cannot be verified, don't even bother with those other things. - if digested, isCanonical := ref.(reference.Canonical); isCanonical { - verifier, err := digest.NewDigestVerifier(digested.Digest()) - if err != nil { - return nil, err - } - if _, err := verifier.Write(signedManifest.Canonical); err != nil { - return nil, err - } - if !verifier.Verified() { - err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) - logrus.Error(err) - return nil, err - } - } - m = &signedManifest.Manifest - - if m.SchemaVersion != 1 { - return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String()) - } - if len(m.FSLayers) != len(m.History) { - return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) - } - if len(m.FSLayers) == 0 { - return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) - } - return m, nil -} - -// fixManifestLayers removes repeated layers from the manifest and checks the -// correctness of the parent chain. -func fixManifestLayers(m *schema1.Manifest) error { - imgs := make([]*image.V1Image, len(m.FSLayers)) - for i := range m.FSLayers { - img := &image.V1Image{} - - if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { - return err - } - - imgs[i] = img - if err := v1.ValidateID(img.ID); err != nil { - return err - } - } - - if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { - // Windows base layer can point to a base layer parent that is not in manifest. - return errors.New("Invalid parent ID in the base layer of the image.") - } - - // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) - - var lastID string - for _, img := range imgs { - // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap[lastID] = struct{}{} - } - - // backwards loop so that we keep the remaining indexes after removing items - for i := len(imgs) - 2; i >= 0; i-- { - if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue - m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) - m.History = append(m.History[:i], m.History[i+1:]...) - } else if imgs[i].Parent != imgs[i+1].ID { - return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) - } - } - - return nil -} - -func createDownloadFile() (*os.File, error) { - return ioutil.TempFile("", "GetImageBlob") -} diff --git a/distribution/pull_v2_test.go b/distribution/pull_v2_test.go deleted file mode 100644 index 8555c81e6..000000000 --- a/distribution/pull_v2_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package distribution - -import ( - "encoding/json" - "io/ioutil" - "reflect" - "runtime" - "strings" - "testing" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/docker/reference" -) - -// TestFixManifestLayers checks that fixManifestLayers removes a duplicate -// layer, and that it makes no changes to the manifest when called a second -// time, after the duplicate is removed. 
-func TestFixManifestLayers(t *testing.T) { - duplicateLayerManifest := schema1.Manifest{ - FSLayers: []schema1.FSLayer{ - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, - }, - History: []schema1.History{ - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, - }, - } - - duplicateLayerManifestExpectedOutput := schema1.Manifest{ - FSLayers: []schema1.FSLayer{ - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, - }, - History: []schema1.History{ - {V1Compatibility: 
"{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" 
./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, - }, - } - - if err := fixManifestLayers(&duplicateLayerManifest); err != nil { - t.Fatalf("unexpected error from fixManifestLayers: %v", err) - } - - if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { - t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest") - } - - // Run fixManifestLayers again and confirm that it doesn't change the - // manifest (which no longer has duplicate layers). - if err := fixManifestLayers(&duplicateLayerManifest); err != nil { - t.Fatalf("unexpected error from fixManifestLayers: %v", err) - } - - if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { - t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)") - } -} - -// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails -// if the base layer configuration specifies a parent. 
-func TestFixManifestLayersBaseLayerParent(t *testing.T) { - // TODO Windows: Fix this unit text - if runtime.GOOS == "windows" { - t.Skip("Needs fixing on Windows") - } - duplicateLayerManifest := schema1.Manifest{ - FSLayers: []schema1.FSLayer{ - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, - }, - History: []schema1.History{ - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, - }, - } - - if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID in the base layer of the image.") { - t.Fatalf("expected an invalid parent ID error from fixManifestLayers") - } -} - -// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails -// if an image configuration specifies a parent that doesn't directly 
follow -// that (deduplicated) image in the image history. -func TestFixManifestLayersBadParent(t *testing.T) { - duplicateLayerManifest := schema1.Manifest{ - FSLayers: []schema1.FSLayer{ - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, - }, - History: []schema1.History{ - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, - }, - } - - if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { - t.Fatalf("expected an invalid parent ID error from fixManifestLayers") - } -} - -// TestValidateManifest verifies the validateManifest function -func TestValidateManifest(t *testing.T) { - // TODO Windows: Fix this unit text - if runtime.GOOS == "windows" { - t.Skip("Needs fixing on Windows") - } - expectedDigest, err := 
reference.ParseNamed("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd") - if err != nil { - t.Fatal("could not parse reference") - } - expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - - // Good manifest - - goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") - if err != nil { - t.Fatal("error reading fixture:", err) - } - - var goodSignedManifest schema1.SignedManifest - err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) - if err != nil { - t.Fatal("error unmarshaling manifest:", err) - } - - verifiedManifest, err := verifySchema1Manifest(&goodSignedManifest, expectedDigest) - if err != nil { - t.Fatal("validateManifest failed:", err) - } - - if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { - t.Fatal("unexpected FSLayer in good manifest") - } - - // "Extra data" manifest - - extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") - if err != nil { - t.Fatal("error reading fixture:", err) - } - - var extraDataSignedManifest schema1.SignedManifest - err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) - if err != nil { - t.Fatal("error unmarshaling manifest:", err) - } - - verifiedManifest, err = verifySchema1Manifest(&extraDataSignedManifest, expectedDigest) - if err != nil { - t.Fatal("validateManifest failed:", err) - } - - if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { - t.Fatal("unexpected FSLayer in extra data manifest") - } - - // Bad manifest - - badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") - if err != nil { - t.Fatal("error reading fixture:", err) - } - - var badSignedManifest schema1.SignedManifest - err = json.Unmarshal(badManifestBytes, &badSignedManifest) - if err != nil { - t.Fatal("error unmarshaling manifest:", err) - } - - verifiedManifest, err = verifySchema1Manifest(&badSignedManifest, expectedDigest) - if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { - t.Fatal("expected validateManifest to fail with digest error") - } -} diff --git a/distribution/pull_v2_unix.go b/distribution/pull_v2_unix.go deleted file mode 100644 index cdd7806ad..000000000 --- a/distribution/pull_v2_unix.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package distribution - -import ( - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/docker/image" -) - -func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error { - return nil -} - -func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { - blobs := ld.repo.Blobs(ctx) - return blobs.Open(ctx, ld.digest) -} diff --git a/distribution/pull_v2_windows.go b/distribution/pull_v2_windows.go deleted file mode 100644 index f98825d0b..000000000 --- a/distribution/pull_v2_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build windows - -package distribution - -import ( - "encoding/json" - "fmt" - "net/http" - "os" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/image" -) - -func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error { - v1img := 
&image.V1Image{} - if err := json.Unmarshal([]byte(m.History[len(m.History)-1].V1Compatibility), v1img); err != nil { - return err - } - if v1img.Parent == "" { - return fmt.Errorf("Last layer %q does not have a base layer reference", v1img.ID) - } - // There must be an image that already references the baselayer. - for _, img := range is.Map() { - if img.RootFS.Type == image.TypeLayersWithBase && img.RootFS.BaseLayerID() == v1img.Parent { - rootFS.BaseLayer = img.RootFS.BaseLayer - rootFS.Type = image.TypeLayersWithBase - return nil - } - } - return fmt.Errorf("Invalid base layer %q", v1img.Parent) -} - -var _ distribution.Describable = &v2LayerDescriptor{} - -func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor { - if ld.src.MediaType == schema2.MediaTypeForeignLayer && len(ld.src.URLs) > 0 { - return ld.src - } - return distribution.Descriptor{} -} - -func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { - if len(ld.src.URLs) == 0 { - blobs := ld.repo.Blobs(ctx) - return blobs.Open(ctx, ld.digest) - } - - var ( - err error - rsc distribution.ReadSeekCloser - ) - - // Find the first URL that results in a 200 result code. - for _, url := range ld.src.URLs { - rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil) - _, err = rsc.Seek(0, os.SEEK_SET) - if err == nil { - break - } - rsc.Close() - rsc = nil - } - return rsc, err -} diff --git a/distribution/push.go b/distribution/push.go deleted file mode 100644 index f3b54b7cf..000000000 --- a/distribution/push.go +++ /dev/null @@ -1,219 +0,0 @@ -package distribution - -import ( - "bufio" - "compress/gzip" - "fmt" - "io" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "github.com/docker/libtrust" - "golang.org/x/net/context" -) - -// ImagePushConfig stores push configuration. -type ImagePushConfig struct { - // MetaHeaders store HTTP headers with metadata about the image - MetaHeaders map[string][]string - // AuthConfig holds authentication credentials for authenticating with - // the registry. - AuthConfig *types.AuthConfig - // ProgressOutput is the interface for showing the status of the push - // operation. - ProgressOutput progress.Output - // RegistryService is the registry service to use for TLS configuration - // and endpoint lookup. - RegistryService registry.Service - // ImageEventLogger notifies events for a given image - ImageEventLogger func(id, name, action string) - // MetadataStore is the storage backend for distribution-specific - // metadata. - MetadataStore metadata.Store - // LayerStore manages layers. - LayerStore layer.Store - // ImageStore manages images. - ImageStore image.Store - // ReferenceStore manages tags. - ReferenceStore reference.Store - // TrustKey is the private key for legacy signatures. This is typically - // an ephemeral key, since these signatures are no longer verified. - TrustKey libtrust.PrivateKey - // UploadManager dispatches uploads. - UploadManager *xfer.LayerUploadManager -} - -// Pusher is an interface that abstracts pushing for different API versions. -type Pusher interface { - // Push tries to push the image configured at the creation of Pusher. 
- // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. - // - // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. - Push(ctx context.Context) error -} - -const compressionBufSize = 32768 - -// NewPusher creates a new Pusher interface that will push to either a v1 or v2 -// registry. The endpoint argument contains a Version field that determines -// whether a v1 or v2 pusher will be created. The other parameters are passed -// through to the underlying pusher implementation for use during the actual -// push operation. -func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { - switch endpoint.Version { - case registry.APIVersion2: - return &v2Pusher{ - v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), - ref: ref, - endpoint: endpoint, - repoInfo: repoInfo, - config: imagePushConfig, - }, nil - case registry.APIVersion1: - return &v1Pusher{ - v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), - ref: ref, - endpoint: endpoint, - repoInfo: repoInfo, - config: imagePushConfig, - }, nil - } - return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) -} - -// Push initiates a push operation on ref. -// ref is the specific variant of the image to be pushed. -// If no tag is provided, all tags will be pushed. -func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { - // FIXME: Allow to interrupt current push when new push of same image is done. - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) - if err != nil { - return err - } - - endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo.Hostname()) - if err != nil { - return err - } - - progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.FullName()) - - associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo) - if len(associations) == 0 { - return fmt.Errorf("An image does not exist locally with the tag: %s", repoInfo.Name()) - } - - var ( - lastErr error - - // confirmedV2 is set to true if a push attempt managed to - // confirm that it was talking to a v2 registry. This will - // prevent fallback to the v1 protocol. - confirmedV2 bool - - // confirmedTLSRegistries is a map indicating which registries - // are known to be using TLS. There should never be a plaintext - // retry for any of these. - confirmedTLSRegistries = make(map[string]struct{}) - ) - - for _, endpoint := range endpoints { - if confirmedV2 && endpoint.Version == registry.APIVersion1 { - logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) - continue - } - - if endpoint.URL.Scheme != "https" { - if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { - logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) - continue - } - } - - logrus.Debugf("Trying to push %s to %s %s", repoInfo.FullName(), endpoint.URL, endpoint.Version) - - pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) - if err != nil { - lastErr = err - continue - } - if err := pusher.Push(ctx); err != nil { - // Was this push cancelled? If so, don't try to fall - // back. 
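The removed Push() applies two skip rules while walking endpoints: drop v1 endpoints once any attempt has confirmed a v2 registry, and never retry in plaintext against a host already confirmed to speak TLS. A small sketch of just that filtering, with a hypothetical endpoint struct standing in for registry.APIEndpoint:

package main

import "fmt"

type endpoint struct {
	scheme, host string
	version      int
}

// filterEndpoints drops endpoints the deleted Push() would have skipped.
func filterEndpoints(eps []endpoint, confirmedV2 bool, confirmedTLS map[string]struct{}) []endpoint {
	var out []endpoint
	for _, ep := range eps {
		if confirmedV2 && ep.version == 1 {
			continue // a v2 registry was already detected
		}
		if _, tls := confirmedTLS[ep.host]; tls && ep.scheme != "https" {
			continue // never retry in plaintext against a known-TLS host
		}
		out = append(out, ep)
	}
	return out
}

func main() {
	eps := []endpoint{
		{"https", "registry.example.com", 2},
		{"http", "registry.example.com", 2},
		{"https", "registry.example.com", 1},
	}
	tls := map[string]struct{}{"registry.example.com": {}}
	fmt.Println(filterEndpoints(eps, true, tls)) // only the https v2 endpoint survives
}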
- select { - case <-ctx.Done(): - default: - if fallbackErr, ok := err.(fallbackError); ok { - confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 - if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { - confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} - } - err = fallbackErr.err - lastErr = err - logrus.Errorf("Attempting next endpoint for push after error: %v", err) - continue - } - } - - logrus.Errorf("Not continuing with push after error: %v", err) - return err - } - - imagePushConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "push") - return nil - } - - if lastErr == nil { - lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.FullName()) - } - return lastErr -} - -// compress returns an io.ReadCloser which will supply a compressed version of -// the provided Reader. The caller must close the ReadCloser after reading the -// compressed data. -// -// Note that this function returns a reader instead of taking a writer as an -// argument so that it can be used with httpBlobWriter's ReadFrom method. -// Using httpBlobWriter's Write method would send a PATCH request for every -// Write call. -// -// The second return value is a channel that gets closed when the goroutine -// is finished. This allows the caller to make sure the goroutine finishes -// before it releases any resources connected with the reader that was -// passed in. -func compress(in io.Reader) (io.ReadCloser, chan struct{}) { - compressionDone := make(chan struct{}) - - pipeReader, pipeWriter := io.Pipe() - // Use a bufio.Writer to avoid excessive chunking in HTTP request. - bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) - compressor := gzip.NewWriter(bufWriter) - - go func() { - _, err := io.Copy(compressor, in) - if err == nil { - err = compressor.Close() - } - if err == nil { - err = bufWriter.Flush() - } - if err != nil { - pipeWriter.CloseWithError(err) - } else { - pipeWriter.Close() - } - close(compressionDone) - }() - - return pipeReader, compressionDone -} diff --git a/distribution/push_v1.go b/distribution/push_v1.go deleted file mode 100644 index b6e4a1304..000000000 --- a/distribution/push_v1.go +++ /dev/null @@ -1,454 +0,0 @@ -package distribution - -import ( - "fmt" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "golang.org/x/net/context" -) - -type v1Pusher struct { - v1IDService *metadata.V1IDService - endpoint registry.APIEndpoint - ref reference.Named - repoInfo *registry.RepositoryInfo - config *ImagePushConfig - session *registry.Session -} - -func (p *v1Pusher) Push(ctx context.Context) error { - tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) - if err != nil { - return err - } - // Adds Docker-specific headers as well as user-specified headers (metaHeaders) - tr := transport.NewTransport( - // TODO(tiborvass): was NoTimeout - registry.NewTransport(tlsConfig), - registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., - ) - client := registry.HTTPClient(tr) - v1Endpoint, err := 
p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) - if err != nil { - logrus.Debugf("Could not get v1 endpoint: %v", err) - return fallbackError{err: err} - } - p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) - if err != nil { - // TODO(dmcgowan): Check if should fallback - return fallbackError{err: err} - } - if err := p.pushRepository(ctx); err != nil { - // TODO(dmcgowan): Check if should fallback - return err - } - return nil -} - -// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an -// image being pushed to a v1 registry. -type v1Image interface { - Config() []byte - Layer() layer.Layer - V1ID() string -} - -type v1ImageCommon struct { - layer layer.Layer - config []byte - v1ID string -} - -func (common *v1ImageCommon) Config() []byte { - return common.config -} - -func (common *v1ImageCommon) V1ID() string { - return common.v1ID -} - -func (common *v1ImageCommon) Layer() layer.Layer { - return common.layer -} - -// v1TopImage defines a runnable (top layer) image being pushed to a v1 -// registry. -type v1TopImage struct { - v1ImageCommon - imageID image.ID -} - -func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) { - v1ID := digest.Digest(imageID).Hex() - parentV1ID := "" - if parent != nil { - parentV1ID = parent.V1ID() - } - - config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false) - if err != nil { - return nil, err - } - - return &v1TopImage{ - v1ImageCommon: v1ImageCommon{ - v1ID: v1ID, - config: config, - layer: l, - }, - imageID: imageID, - }, nil -} - -// v1DependencyImage defines a dependency layer being pushed to a v1 registry. -type v1DependencyImage struct { - v1ImageCommon -} - -func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) { - v1ID := digest.Digest(l.ChainID()).Hex() - - config := "" - if parent != nil { - config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) - } else { - config = fmt.Sprintf(`{"id":"%s"}`, v1ID) - } - return &v1DependencyImage{ - v1ImageCommon: v1ImageCommon{ - v1ID: v1ID, - config: []byte(config), - layer: l, - }, - }, nil -} - -// Retrieve the all the images to be uploaded in the correct order -func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []layer.Layer, err error) { - tagsByImage = make(map[image.ID][]string) - - // Ignore digest references - if _, isCanonical := p.ref.(reference.Canonical); isCanonical { - return - } - - tagged, isTagged := p.ref.(reference.NamedTagged) - if isTagged { - // Push a specific tag - var imgID image.ID - imgID, err = p.config.ReferenceStore.Get(p.ref) - if err != nil { - return - } - - imageList, err = p.imageListForTag(imgID, nil, &referencedLayers) - if err != nil { - return - } - - tagsByImage[imgID] = []string{tagged.Tag()} - - return - } - - imagesSeen := make(map[image.ID]struct{}) - dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage) - - associations := p.config.ReferenceStore.ReferencesByName(p.ref) - for _, association := range associations { - if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged { - // Ignore digest references. 
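The newV1DependencyImage above assembles the stub v1 config with fmt.Sprintf, which is safe here because the IDs are hex digests. For comparison, a sketch of the same stub built with encoding/json, where escaping is handled by the marshaller (v1Stub is a hypothetical type, not part of the patch):

package main

import (
	"encoding/json"
	"fmt"
)

type v1Stub struct {
	ID     string `json:"id"`
	Parent string `json:"parent,omitempty"`
}

// stubConfig produces the same {"id":...,"parent":...} document as the
// Sprintf version, omitting parent for base layers.
func stubConfig(v1ID, parentV1ID string) ([]byte, error) {
	return json.Marshal(v1Stub{ID: v1ID, Parent: parentV1ID})
}

func main() {
	b, _ := stubConfig("3b38edc9", "ec3025ca")
	fmt.Println(string(b)) // {"id":"3b38edc9","parent":"ec3025ca"}
}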
- continue - } - - tagsByImage[association.ImageID] = append(tagsByImage[association.ImageID], tagged.Tag()) - - if _, present := imagesSeen[association.ImageID]; present { - // Skip generating image list for already-seen image - continue - } - imagesSeen[association.ImageID] = struct{}{} - - imageListForThisTag, err := p.imageListForTag(association.ImageID, dependenciesSeen, &referencedLayers) - if err != nil { - return nil, nil, nil, err - } - - // append to main image list - imageList = append(imageList, imageListForThisTag...) - } - if len(imageList) == 0 { - return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag") - } - logrus.Debugf("Image list: %v", imageList) - logrus.Debugf("Tags by image: %v", tagsByImage) - - return -} - -func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]layer.Layer) (imageListForThisTag []v1Image, err error) { - img, err := p.config.ImageStore.Get(imgID) - if err != nil { - return nil, err - } - - topLayerID := img.RootFS.ChainID() - - var l layer.Layer - if topLayerID == "" { - l = layer.EmptyLayer - } else { - l, err = p.config.LayerStore.Get(topLayerID) - *referencedLayers = append(*referencedLayers, l) - if err != nil { - return nil, fmt.Errorf("failed to get top layer from image: %v", err) - } - } - - dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen) - if err != nil { - return nil, err - } - - topImage, err := newV1TopImage(imgID, img, l, parent) - if err != nil { - return nil, err - } - - imageListForThisTag = append(dependencyImages, topImage) - - return -} - -func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) { - if l == nil { - return nil, nil, nil - } - - imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen) - - if dependenciesSeen != nil { - if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { - // This layer is already on the list, we can ignore it - // and all its parents. - return imageListForThisTag, dependencyImage, nil - } - } - - dependencyImage, err := newV1DependencyImage(l, parent) - if err != nil { - return nil, nil, err - } - imageListForThisTag = append(imageListForThisTag, dependencyImage) - - if dependenciesSeen != nil { - dependenciesSeen[l.ChainID()] = dependencyImage - } - - return imageListForThisTag, dependencyImage, nil -} - -// createImageIndex returns an index of an image's layer IDs and tags. -func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData { - var imageIndex []*registry.ImgData - for _, img := range images { - v1ID := img.V1ID() - - if topImage, isTopImage := img.(*v1TopImage); isTopImage { - if tags, hasTags := tags[topImage.imageID]; hasTags { - // If an image has tags you must add an entry in the image index - // for each tag - for _, tag := range tags { - imageIndex = append(imageIndex, ®istry.ImgData{ - ID: v1ID, - Tag: tag, - }) - } - continue - } - } - - // If the image does not have a tag it still needs to be sent to the - // registry with an empty tag so that it is associated with the repository - imageIndex = append(imageIndex, ®istry.ImgData{ - ID: v1ID, - Tag: "", - }) - } - return imageIndex -} - -// lookupImageOnEndpoint checks the specified endpoint to see if an image exists -// and if it is absent then it sends the image id to the channel to be pushed. 
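The createImageIndex function encodes a simple rule: tagged top images get one index entry per tag, and everything else gets a single empty-tag entry so it is still associated with the repository. A compact sketch of that rule, with imgData mirroring registry.ImgData:

package main

import "fmt"

type imgData struct{ ID, Tag string }

// imageIndex expands each image into (id, tag) pairs, falling back to an
// empty tag for images that are only dependencies.
func imageIndex(ids []string, tags map[string][]string) []*imgData {
	var index []*imgData
	for _, id := range ids {
		if ts, ok := tags[id]; ok && len(ts) > 0 {
			for _, t := range ts {
				index = append(index, &imgData{ID: id, Tag: t})
			}
			continue
		}
		index = append(index, &imgData{ID: id, Tag: ""})
	}
	return index
}

func main() {
	idx := imageIndex([]string{"dep", "top"}, map[string][]string{"top": {"latest", "v1"}})
	for _, e := range idx {
		fmt.Printf("%s %q\n", e.ID, e.Tag)
	}
}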
-func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) { - defer wg.Done() - for image := range images { - v1ID := image.V1ID() - truncID := stringid.TruncateID(image.Layer().DiffID().String()) - if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil { - logrus.Errorf("Error in LookupRemoteImage: %s", err) - imagesToPush <- v1ID - progress.Update(p.config.ProgressOutput, truncID, "Waiting") - } else { - progress.Update(p.config.ProgressOutput, truncID, "Already exists") - } - } -} - -func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error { - workerCount := len(imageList) - // start a maximum of 5 workers to check if images exist on the specified endpoint. - if workerCount > 5 { - workerCount = 5 - } - var ( - wg = &sync.WaitGroup{} - imageData = make(chan v1Image, workerCount*2) - imagesToPush = make(chan string, workerCount*2) - pushes = make(chan map[string]struct{}, 1) - ) - for i := 0; i < workerCount; i++ { - wg.Add(1) - go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush) - } - // start a go routine that consumes the images to push - go func() { - shouldPush := make(map[string]struct{}) - for id := range imagesToPush { - shouldPush[id] = struct{}{} - } - pushes <- shouldPush - }() - for _, v1Image := range imageList { - imageData <- v1Image - } - // close the channel to notify the workers that there will be no more images to check. - close(imageData) - wg.Wait() - close(imagesToPush) - // wait for all the images that require pushes to be collected into a consumable map. - shouldPush := <-pushes - // finish by pushing any images and tags to the endpoint. The order that the images are pushed - // is very important that is why we are still iterating over the ordered list of imageIDs. - for _, img := range imageList { - v1ID := img.V1ID() - if _, push := shouldPush[v1ID]; push { - if _, err := p.pushImage(ctx, img, endpoint); err != nil { - // FIXME: Continue on error? - return err - } - } - if topImage, isTopImage := img.(*v1TopImage); isTopImage { - for _, tag := range tags[topImage.imageID] { - progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+p.repoInfo.RemoteName()+"/tags/"+tag) - if err := p.session.PushRegistryTag(p.repoInfo, v1ID, tag, endpoint); err != nil { - return err - } - } - } - } - return nil -} - -// pushRepository pushes layers that do not already exist on the registry. -func (p *v1Pusher) pushRepository(ctx context.Context) error { - imgList, tags, referencedLayers, err := p.getImageList() - defer func() { - for _, l := range referencedLayers { - p.config.LayerStore.Release(l) - } - }() - if err != nil { - return err - } - - imageIndex := createImageIndex(imgList, tags) - for _, data := range imageIndex { - logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) - } - - // Register all the images in a repository with the registry - // If an image is not in this list it will not be associated with the repository - repoData, err := p.session.PushImageJSONIndex(p.repoInfo, imageIndex, false, nil) - if err != nil { - return err - } - // push the repository to each of the endpoints only if it does not exist. 
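lookupImageOnEndpoint and pushImageToEndpoint above coordinate at most five probe workers, a collector goroutine, and two channel closes to shut the pipeline down in order. A self-contained sketch of that choreography, with exists() standing in for session.LookupRemoteImage:

package main

import (
	"fmt"
	"sync"
)

// collectMissing probes each id with up to five workers and returns the
// set of ids that still need to be pushed.
func collectMissing(ids []string, exists func(string) bool) map[string]struct{} {
	workerCount := len(ids)
	if workerCount > 5 {
		workerCount = 5
	}
	var (
		wg     sync.WaitGroup
		in     = make(chan string, workerCount*2)
		toPush = make(chan string, workerCount*2)
		result = make(chan map[string]struct{}, 1)
	)
	for i := 0; i < workerCount; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range in {
				if !exists(id) {
					toPush <- id
				}
			}
		}()
	}
	go func() { // collector: drain until toPush is closed
		set := make(map[string]struct{})
		for id := range toPush {
			set[id] = struct{}{}
		}
		result <- set
	}()
	for _, id := range ids {
		in <- id
	}
	close(in)     // no more work for the probe workers
	wg.Wait()     // all probes finished
	close(toPush) // lets the collector return its set
	return <-result
}

func main() {
	missing := collectMissing([]string{"a", "b", "c"}, func(id string) bool { return id == "b" })
	fmt.Println(missing) // map[a:{} c:{}]
}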
- for _, endpoint := range repoData.Endpoints { - if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil { - return err - } - } - _, err = p.session.PushImageJSONIndex(p.repoInfo, imageIndex, true, repoData.Endpoints) - return err -} - -func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) { - l := v1Image.Layer() - v1ID := v1Image.V1ID() - truncID := stringid.TruncateID(l.DiffID().String()) - - jsonRaw := v1Image.Config() - progress.Update(p.config.ProgressOutput, truncID, "Pushing") - - // General rule is to use ID for graph accesses and compatibilityID for - // calls to session.registry() - imgData := ®istry.ImgData{ - ID: v1ID, - } - - // Send the json - if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { - if err == registry.ErrAlreadyExists { - progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping") - return "", nil - } - return "", err - } - - arch, err := l.TarStream() - if err != nil { - return "", err - } - defer arch.Close() - - // don't care if this fails; best effort - size, _ := l.DiffSize() - - // Send the layer - logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size) - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing") - defer reader.Close() - - checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw) - if err != nil { - return "", err - } - imgData.Checksum = checksum - imgData.ChecksumPayload = checksumPayload - // Send the checksum - if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { - return "", err - } - - if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil { - logrus.Warnf("Could not set v1 ID mapping: %v", err) - } - - progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed") - return imgData.Checksum, nil -} diff --git a/distribution/push_v2.go b/distribution/push_v2.go deleted file mode 100644 index 7d331f43d..000000000 --- a/distribution/push_v2.go +++ /dev/null @@ -1,452 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "io" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - distreference "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "golang.org/x/net/context" -) - -// PushResult contains the tag, manifest digest, and manifest size from the -// push. It's used to signal this information to the trust code in the client -// so it can sign the manifest if necessary. -type PushResult struct { - Tag string - Digest digest.Digest - Size int -} - -type v2Pusher struct { - v2MetadataService *metadata.V2MetadataService - ref reference.Named - endpoint registry.APIEndpoint - repoInfo *registry.RepositoryInfo - config *ImagePushConfig - repo distribution.Repository - - // pushState is state built by the Upload functions. 
- pushState pushState -} - -type pushState struct { - sync.Mutex - // remoteLayers is the set of layers known to exist on the remote side. - // This avoids redundant queries when pushing multiple tags that - // involve the same layers. It is also used to fill in digest and size - // information when building the manifest. - remoteLayers map[layer.DiffID]distribution.Descriptor - // confirmedV2 is set to true if we confirm we're talking to a v2 - // registry. This is used to limit fallbacks to the v1 protocol. - confirmedV2 bool -} - -func (p *v2Pusher) Push(ctx context.Context) (err error) { - p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor) - - p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") - if err != nil { - logrus.Debugf("Error getting v2 registry: %v", err) - return err - } - - if err = p.pushV2Repository(ctx); err != nil { - if continueOnError(err) { - return fallbackError{ - err: err, - confirmedV2: p.pushState.confirmedV2, - transportOK: true, - } - } - } - return err -} - -func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) { - if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged { - imageID, err := p.config.ReferenceStore.Get(p.ref) - if err != nil { - return fmt.Errorf("tag does not exist: %s", p.ref.String()) - } - - return p.pushV2Tag(ctx, namedTagged, imageID) - } - - if !reference.IsNameOnly(p.ref) { - return errors.New("cannot push a digest reference") - } - - // Pull all tags - pushed := 0 - for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) { - if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged { - pushed++ - if err := p.pushV2Tag(ctx, namedTagged, association.ImageID); err != nil { - return err - } - } - } - - if pushed == 0 { - return fmt.Errorf("no tags to push for %s", p.repoInfo.Name()) - } - - return nil -} - -func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, imageID image.ID) error { - logrus.Debugf("Pushing repository: %s", ref.String()) - - img, err := p.config.ImageStore.Get(imageID) - if err != nil { - return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err) - } - - var l layer.Layer - - topLayerID := img.RootFS.ChainID() - if topLayerID == "" { - l = layer.EmptyLayer - } else { - l, err = p.config.LayerStore.Get(topLayerID) - if err != nil { - return fmt.Errorf("failed to get top layer from image: %v", err) - } - defer layer.ReleaseAndLog(p.config.LayerStore, l) - } - - var descriptors []xfer.UploadDescriptor - - descriptorTemplate := v2PushDescriptor{ - v2MetadataService: p.v2MetadataService, - repoInfo: p.repoInfo, - ref: p.ref, - repo: p.repo, - pushState: &p.pushState, - } - - // Loop bounds condition is to avoid pushing the base layer on Windows. 
- for i := 0; i < len(img.RootFS.DiffIDs); i++ { - descriptor := descriptorTemplate - descriptor.layer = l - descriptors = append(descriptors, &descriptor) - - l = l.Parent() - } - - if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { - return err - } - - // Try schema2 first - builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), img.RawJSON()) - manifest, err := manifestFromBuilder(ctx, builder, descriptors) - if err != nil { - return err - } - - manSvc, err := p.repo.Manifests(ctx) - if err != nil { - return err - } - - putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} - if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { - logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) - - manifestRef, err := distreference.WithTag(p.repo.Named(), ref.Tag()) - if err != nil { - return err - } - builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, img.RawJSON()) - manifest, err = manifestFromBuilder(ctx, builder, descriptors) - if err != nil { - return err - } - - if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { - return err - } - } - - var canonicalManifest []byte - - switch v := manifest.(type) { - case *schema1.SignedManifest: - canonicalManifest = v.Canonical - case *schema2.DeserializedManifest: - _, canonicalManifest, err = v.Payload() - if err != nil { - return err - } - } - - manifestDigest := digest.FromBytes(canonicalManifest) - progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) - - if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID); err != nil { - return err - } - - // Signal digest to the trust client so it can sign the - // push, if appropriate. - progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)}) - - return nil -} - -func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { - // descriptors is in reverse order; iterate backwards to get references - // appended in the right order. 
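// manifestFromBuilder receives the descriptors ordered top layer first
// (the order pushV2Tag collected them in), while a manifest must reference
// layers base layer first; hence the backwards loop below. An equivalent
// forward loop over a reversed copy (sketch only; reverse is a
// hypothetical helper):
//
//	for _, d := range reverse(descriptors) {
//		if err := builder.AppendReference(d.(*v2PushDescriptor)); err != nil {
//			return nil, err
//		}
//	}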
- for i := len(descriptors) - 1; i >= 0; i-- { - if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { - return nil, err - } - } - - return builder.Build(ctx) -} - -type v2PushDescriptor struct { - layer layer.Layer - v2MetadataService *metadata.V2MetadataService - repoInfo reference.Named - ref reference.Named - repo distribution.Repository - pushState *pushState - remoteDescriptor distribution.Descriptor -} - -func (pd *v2PushDescriptor) Key() string { - return "v2push:" + pd.ref.FullName() + " " + pd.layer.DiffID().String() -} - -func (pd *v2PushDescriptor) ID() string { - return stringid.TruncateID(pd.layer.DiffID().String()) -} - -func (pd *v2PushDescriptor) DiffID() layer.DiffID { - return pd.layer.DiffID() -} - -func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { - if fs, ok := pd.layer.(distribution.Describable); ok { - if d := fs.Descriptor(); len(d.URLs) > 0 { - progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") - return d, nil - } - } - - diffID := pd.DiffID() - - pd.pushState.Lock() - if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { - // it is already known that the push is not needed and - // therefore doing a stat is unnecessary - pd.pushState.Unlock() - progress.Update(progressOutput, pd.ID(), "Layer already exists") - return descriptor, nil - } - pd.pushState.Unlock() - - // Do we have any metadata associated with this layer's DiffID? - v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) - if err == nil { - descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState) - if err != nil { - progress.Update(progressOutput, pd.ID(), "Image push failed") - return distribution.Descriptor{}, retryOnError(err) - } - if exists { - progress.Update(progressOutput, pd.ID(), "Layer already exists") - pd.pushState.Lock() - pd.pushState.remoteLayers[diffID] = descriptor - pd.pushState.Unlock() - return descriptor, nil - } - } - - logrus.Debugf("Pushing layer: %s", diffID) - - // if digest was empty or not saved, or if blob does not exist on the remote repository, - // then push the blob. - bs := pd.repo.Blobs(ctx) - - var layerUpload distribution.BlobWriter - mountAttemptsRemaining := 3 - - // Attempt to find another repository in the same registry to mount the layer - // from to avoid an unnecessary upload. - // Note: metadata is stored from oldest to newest, so we iterate through this - // slice in reverse to maximize our chances of the blob still existing in the - // remote repository. 
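// A distilled sketch of the mount-or-upload decision the loop below
// implements (hypothetical helper calls; the types come from the
// distribution packages imported above):
//
//	w, err := bs.Create(ctx, client.WithMountFrom(canonicalRef))
//	switch err := err.(type) {
//	case distribution.ErrBlobMounted:
//		// The registry linked the blob server-side; record err.Descriptor
//		// and skip the upload entirely.
//	case nil:
//		// An upload session was created; stream the layer through w.
//	default:
//		// This source mapping is stale; forget it and try the next one.
//	}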
- for i := len(v2Metadata) - 1; i >= 0 && mountAttemptsRemaining > 0; i-- { - mountFrom := v2Metadata[i] - - sourceRepo, err := reference.ParseNamed(mountFrom.SourceRepository) - if err != nil { - continue - } - if pd.repoInfo.Hostname() != sourceRepo.Hostname() { - // don't mount blobs from another registry - continue - } - - namedRef, err := reference.WithName(mountFrom.SourceRepository) - if err != nil { - continue - } - - // TODO (brianbland): We need to construct a reference where the Name is - // only the full remote name, so clean this up when distribution has a - // richer reference package - remoteRef, err := distreference.WithName(namedRef.RemoteName()) - if err != nil { - continue - } - - canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest) - if err != nil { - continue - } - - logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountFrom.Digest, sourceRepo.FullName()) - - layerUpload, err = bs.Create(ctx, client.WithMountFrom(canonicalRef)) - switch err := err.(type) { - case distribution.ErrBlobMounted: - progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) - - err.Descriptor.MediaType = schema2.MediaTypeLayer - - pd.pushState.Lock() - pd.pushState.confirmedV2 = true - pd.pushState.remoteLayers[diffID] = err.Descriptor - pd.pushState.Unlock() - - // Cache mapping from this layer's DiffID to the blobsum - if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil { - return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} - } - return err.Descriptor, nil - case nil: - // blob upload session created successfully, so begin the upload - mountAttemptsRemaining = 0 - default: - // unable to mount layer from this repository, so this source mapping is no longer valid - logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository) - pd.v2MetadataService.Remove(mountFrom) - mountAttemptsRemaining-- - } - } - - if layerUpload == nil { - layerUpload, err = bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, retryOnError(err) - } - } - defer layerUpload.Close() - - arch, err := pd.layer.TarStream() - if err != nil { - return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} - } - - // don't care if this fails; best effort - size, _ := pd.layer.DiffSize() - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing") - compressedReader, compressionDone := compress(reader) - defer func() { - reader.Close() - <-compressionDone - }() - - digester := digest.Canonical.New() - tee := io.TeeReader(compressedReader, digester.Hash()) - - nn, err := layerUpload.ReadFrom(tee) - compressedReader.Close() - if err != nil { - return distribution.Descriptor{}, retryOnError(err) - } - - pushDigest := digester.Digest() - if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil { - return distribution.Descriptor{}, retryOnError(err) - } - - logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) - progress.Update(progressOutput, pd.ID(), "Pushed") - - // Cache mapping from this layer's DiffID to the blobsum - if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil { - return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} - } - - pd.pushState.Lock() - - // If Commit succeeded, that's an indication 
that the remote registry - // speaks the v2 protocol. - pd.pushState.confirmedV2 = true - - descriptor := distribution.Descriptor{ - Digest: pushDigest, - MediaType: schema2.MediaTypeLayer, - Size: nn, - } - pd.pushState.remoteLayers[diffID] = descriptor - - pd.pushState.Unlock() - - return descriptor, nil -} - -func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { - pd.remoteDescriptor = descriptor -} - -func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor { - return pd.remoteDescriptor -} - -// layerAlreadyExists checks if the registry already know about any of the -// metadata passed in the "metadata" slice. If it finds one that the registry -// knows about, it returns the known digest and "true". -func layerAlreadyExists(ctx context.Context, metadata []metadata.V2Metadata, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) { - for _, meta := range metadata { - // Only check blobsums that are known to this repository or have an unknown source - if meta.SourceRepository != "" && meta.SourceRepository != repoInfo.FullName() { - continue - } - descriptor, err := repo.Blobs(ctx).Stat(ctx, meta.Digest) - switch err { - case nil: - descriptor.MediaType = schema2.MediaTypeLayer - return descriptor, true, nil - case distribution.ErrBlobUnknown: - // nop - default: - return distribution.Descriptor{}, false, err - } - } - return distribution.Descriptor{}, false, nil -} diff --git a/distribution/registry.go b/distribution/registry.go deleted file mode 100644 index 98b82fbae..000000000 --- a/distribution/registry.go +++ /dev/null @@ -1,122 +0,0 @@ -package distribution - -import ( - "fmt" - "net" - "net/http" - "time" - - "github.com/docker/distribution" - distreference "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "github.com/docker/go-connections/sockets" - "golang.org/x/net/context" -) - -// NewV2Repository returns a repository (v2 only). It creates an HTTP transport -// providing timeout settings and authentication support, and also verifies the -// remote API version. -func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { - repoName := repoInfo.FullName() - // If endpoint does not support CanonicalName, use the RemoteName instead - if endpoint.TrimHostname { - repoName = repoInfo.RemoteName() - } - - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - - // TODO(dmcgowan): Call close idle connections when complete, use keep alive - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: direct.Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: endpoint.TLSConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - - proxyDialer, err := sockets.DialerFromEnvironment(direct) - if err == nil { - base.Dial = proxyDialer.Dial - } - - modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders) - authTransport := transport.NewTransport(base, modifiers...) 
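// Call shape for the constructor being assembled here, as the pushers,
// pullers, and the unit tests below use it (sketch with hypothetical
// variable names):
//
//	repo, confirmedV2, err := NewV2Repository(ctx, repoInfo, endpoint,
//		metaHeaders, authConfig, "push", "pull")
//	if err != nil {
//		return err // typically a fallbackError carrying confirmedV2
//	}
//	manSvc, err := repo.Manifests(ctx)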
- - challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport) - if err != nil { - transportOK := false - if responseErr, ok := err.(registry.PingResponseError); ok { - transportOK = true - err = responseErr.Err - } - return nil, foundVersion, fallbackError{ - err: err, - confirmedV2: foundVersion, - transportOK: transportOK, - } - } - - if authConfig.RegistryToken != "" { - passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) - } else { - creds := registry.NewStaticCredentialStore(authConfig) - tokenHandlerOptions := auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - Scopes: []auth.Scope{ - auth.RepositoryScope{ - Repository: repoName, - Actions: actions, - }, - }, - ClientID: registry.AuthClientID, - } - tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) - } - tr := transport.NewTransport(base, modifiers...) - - repoNameRef, err := distreference.ParseNamed(repoName) - if err != nil { - return nil, foundVersion, fallbackError{ - err: err, - confirmedV2: foundVersion, - transportOK: true, - } - } - - repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr) - if err != nil { - err = fallbackError{ - err: err, - confirmedV2: foundVersion, - transportOK: true, - } - } - return -} - -type existingTokenHandler struct { - token string -} - -func (th *existingTokenHandler) Scheme() string { - return "bearer" -} - -func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) - return nil -} diff --git a/distribution/registry_unit_test.go b/distribution/registry_unit_test.go deleted file mode 100644 index b60a465d7..000000000 --- a/distribution/registry_unit_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package distribution - -import ( - "net/http" - "net/http/httptest" - "net/url" - "os" - "strings" - "testing" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/docker/utils" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" - "golang.org/x/net/context" -) - -const secretRegistryToken = "mysecrettoken" - -type tokenPassThruHandler struct { - reached bool - gotToken bool - shouldSend401 func(url string) bool -} - -func (h *tokenPassThruHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - h.reached = true - if strings.Contains(r.Header.Get("Authorization"), secretRegistryToken) { - logrus.Debug("Detected registry token in auth header") - h.gotToken = true - } - if h.shouldSend401 == nil || h.shouldSend401(r.RequestURI) { - w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) - w.WriteHeader(401) - } -} - -func testTokenPassThru(t *testing.T, ts *httptest.Server) { - tmp, err := utils.TestDirectory("") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - uri, err := url.Parse(ts.URL) - if err != nil { - t.Fatalf("could not parse url from test server: %v", err) - } - - endpoint := registry.APIEndpoint{ - Mirror: false, - URL: uri, - Version: 2, - Official: false, - TrimHostname: false, - TLSConfig: nil, - //VersionHeader: "verheader", - } - n, _ := 
reference.ParseNamed("testremotename")
-	repoInfo := &registry.RepositoryInfo{
-		Named: n,
-		Index: &registrytypes.IndexInfo{
-			Name:     "testrepo",
-			Mirrors:  nil,
-			Secure:   false,
-			Official: false,
-		},
-		Official: false,
-	}
-	imagePullConfig := &ImagePullConfig{
-		MetaHeaders: http.Header{},
-		AuthConfig: &types.AuthConfig{
-			RegistryToken: secretRegistryToken,
-		},
-	}
-	puller, err := newPuller(endpoint, repoInfo, imagePullConfig)
-	if err != nil {
-		t.Fatal(err)
-	}
-	p := puller.(*v2Puller)
-	ctx := context.Background()
-	p.repo, _, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	logrus.Debug("About to pull")
-	// We expect it to fail, since we haven't mock'd the full registry exchange in our handler above
-	tag, _ := reference.WithTag(n, "tag_goes_here")
-	_ = p.pullV2Repository(ctx, tag)
-}
-
-func TestTokenPassThru(t *testing.T) {
-	handler := &tokenPassThruHandler{shouldSend401: func(url string) bool { return url == "/v2/" }}
-	ts := httptest.NewServer(handler)
-	defer ts.Close()
-
-	testTokenPassThru(t, ts)
-
-	if !handler.reached {
-		t.Fatal("Handler not reached")
-	}
-	if !handler.gotToken {
-		t.Fatal("Failed to receive registry token")
-	}
-}
-
-func TestTokenPassThruDifferentHost(t *testing.T) {
-	handler := new(tokenPassThruHandler)
-	ts := httptest.NewServer(handler)
-	defer ts.Close()
-
-	tsredirect := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if r.RequestURI == "/v2/" {
-			w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`)
-			w.WriteHeader(401)
-			return
-		}
-		http.Redirect(w, r, ts.URL+r.URL.Path, http.StatusMovedPermanently)
-	}))
-	defer tsredirect.Close()
-
-	testTokenPassThru(t, tsredirect)
-
-	if !handler.reached {
-		t.Fatal("Handler not reached")
-	}
-	if handler.gotToken {
-		t.Fatal("Redirect should not forward Authorization header to another host")
-	}
-}
diff --git a/distribution/xfer/download.go b/distribution/xfer/download.go
deleted file mode 100644
index 754534221..000000000
--- a/distribution/xfer/download.go
+++ /dev/null
@@ -1,452 +0,0 @@
-package xfer
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/layer"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/progress"
-	"golang.org/x/net/context"
-)
-
-const maxDownloadAttempts = 5
-
-// LayerDownloadManager figures out which layers need to be downloaded, then
-// registers and downloads those, taking into account dependencies between
-// layers.
-type LayerDownloadManager struct {
-	layerStore layer.Store
-	tm         TransferManager
-}
-
-// SetConcurrency sets the max concurrent downloads for each pull
-func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) {
-	ldm.tm.SetConcurrency(concurrency)
-}
-
-// NewLayerDownloadManager returns a new LayerDownloadManager.
-func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int) *LayerDownloadManager {
-	return &LayerDownloadManager{
-		layerStore: layerStore,
-		tm:         NewTransferManager(concurrencyLimit),
-	}
-}
-
-type downloadTransfer struct {
-	Transfer
-
-	layerStore layer.Store
-	layer      layer.Layer
-	err        error
-}
-
-// result returns the layer resulting from the download, if the download
-// and registration were successful.
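// How callers drive the download manager (sketch mirroring
// TestSuccessfulDownload in download_test.go below; layerStore,
// descriptors, and progressChan are assumed to exist):
//
//	ldm := NewLayerDownloadManager(layerStore, 3)
//	rootFS, release, err := ldm.Download(ctx, *image.NewRootFS(),
//		descriptors, progress.ChanOutput(progressChan))
//	if err != nil {
//		return err
//	}
//	defer release() // drops the reference held on the downloaded chain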
-func (d *downloadTransfer) result() (layer.Layer, error) {
-	return d.layer, d.err
-}
-
-// A DownloadDescriptor references a layer that may need to be downloaded.
-type DownloadDescriptor interface {
-	// Key returns the key used to deduplicate downloads.
-	Key() string
-	// ID returns the ID for display purposes.
-	ID() string
-	// DiffID should return the DiffID for this layer, or an error
-	// if it is unknown (for example, if it has not been downloaded
-	// before).
-	DiffID() (layer.DiffID, error)
-	// Download is called to perform the download.
-	Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error)
-	// Close is called when the download manager is finished with this
-	// descriptor and will not call Download again or read from the reader
-	// that Download returned.
-	Close()
-}
-
-// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an
-// additional Registered method which gets called after a downloaded layer is
-// registered. This allows the user of the download manager to know the DiffID
-// of each registered layer. This method is called if a cast to
-// DownloadDescriptorWithRegistered is successful.
-type DownloadDescriptorWithRegistered interface {
-	DownloadDescriptor
-	Registered(diffID layer.DiffID)
-}
-
-// Download is a blocking function which ensures the requested layers are
-// present in the layer store. It uses the string returned by the Key method to
-// deduplicate downloads. If a given layer is not already known to be present in
-// the layer store, and the key is not used by an in-progress download, the
-// Download method is called to get the layer tar data. Layers are then
-// registered in the appropriate order. The caller must call the returned
-// release function once it is done with the returned RootFS object.
-func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
-	var (
-		topLayer       layer.Layer
-		topDownload    *downloadTransfer
-		watcher        *Watcher
-		missingLayer   bool
-		transferKey    = ""
-		downloadsByKey = make(map[string]*downloadTransfer)
-	)
-
-	rootFS := initialRootFS
-	for _, descriptor := range layers {
-		key := descriptor.Key()
-		transferKey += key
-
-		if !missingLayer {
-			missingLayer = true
-			diffID, err := descriptor.DiffID()
-			if err == nil {
-				getRootFS := rootFS
-				getRootFS.Append(diffID)
-				l, err := ldm.layerStore.Get(getRootFS.ChainID())
-				if err == nil {
-					// Layer already exists.
-					logrus.Debugf("Layer already exists: %s", descriptor.ID())
-					progress.Update(progressOutput, descriptor.ID(), "Already exists")
-					if topLayer != nil {
-						layer.ReleaseAndLog(ldm.layerStore, topLayer)
-					}
-					topLayer = l
-					missingLayer = false
-					rootFS.Append(diffID)
-					continue
-				}
-			}
-		}
-
-		// Does this layer have the same data as a previous layer in
-		// the stack? If so, avoid downloading it more than once.
-		var topDownloadUncasted Transfer
-		if existingDownload, ok := downloadsByKey[key]; ok {
-			xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload)
-			defer topDownload.Transfer.Release(watcher)
-			topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
-			topDownload = topDownloadUncasted.(*downloadTransfer)
-			continue
-		}
-
-		// Layer is not known to exist - download and register it.
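// A minimal implementation of the DownloadDescriptor interface defined
// above has this shape (sketch; fetchLayer is hypothetical, and the mock
// in download_test.go below is the concrete in-tree example):
//
//	type simpleDescriptor struct{ id string }
//
//	func (d *simpleDescriptor) Key() string { return d.id }
//	func (d *simpleDescriptor) ID() string  { return d.id }
//	func (d *simpleDescriptor) DiffID() (layer.DiffID, error) {
//		return "", errors.New("not downloaded yet")
//	}
//	func (d *simpleDescriptor) Download(ctx context.Context, out progress.Output) (io.ReadCloser, int64, error) {
//		return fetchLayer(ctx, d.id) // stream plus its length in bytes
//	}
//	func (d *simpleDescriptor) Close() {}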
- progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") - - var xferFunc DoFunc - if topDownload != nil { - xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload) - defer topDownload.Transfer.Release(watcher) - } else { - xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil) - } - topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) - topDownload = topDownloadUncasted.(*downloadTransfer) - downloadsByKey[key] = topDownload - } - - if topDownload == nil { - return rootFS, func() { - if topLayer != nil { - layer.ReleaseAndLog(ldm.layerStore, topLayer) - } - }, nil - } - - // Won't be using the list built up so far - will generate it - // from downloaded layers instead. - rootFS.DiffIDs = []layer.DiffID{} - - defer func() { - if topLayer != nil { - layer.ReleaseAndLog(ldm.layerStore, topLayer) - } - }() - - select { - case <-ctx.Done(): - topDownload.Transfer.Release(watcher) - return rootFS, func() {}, ctx.Err() - case <-topDownload.Done(): - break - } - - l, err := topDownload.result() - if err != nil { - topDownload.Transfer.Release(watcher) - return rootFS, func() {}, err - } - - // Must do this exactly len(layers) times, so we don't include the - // base layer on Windows. - for range layers { - if l == nil { - topDownload.Transfer.Release(watcher) - return rootFS, func() {}, errors.New("internal error: too few parent layers") - } - rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) - l = l.Parent() - } - return rootFS, func() { topDownload.Transfer.Release(watcher) }, err -} - -// makeDownloadFunc returns a function that performs the layer download and -// registration. If parentDownload is non-nil, it waits for that download to -// complete before the registration step, and registers the downloaded data -// on top of parentDownload's resulting layer. Otherwise, it registers the -// layer on top of the ChainID given by parentLayer. -func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - d := &downloadTransfer{ - Transfer: NewTransfer(), - layerStore: ldm.layerStore, - } - - go func() { - defer func() { - close(progressChan) - }() - - progressOutput := progress.ChanOutput(progressChan) - - select { - case <-start: - default: - progress.Update(progressOutput, descriptor.ID(), "Waiting") - <-start - } - - if parentDownload != nil { - // Did the parent download already fail or get - // cancelled? - select { - case <-parentDownload.Done(): - _, err := parentDownload.result() - if err != nil { - d.err = err - return - } - default: - } - } - - var ( - downloadReader io.ReadCloser - size int64 - err error - retries int - ) - - defer descriptor.Close() - - for { - downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) - if err == nil { - break - } - - // If an error was returned because the context - // was cancelled, we shouldn't retry. 
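// The retry policy implemented just below is a linear backoff: after the
// n-th failure the download waits n*5 seconds (counted down one second at
// a time so cancellation stays responsive), gives up after
// maxDownloadAttempts tries, and never retries a DoNotRetry error. In
// condensed form:
//
//	retries++
//	if _, fatal := err.(DoNotRetry); fatal || retries == maxDownloadAttempts {
//		return err
//	}
//	delay := retries * 5 // seconds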
- select { - case <-d.Transfer.Context().Done(): - d.err = err - return - default: - } - - retries++ - if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { - logrus.Errorf("Download failed: %v", err) - d.err = err - return - } - - logrus.Errorf("Download failed, retrying: %v", err) - delay := retries * 5 - ticker := time.NewTicker(time.Second) - - selectLoop: - for { - progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) - select { - case <-ticker.C: - delay-- - if delay == 0 { - ticker.Stop() - break selectLoop - } - case <-d.Transfer.Context().Done(): - ticker.Stop() - d.err = errors.New("download cancelled during retry delay") - return - } - - } - } - - close(inactive) - - if parentDownload != nil { - select { - case <-d.Transfer.Context().Done(): - d.err = errors.New("layer registration cancelled") - downloadReader.Close() - return - case <-parentDownload.Done(): - } - - l, err := parentDownload.result() - if err != nil { - d.err = err - downloadReader.Close() - return - } - parentLayer = l.ChainID() - } - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") - defer reader.Close() - - inflatedLayerData, err := archive.DecompressStream(reader) - if err != nil { - d.err = fmt.Errorf("could not get decompression stream: %v", err) - return - } - - var src distribution.Descriptor - if fs, ok := descriptor.(distribution.Describable); ok { - src = fs.Descriptor() - } - if ds, ok := d.layerStore.(layer.DescribableStore); ok { - d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src) - } else { - d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) - } - if err != nil { - select { - case <-d.Transfer.Context().Done(): - d.err = errors.New("layer registration cancelled") - default: - d.err = fmt.Errorf("failed to register layer: %v", err) - } - return - } - - progress.Update(progressOutput, descriptor.ID(), "Pull complete") - withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) - if hasRegistered { - withRegistered.Registered(d.layer.DiffID()) - } - - // Doesn't actually need to be its own goroutine, but - // done like this so we can defer close(c). - go func() { - <-d.Transfer.Released() - if d.layer != nil { - layer.ReleaseAndLog(d.layerStore, d.layer) - } - }() - }() - - return d - } -} - -// makeDownloadFuncFromDownload returns a function that performs the layer -// registration when the layer data is coming from an existing download. It -// waits for sourceDownload and parentDownload to complete, and then -// reregisters the data from sourceDownload's top layer on top of -// parentDownload. This function does not log progress output because it would -// interfere with the progress reporting for sourceDownload, which has the same -// Key. 
-func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - d := &downloadTransfer{ - Transfer: NewTransfer(), - layerStore: ldm.layerStore, - } - - go func() { - defer func() { - close(progressChan) - }() - - <-start - - close(inactive) - - select { - case <-d.Transfer.Context().Done(): - d.err = errors.New("layer registration cancelled") - return - case <-parentDownload.Done(): - } - - l, err := parentDownload.result() - if err != nil { - d.err = err - return - } - parentLayer := l.ChainID() - - // sourceDownload should have already finished if - // parentDownload finished, but wait for it explicitly - // to be sure. - select { - case <-d.Transfer.Context().Done(): - d.err = errors.New("layer registration cancelled") - return - case <-sourceDownload.Done(): - } - - l, err = sourceDownload.result() - if err != nil { - d.err = err - return - } - - layerReader, err := l.TarStream() - if err != nil { - d.err = err - return - } - defer layerReader.Close() - - var src distribution.Descriptor - if fs, ok := l.(distribution.Describable); ok { - src = fs.Descriptor() - } - if ds, ok := d.layerStore.(layer.DescribableStore); ok { - d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, src) - } else { - d.layer, err = d.layerStore.Register(layerReader, parentLayer) - } - if err != nil { - d.err = fmt.Errorf("failed to register layer: %v", err) - return - } - - withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) - if hasRegistered { - withRegistered.Registered(d.layer.DiffID()) - } - - // Doesn't actually need to be its own goroutine, but - // done like this so we can defer close(c). 
- go func() { - <-d.Transfer.Released() - if d.layer != nil { - layer.ReleaseAndLog(d.layerStore, d.layer) - } - }() - }() - - return d - } -} diff --git a/distribution/xfer/download_test.go b/distribution/xfer/download_test.go deleted file mode 100644 index a1801eb07..000000000 --- a/distribution/xfer/download_test.go +++ /dev/null @@ -1,341 +0,0 @@ -package xfer - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "runtime" - "sync/atomic" - "testing" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/progress" - "golang.org/x/net/context" -) - -const maxDownloadConcurrency = 3 - -type mockLayer struct { - layerData bytes.Buffer - diffID layer.DiffID - chainID layer.ChainID - parent layer.Layer -} - -func (ml *mockLayer) TarStream() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBuffer(ml.layerData.Bytes())), nil -} - -func (ml *mockLayer) ChainID() layer.ChainID { - return ml.chainID -} - -func (ml *mockLayer) DiffID() layer.DiffID { - return ml.diffID -} - -func (ml *mockLayer) Parent() layer.Layer { - return ml.parent -} - -func (ml *mockLayer) Size() (size int64, err error) { - return 0, nil -} - -func (ml *mockLayer) DiffSize() (size int64, err error) { - return 0, nil -} - -func (ml *mockLayer) Metadata() (map[string]string, error) { - return make(map[string]string), nil -} - -type mockLayerStore struct { - layers map[layer.ChainID]*mockLayer -} - -func createChainIDFromParent(parent layer.ChainID, dgsts ...layer.DiffID) layer.ChainID { - if len(dgsts) == 0 { - return parent - } - if parent == "" { - return createChainIDFromParent(layer.ChainID(dgsts[0]), dgsts[1:]...) - } - // H = "H(n-1) SHA256(n)" - dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) - return createChainIDFromParent(layer.ChainID(dgst), dgsts[1:]...) 
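// Worked example of the recursion above: for diff IDs A and B,
//
//	createChainIDFromParent("", A)    == layer.ChainID(A)
//	createChainIDFromParent("", A, B) == layer.ChainID(digest.FromBytes([]byte(A + " " + B)))
//
// i.e. each link hashes the previous chain ID and the next diff ID,
// separated by a space, matching the H = "H(n-1) SHA256(n)" note above.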
-} - -func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID) (layer.Layer, error) { - return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{}) -} - -func (ls *mockLayerStore) RegisterWithDescriptor(reader io.Reader, parentID layer.ChainID, _ distribution.Descriptor) (layer.Layer, error) { - var ( - parent layer.Layer - err error - ) - - if parentID != "" { - parent, err = ls.Get(parentID) - if err != nil { - return nil, err - } - } - - l := &mockLayer{parent: parent} - _, err = l.layerData.ReadFrom(reader) - if err != nil { - return nil, err - } - l.diffID = layer.DiffID(digest.FromBytes(l.layerData.Bytes())) - l.chainID = createChainIDFromParent(parentID, l.diffID) - - ls.layers[l.chainID] = l - return l, nil -} - -func (ls *mockLayerStore) Get(chainID layer.ChainID) (layer.Layer, error) { - l, ok := ls.layers[chainID] - if !ok { - return nil, layer.ErrLayerDoesNotExist - } - return l, nil -} - -func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) { - return []layer.Metadata{}, nil -} -func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, string, layer.MountInit, map[string]string) (layer.RWLayer, error) { - return nil, errors.New("not implemented") -} - -func (ls *mockLayerStore) GetRWLayer(string) (layer.RWLayer, error) { - return nil, errors.New("not implemented") -} - -func (ls *mockLayerStore) ReleaseRWLayer(layer.RWLayer) ([]layer.Metadata, error) { - return nil, errors.New("not implemented") -} -func (ls *mockLayerStore) GetMountID(string) (string, error) { - return "", errors.New("not implemented") -} - -func (ls *mockLayerStore) Cleanup() error { - return nil -} - -func (ls *mockLayerStore) DriverStatus() [][2]string { - return [][2]string{} -} - -func (ls *mockLayerStore) DriverName() string { - return "mock" -} - -type mockDownloadDescriptor struct { - currentDownloads *int32 - id string - diffID layer.DiffID - registeredDiffID layer.DiffID - expectedDiffID layer.DiffID - simulateRetries int -} - -// Key returns the key used to deduplicate downloads. -func (d *mockDownloadDescriptor) Key() string { - return d.id -} - -// ID returns the ID for display purposes. -func (d *mockDownloadDescriptor) ID() string { - return d.id -} - -// DiffID should return the DiffID for this layer, or an error -// if it is unknown (for example, if it has not been downloaded -// before). -func (d *mockDownloadDescriptor) DiffID() (layer.DiffID, error) { - if d.diffID != "" { - return d.diffID, nil - } - return "", errors.New("no diffID available") -} - -func (d *mockDownloadDescriptor) Registered(diffID layer.DiffID) { - d.registeredDiffID = diffID -} - -func (d *mockDownloadDescriptor) mockTarStream() io.ReadCloser { - // The mock implementation returns the ID repeated 5 times as a tar - // stream instead of actual tar data. The data is ignored except for - // computing IDs. - return ioutil.NopCloser(bytes.NewBuffer([]byte(d.id + d.id + d.id + d.id + d.id))) -} - -// Download is called to perform the download. -func (d *mockDownloadDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { - if d.currentDownloads != nil { - defer atomic.AddInt32(d.currentDownloads, -1) - - if atomic.AddInt32(d.currentDownloads, 1) > maxDownloadConcurrency { - return nil, 0, errors.New("concurrency limit exceeded") - } - } - - // Sleep a bit to simulate a time-consuming download. 
-	for i := int64(0); i <= 10; i++ {
-		select {
-		case <-ctx.Done():
-			return nil, 0, ctx.Err()
-		case <-time.After(10 * time.Millisecond):
-			progressOutput.WriteProgress(progress.Progress{ID: d.ID(), Action: "Downloading", Current: i, Total: 10})
-		}
-	}
-
-	if d.simulateRetries != 0 {
-		d.simulateRetries--
-		return nil, 0, errors.New("simulating retry")
-	}
-
-	return d.mockTarStream(), 0, nil
-}
-
-func (d *mockDownloadDescriptor) Close() {
-}
-
-func downloadDescriptors(currentDownloads *int32) []DownloadDescriptor {
-	return []DownloadDescriptor{
-		&mockDownloadDescriptor{
-			currentDownloads: currentDownloads,
-			id:               "id1",
-			expectedDiffID:   layer.DiffID("sha256:68e2c75dc5c78ea9240689c60d7599766c213ae210434c53af18470ae8c53ec1"),
-		},
-		&mockDownloadDescriptor{
-			currentDownloads: currentDownloads,
-			id:               "id2",
-			expectedDiffID:   layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"),
-		},
-		&mockDownloadDescriptor{
-			currentDownloads: currentDownloads,
-			id:               "id3",
-			expectedDiffID:   layer.DiffID("sha256:58745a8bbd669c25213e9de578c4da5c8ee1c836b3581432c2b50e38a6753300"),
-		},
-		&mockDownloadDescriptor{
-			currentDownloads: currentDownloads,
-			id:               "id2",
-			expectedDiffID:   layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"),
-		},
-		&mockDownloadDescriptor{
-			currentDownloads: currentDownloads,
-			id:               "id4",
-			expectedDiffID:   layer.DiffID("sha256:0dfb5b9577716cc173e95af7c10289322c29a6453a1718addc00c0c5b1330936"),
-			simulateRetries:  1,
-		},
-		&mockDownloadDescriptor{
-			currentDownloads: currentDownloads,
-			id:               "id5",
-			expectedDiffID:   layer.DiffID("sha256:0a5f25fa1acbc647f6112a6276735d0fa01e4ee2aa7ec33015e337350e1ea23d"),
-		},
-	}
-}
-
-func TestSuccessfulDownload(t *testing.T) {
-	// TODO Windows: Fix this unit test
-	if runtime.GOOS == "windows" {
-		t.Skip("Needs fixing on Windows")
-	}
-	layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)}
-	ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency)
-
-	progressChan := make(chan progress.Progress)
-	progressDone := make(chan struct{})
-	receivedProgress := make(map[string]progress.Progress)
-
-	go func() {
-		for p := range progressChan {
-			receivedProgress[p.ID] = p
-		}
-		close(progressDone)
-	}()
-
-	var currentDownloads int32
-	descriptors := downloadDescriptors(&currentDownloads)
-
-	firstDescriptor := descriptors[0].(*mockDownloadDescriptor)
-
-	// Pre-register the first layer to simulate an already-existing layer
-	l, err := layerStore.Register(firstDescriptor.mockTarStream(), "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	firstDescriptor.diffID = l.DiffID()
-
-	rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan))
-	if err != nil {
-		t.Fatalf("download error: %v", err)
-	}
-
-	releaseFunc()
-
-	close(progressChan)
-	<-progressDone
-
-	if len(rootFS.DiffIDs) != len(descriptors) {
-		t.Fatal("got wrong number of diffIDs in rootfs")
-	}
-
-	for i, d := range descriptors {
-		descriptor := d.(*mockDownloadDescriptor)
-
-		if descriptor.diffID != "" {
-			if receivedProgress[d.ID()].Action != "Already exists" {
-				t.Fatalf("did not get 'Already exists' message for %v", d.ID())
-			}
-		} else if receivedProgress[d.ID()].Action != "Pull complete" {
-			t.Fatalf("did not get 'Pull complete' message for %v", d.ID())
-		}
-
-		if rootFS.DiffIDs[i] != descriptor.expectedDiffID {
-			t.Fatalf("rootFS item %d has the wrong diffID (expected: %v got: %v)", i, descriptor.expectedDiffID, rootFS.DiffIDs[i])
-		}
-
-		if descriptor.diffID == "" && descriptor.registeredDiffID != rootFS.DiffIDs[i] {
-			t.Fatal("diffID mismatch between rootFS and Registered callback")
-		}
-	}
-}
-
-func TestCancelledDownload(t *testing.T) {
-	ldm := NewLayerDownloadManager(&mockLayerStore{make(map[layer.ChainID]*mockLayer)}, maxDownloadConcurrency)
-
-	progressChan := make(chan progress.Progress)
-	progressDone := make(chan struct{})
-
-	go func() {
-		for range progressChan {
-		}
-		close(progressDone)
-	}()
-
-	ctx, cancel := context.WithCancel(context.Background())
-
-	go func() {
-		<-time.After(time.Millisecond)
-		cancel()
-	}()
-
-	descriptors := downloadDescriptors(nil)
-	_, _, err := ldm.Download(ctx, *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan))
-	if err != context.Canceled {
-		t.Fatal("expected download to be cancelled")
-	}
-
-	close(progressChan)
-	<-progressDone
-}
diff --git a/distribution/xfer/transfer.go b/distribution/xfer/transfer.go
deleted file mode 100644
index 14f15660a..000000000
--- a/distribution/xfer/transfer.go
+++ /dev/null
@@ -1,401 +0,0 @@
-package xfer
-
-import (
-	"runtime"
-	"sync"
-
-	"github.com/docker/docker/pkg/progress"
-	"golang.org/x/net/context"
-)
-
-// DoNotRetry is an error wrapper indicating that the error cannot be resolved
-// with a retry.
-type DoNotRetry struct {
-	Err error
-}
-
-// Error returns the stringified representation of the encapsulated error.
-func (e DoNotRetry) Error() string {
-	return e.Err.Error()
-}
-
-// Watcher is returned by Watch and can be passed to Release to stop watching.
-type Watcher struct {
-	// signalChan is used to signal to the watcher goroutine that
-	// new progress information is available, or that the transfer
-	// has finished.
-	signalChan chan struct{}
-	// releaseChan signals to the watcher goroutine that the watcher
-	// should be detached.
-	releaseChan chan struct{}
-	// running remains open as long as the watcher is watching the
-	// transfer. It gets closed if the transfer finishes or the
-	// watcher is detached.
-	running chan struct{}
-}
-
-// Transfer represents an in-progress transfer.
-type Transfer interface {
-	Watch(progressOutput progress.Output) *Watcher
-	Release(*Watcher)
-	Context() context.Context
-	Close()
-	Done() <-chan struct{}
-	Released() <-chan struct{}
-	Broadcast(masterProgressChan <-chan progress.Progress)
-}
-
-type transfer struct {
-	mu sync.Mutex
-
-	ctx    context.Context
-	cancel context.CancelFunc
-
-	// watchers keeps track of the goroutines monitoring progress output,
-	// indexed by the channels that release them.
-	watchers map[chan struct{}]*Watcher
-
-	// lastProgress is the most recently received progress event.
-	lastProgress progress.Progress
-	// hasLastProgress is true when lastProgress has been set.
-	hasLastProgress bool
-
-	// running remains open as long as the transfer is in progress.
-	running chan struct{}
-	// released stays open until all watchers release the transfer and
-	// the transfer is no longer tracked by the transfer manager.
-	released chan struct{}
-
-	// broadcastDone is true if the master progress channel has closed.
-	broadcastDone bool
-	// closed is true if Close has been called
-	closed bool
-	// broadcastSyncChan allows watchers to "ping" the broadcasting
-	// goroutine to wait for it to deplete its input channel. This ensures
-	// a detaching watcher won't miss an event that was sent before it
-	// started detaching.
-	broadcastSyncChan chan struct{}
-}
-
-// NewTransfer creates a new transfer.
-func NewTransfer() Transfer { - t := &transfer{ - watchers: make(map[chan struct{}]*Watcher), - running: make(chan struct{}), - released: make(chan struct{}), - broadcastSyncChan: make(chan struct{}), - } - - // This uses context.Background instead of a caller-supplied context - // so that a transfer won't be cancelled automatically if the client - // which requested it is ^C'd (there could be other viewers). - t.ctx, t.cancel = context.WithCancel(context.Background()) - - return t -} - -// Broadcast copies the progress and error output to all viewers. -func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) { - for { - var ( - p progress.Progress - ok bool - ) - select { - case p, ok = <-masterProgressChan: - default: - // We've depleted the channel, so now we can handle - // reads on broadcastSyncChan to let detaching watchers - // know we're caught up. - select { - case <-t.broadcastSyncChan: - continue - case p, ok = <-masterProgressChan: - } - } - - t.mu.Lock() - if ok { - t.lastProgress = p - t.hasLastProgress = true - for _, w := range t.watchers { - select { - case w.signalChan <- struct{}{}: - default: - } - } - } else { - t.broadcastDone = true - } - t.mu.Unlock() - if !ok { - close(t.running) - return - } - } -} - -// Watch adds a watcher to the transfer. The supplied channel gets progress -// updates and is closed when the transfer finishes. -func (t *transfer) Watch(progressOutput progress.Output) *Watcher { - t.mu.Lock() - defer t.mu.Unlock() - - w := &Watcher{ - releaseChan: make(chan struct{}), - signalChan: make(chan struct{}), - running: make(chan struct{}), - } - - t.watchers[w.releaseChan] = w - - if t.broadcastDone { - close(w.running) - return w - } - - go func() { - defer func() { - close(w.running) - }() - var ( - done bool - lastWritten progress.Progress - hasLastWritten bool - ) - for { - t.mu.Lock() - hasLastProgress := t.hasLastProgress - lastProgress := t.lastProgress - t.mu.Unlock() - - // Make sure we don't write the last progress item - // twice. - if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) { - progressOutput.WriteProgress(lastProgress) - lastWritten = lastProgress - hasLastWritten = true - } - - if done { - return - } - - select { - case <-w.signalChan: - case <-w.releaseChan: - done = true - // Since the watcher is going to detach, make - // sure the broadcaster is caught up so we - // don't miss anything. - select { - case t.broadcastSyncChan <- struct{}{}: - case <-t.running: - } - case <-t.running: - done = true - } - } - }() - - return w -} - -// Release is the inverse of Watch; indicating that the watcher no longer wants -// to be notified about the progress of the transfer. All calls to Watch must -// be paired with later calls to Release so that the lifecycle of the transfer -// is properly managed. -func (t *transfer) Release(watcher *Watcher) { - t.mu.Lock() - delete(t.watchers, watcher.releaseChan) - - if len(t.watchers) == 0 { - if t.closed { - // released may have been closed already if all - // watchers were released, then another one was added - // while waiting for a previous watcher goroutine to - // finish. - select { - case <-t.released: - default: - close(t.released) - } - } else { - t.cancel() - } - } - t.mu.Unlock() - - close(watcher.releaseChan) - // Block until the watcher goroutine completes - <-watcher.running -} - -// Done returns a channel which is closed if the transfer completes or is -// cancelled. Note that having 0 watchers causes a transfer to be cancelled. 
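// Watch/Release pairing as used throughout the callers above (sketch):
// every Watch must eventually be matched by a Release, and releasing the
// last watcher cancels the transfer.
//
//	w := xfer.Watch(progress.ChanOutput(progressChan))
//	<-xfer.Done()   // wait for completion (or cancellation)
//	xfer.Release(w) // blocks until the watcher goroutine exits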
-func (t *transfer) Done() <-chan struct{} {
-	// Note that this doesn't return t.ctx.Done() because that channel will
-	// be closed the moment Cancel is called, and we need to return a
-	// channel that blocks until a cancellation is actually acknowledged by
-	// the transfer function.
-	return t.running
-}
-
-// Released returns a channel which is closed once all watchers release the
-// transfer AND the transfer is no longer tracked by the transfer manager.
-func (t *transfer) Released() <-chan struct{} {
-	return t.released
-}
-
-// Context returns the context associated with the transfer.
-func (t *transfer) Context() context.Context {
-	return t.ctx
-}
-
-// Close is called by the transfer manager when the transfer is no longer
-// being tracked.
-func (t *transfer) Close() {
-	t.mu.Lock()
-	t.closed = true
-	if len(t.watchers) == 0 {
-		close(t.released)
-	}
-	t.mu.Unlock()
-}
-
-// DoFunc is a function called by the transfer manager to actually perform
-// a transfer. It should be non-blocking. It should wait until the start channel
-// is closed before transferring any data. If the function closes inactive, that
-// signals to the transfer manager that the job is no longer actively moving
-// data - for example, it may be waiting for a dependent transfer to finish.
-// This prevents it from taking up a slot.
-type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer
-
-// TransferManager is used by LayerDownloadManager and LayerUploadManager to
-// schedule and deduplicate transfers. It is up to the TransferManager
-// implementation to make the scheduling and concurrency decisions.
-type TransferManager interface {
-	// Transfer checks if a transfer with the given key is in progress. If
-	// so, it returns progress and error output from that transfer.
-	// Otherwise, it will call xferFunc to initiate the transfer.
-	Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher)
-	// SetConcurrency sets the concurrencyLimit so that it is adjustable on daemon reload
-	SetConcurrency(concurrency int)
-}
-
-type transferManager struct {
-	mu sync.Mutex
-
-	concurrencyLimit int
-	activeTransfers  int
-	transfers        map[string]Transfer
-	waitingTransfers []chan struct{}
-}
-
-// NewTransferManager returns a new TransferManager.
-func NewTransferManager(concurrencyLimit int) TransferManager {
-	return &transferManager{
-		concurrencyLimit: concurrencyLimit,
-		transfers:        make(map[string]Transfer),
-	}
-}
-
-// SetConcurrency sets the concurrencyLimit
-func (tm *transferManager) SetConcurrency(concurrency int) {
-	tm.mu.Lock()
-	tm.concurrencyLimit = concurrency
-	tm.mu.Unlock()
-}
-
-// Transfer checks if a transfer matching the given key is in progress. If not,
-// it starts one by calling xferFunc. The caller supplies a channel which
-// receives progress output from the transfer.
-func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) {
-	tm.mu.Lock()
-	defer tm.mu.Unlock()
-
-	for {
-		xfer, present := tm.transfers[key]
-		if !present {
-			break
-		}
-		// Transfer is already in progress.
-		watcher := xfer.Watch(progressOutput)
-
-		select {
-		case <-xfer.Context().Done():
-			// We don't want to watch a transfer that has been cancelled.
-			// Wait for it to be removed from the map and try again.
-			xfer.Release(watcher)
-			tm.mu.Unlock()
-			// The goroutine that removes this transfer from the
-			// map is also waiting for xfer.Done(), so yield to it.
-			// This could be avoided by adding a Closed method
-			// to Transfer to allow explicitly waiting for it to be
-			// removed from the map, but forcing a scheduling round in
-			// this very rare case seems better than bloating the
-			// interface definition.
-			runtime.Gosched()
-			<-xfer.Done()
-			tm.mu.Lock()
-		default:
-			return xfer, watcher
-		}
-	}
-
-	start := make(chan struct{})
-	inactive := make(chan struct{})
-
-	if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit {
-		close(start)
-		tm.activeTransfers++
-	} else {
-		tm.waitingTransfers = append(tm.waitingTransfers, start)
-	}
-
-	masterProgressChan := make(chan progress.Progress)
-	xfer := xferFunc(masterProgressChan, start, inactive)
-	watcher := xfer.Watch(progressOutput)
-	go xfer.Broadcast(masterProgressChan)
-	tm.transfers[key] = xfer
-
-	// When the transfer is finished, remove from the map.
-	go func() {
-		for {
-			select {
-			case <-inactive:
-				tm.mu.Lock()
-				tm.inactivate(start)
-				tm.mu.Unlock()
-				inactive = nil
-			case <-xfer.Done():
-				tm.mu.Lock()
-				if inactive != nil {
-					tm.inactivate(start)
-				}
-				delete(tm.transfers, key)
-				tm.mu.Unlock()
-				xfer.Close()
-				return
-			}
-		}
-	}()
-
-	return xfer, watcher
-}
-
-func (tm *transferManager) inactivate(start chan struct{}) {
-	// If the transfer was started, remove it from the activeTransfers
-	// count.
-	select {
-	case <-start:
-		// Start next transfer if any are waiting
-		if len(tm.waitingTransfers) != 0 {
-			close(tm.waitingTransfers[0])
-			tm.waitingTransfers = tm.waitingTransfers[1:]
-		} else {
-			tm.activeTransfers--
-		}
-	default:
-	}
-}
diff --git a/distribution/xfer/transfer_test.go b/distribution/xfer/transfer_test.go
deleted file mode 100644
index 6c50ce352..000000000
--- a/distribution/xfer/transfer_test.go
+++ /dev/null
@@ -1,410 +0,0 @@
-package xfer
-
-import (
-	"sync/atomic"
-	"testing"
-	"time"
-
-	"github.com/docker/docker/pkg/progress"
-)
-
-func TestTransfer(t *testing.T) {
-	makeXferFunc := func(id string) DoFunc {
-		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
-			select {
-			case <-start:
-			default:
-				t.Fatalf("transfer function not started even though concurrency limit not reached")
-			}
-
-			xfer := NewTransfer()
-			go func() {
-				for i := 0; i <= 10; i++ {
-					progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10}
-					time.Sleep(10 * time.Millisecond)
-				}
-				close(progressChan)
-			}()
-			return xfer
-		}
-	}
-
-	tm := NewTransferManager(5)
-	progressChan := make(chan progress.Progress)
-	progressDone := make(chan struct{})
-	receivedProgress := make(map[string]int64)
-
-	go func() {
-		for p := range progressChan {
-			val, present := receivedProgress[p.ID]
-			if present && p.Current <= val {
-				t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1)
-			}
-			receivedProgress[p.ID] = p.Current
-		}
-		close(progressDone)
-	}()
-
-	// Start a few transfers
-	ids := []string{"id1", "id2", "id3"}
-	xfers := make([]Transfer, len(ids))
-	watchers := make([]*Watcher, len(ids))
-	for i, id := range ids {
-		xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan))
-	}
-
-	for i, xfer := range xfers {
-		<-xfer.Done()
-		xfer.Release(watchers[i])
-	}
-	close(progressChan)
-	<-progressDone
-
-	for _, id := range ids {
-		if receivedProgress[id] != 10 {
-			t.Fatalf("final progress value %d instead of 10", receivedProgress[id])
-		}
-	}
-}
-
-func TestConcurrencyLimit(t *testing.T) {
-	concurrencyLimit := 3
-	var runningJobs int32
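// Each test builds its transfer as a DoFunc with the contract described
// in transfer.go above (sketch): return a Transfer immediately, wait for
// the start channel before moving data (this is how the concurrency limit
// is enforced), and close progressChan when finished.
//
//	func(progressChan chan<- progress.Progress, start <-chan struct{},
//		inactive chan<- struct{}) Transfer {
//		xfer := NewTransfer()
//		go func() {
//			defer close(progressChan)
//			<-start // wait for a free slot
//			// ... move data, emitting progress ...
//		}()
//		return xfer
//	}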
- - makeXferFunc := func(id string) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - xfer := NewTransfer() - go func() { - <-start - totalJobs := atomic.AddInt32(&runningJobs, 1) - if int(totalJobs) > concurrencyLimit { - t.Fatalf("too many jobs running") - } - for i := 0; i <= 10; i++ { - progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} - time.Sleep(10 * time.Millisecond) - } - atomic.AddInt32(&runningJobs, -1) - close(progressChan) - }() - return xfer - } - } - - tm := NewTransferManager(concurrencyLimit) - progressChan := make(chan progress.Progress) - progressDone := make(chan struct{}) - receivedProgress := make(map[string]int64) - - go func() { - for p := range progressChan { - receivedProgress[p.ID] = p.Current - } - close(progressDone) - }() - - // Start more transfers than the concurrency limit - ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} - xfers := make([]Transfer, len(ids)) - watchers := make([]*Watcher, len(ids)) - for i, id := range ids { - xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) - } - - for i, xfer := range xfers { - <-xfer.Done() - xfer.Release(watchers[i]) - } - close(progressChan) - <-progressDone - - for _, id := range ids { - if receivedProgress[id] != 10 { - t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) - } - } -} - -func TestInactiveJobs(t *testing.T) { - concurrencyLimit := 3 - var runningJobs int32 - testDone := make(chan struct{}) - - makeXferFunc := func(id string) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - xfer := NewTransfer() - go func() { - <-start - totalJobs := atomic.AddInt32(&runningJobs, 1) - if int(totalJobs) > concurrencyLimit { - t.Fatalf("too many jobs running") - } - for i := 0; i <= 10; i++ { - progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} - time.Sleep(10 * time.Millisecond) - } - atomic.AddInt32(&runningJobs, -1) - close(inactive) - <-testDone - close(progressChan) - }() - return xfer - } - } - - tm := NewTransferManager(concurrencyLimit) - progressChan := make(chan progress.Progress) - progressDone := make(chan struct{}) - receivedProgress := make(map[string]int64) - - go func() { - for p := range progressChan { - receivedProgress[p.ID] = p.Current - } - close(progressDone) - }() - - // Start more transfers than the concurrency limit - ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} - xfers := make([]Transfer, len(ids)) - watchers := make([]*Watcher, len(ids)) - for i, id := range ids { - xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) - } - - close(testDone) - for i, xfer := range xfers { - <-xfer.Done() - xfer.Release(watchers[i]) - } - close(progressChan) - <-progressDone - - for _, id := range ids { - if receivedProgress[id] != 10 { - t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) - } - } -} - -func TestWatchRelease(t *testing.T) { - ready := make(chan struct{}) - - makeXferFunc := func(id string) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - xfer := NewTransfer() - go func() { - defer func() { - close(progressChan) - }() - <-ready - for i := int64(0); ; i++ { - select { - case <-time.After(10 * time.Millisecond): - case 
<-xfer.Context().Done(): - return - } - progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} - } - }() - return xfer - } - } - - tm := NewTransferManager(5) - - type watcherInfo struct { - watcher *Watcher - progressChan chan progress.Progress - progressDone chan struct{} - receivedFirstProgress chan struct{} - } - - progressConsumer := func(w watcherInfo) { - first := true - for range w.progressChan { - if first { - close(w.receivedFirstProgress) - } - first = false - } - close(w.progressDone) - } - - // Start a transfer - watchers := make([]watcherInfo, 5) - var xfer Transfer - watchers[0].progressChan = make(chan progress.Progress) - watchers[0].progressDone = make(chan struct{}) - watchers[0].receivedFirstProgress = make(chan struct{}) - xfer, watchers[0].watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(watchers[0].progressChan)) - go progressConsumer(watchers[0]) - - // Give it multiple watchers - for i := 1; i != len(watchers); i++ { - watchers[i].progressChan = make(chan progress.Progress) - watchers[i].progressDone = make(chan struct{}) - watchers[i].receivedFirstProgress = make(chan struct{}) - watchers[i].watcher = xfer.Watch(progress.ChanOutput(watchers[i].progressChan)) - go progressConsumer(watchers[i]) - } - - // Now that the watchers are set up, allow the transfer goroutine to - // proceed. - close(ready) - - // Confirm that each watcher gets progress output. - for _, w := range watchers { - <-w.receivedFirstProgress - } - - // Release one watcher every 5ms - for _, w := range watchers { - xfer.Release(w.watcher) - <-time.After(5 * time.Millisecond) - } - - // Now that all watchers have been released, Released() should - // return a closed channel. - <-xfer.Released() - - // Done() should return a closed channel because the xfer func returned - // due to cancellation. - <-xfer.Done() - - for _, w := range watchers { - close(w.progressChan) - <-w.progressDone - } -} - -func TestWatchFinishedTransfer(t *testing.T) { - makeXferFunc := func(id string) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - xfer := NewTransfer() - go func() { - // Finish immediately - close(progressChan) - }() - return xfer - } - } - - tm := NewTransferManager(5) - - // Start a transfer - watchers := make([]*Watcher, 3) - var xfer Transfer - xfer, watchers[0] = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(make(chan progress.Progress))) - - // Give it a watcher immediately - watchers[1] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) - - // Wait for the transfer to complete - <-xfer.Done() - - // Set up another watcher - watchers[2] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) - - // Release the watchers - for _, w := range watchers { - xfer.Release(w) - } - - // Now that all watchers have been released, Released() should - // return a closed channel. 
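-	// This also exercises the late-watch path: watchers[2] was added
-	// after the transfer had already completed.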
-	<-xfer.Released()
-}
-
-func TestDuplicateTransfer(t *testing.T) {
-	ready := make(chan struct{})
-
-	var xferFuncCalls int32
-
-	makeXferFunc := func(id string) DoFunc {
-		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
-			atomic.AddInt32(&xferFuncCalls, 1)
-			xfer := NewTransfer()
-			go func() {
-				defer func() {
-					close(progressChan)
-				}()
-				<-ready
-				for i := int64(0); ; i++ {
-					select {
-					case <-time.After(10 * time.Millisecond):
-					case <-xfer.Context().Done():
-						return
-					}
-					progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10}
-				}
-			}()
-			return xfer
-		}
-	}
-
-	tm := NewTransferManager(5)
-
-	type transferInfo struct {
-		xfer                  Transfer
-		watcher               *Watcher
-		progressChan          chan progress.Progress
-		progressDone          chan struct{}
-		receivedFirstProgress chan struct{}
-	}
-
-	progressConsumer := func(t transferInfo) {
-		first := true
-		for range t.progressChan {
-			if first {
-				close(t.receivedFirstProgress)
-			}
-			first = false
-		}
-		close(t.progressDone)
-	}
-
-	// Try to start multiple transfers with the same ID
-	transfers := make([]transferInfo, 5)
-	for i := range transfers {
-		t := &transfers[i]
-		t.progressChan = make(chan progress.Progress)
-		t.progressDone = make(chan struct{})
-		t.receivedFirstProgress = make(chan struct{})
-		t.xfer, t.watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(t.progressChan))
-		go progressConsumer(*t)
-	}
-
-	// Allow the transfer goroutine to proceed.
-	close(ready)
-
-	// Confirm that each watcher gets progress output.
-	for _, t := range transfers {
-		<-t.receivedFirstProgress
-	}
-
-	// Confirm that the transfer function was called exactly once.
-	if xferFuncCalls != 1 {
-		t.Fatal("transfer function wasn't called exactly once")
-	}
-
-	// Release one watcher every 5ms
-	for _, t := range transfers {
-		t.xfer.Release(t.watcher)
-		<-time.After(5 * time.Millisecond)
-	}
-
-	for _, t := range transfers {
-		// Now that all watchers have been released, Released() should
-		// return a closed channel.
-		<-t.xfer.Released()
-		// Done() should return a closed channel because the xfer func returned
-		// due to cancellation.
-		<-t.xfer.Done()
-	}
-
-	for _, t := range transfers {
-		close(t.progressChan)
-		<-t.progressDone
-	}
-}
diff --git a/distribution/xfer/upload.go b/distribution/xfer/upload.go
deleted file mode 100644
index ad3398369..000000000
--- a/distribution/xfer/upload.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package xfer
-
-import (
-	"errors"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution"
-	"github.com/docker/docker/layer"
-	"github.com/docker/docker/pkg/progress"
-	"golang.org/x/net/context"
-)
-
-const maxUploadAttempts = 5
-
-// LayerUploadManager provides task management and progress reporting for
-// uploads.
-type LayerUploadManager struct {
-	tm TransferManager
-}
-
-// SetConcurrency sets the max concurrent uploads for each push
-func (lum *LayerUploadManager) SetConcurrency(concurrency int) {
-	lum.tm.SetConcurrency(concurrency)
-}
-
-// NewLayerUploadManager returns a new LayerUploadManager.
-func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager {
-	return &LayerUploadManager{
-		tm: NewTransferManager(concurrencyLimit),
-	}
-}
-
-type uploadTransfer struct {
-	Transfer
-
-	remoteDescriptor distribution.Descriptor
-	err              error
-}
-
-// An UploadDescriptor references a layer that may need to be uploaded.
-type UploadDescriptor interface {
-	// Key returns the key used to deduplicate uploads.
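-	// Descriptors that return the same key are coalesced into a single
-	// transfer, so the key should be stable for a given layer.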
-	Key() string
-	// ID returns the ID for display purposes.
-	ID() string
-	// DiffID should return the DiffID for this layer.
-	DiffID() layer.DiffID
-	// Upload is called to perform the upload.
-	Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error)
-	// SetRemoteDescriptor provides the distribution.Descriptor that was
-	// returned by Upload. This descriptor is not to be confused with
-	// the UploadDescriptor interface, which is used internally for
-	// identifying layers that are being uploaded.
-	SetRemoteDescriptor(descriptor distribution.Descriptor)
-}
-
-// Upload is a blocking function which ensures the listed layers are present on
-// the remote registry. It uses the string returned by the Key method to
-// deduplicate uploads.
-func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
-	var (
-		uploads          []*uploadTransfer
-		dedupDescriptors = make(map[string]*uploadTransfer)
-	)
-
-	for _, descriptor := range layers {
-		progress.Update(progressOutput, descriptor.ID(), "Preparing")
-
-		key := descriptor.Key()
-		if _, present := dedupDescriptors[key]; present {
-			continue
-		}
-
-		xferFunc := lum.makeUploadFunc(descriptor)
-		upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput)
-		defer upload.Release(watcher)
-		uploads = append(uploads, upload.(*uploadTransfer))
-		dedupDescriptors[key] = upload.(*uploadTransfer)
-	}
-
-	for _, upload := range uploads {
-		select {
-		case <-ctx.Done():
-			return ctx.Err()
-		case <-upload.Transfer.Done():
-			if upload.err != nil {
-				return upload.err
-			}
-		}
-	}
-	for _, l := range layers {
-		l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor)
-	}
-
-	return nil
-}
-
-func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc {
-	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
-		u := &uploadTransfer{
-			Transfer: NewTransfer(),
-		}
-
-		go func() {
-			defer func() {
-				close(progressChan)
-			}()
-
-			progressOutput := progress.ChanOutput(progressChan)
-
-			select {
-			case <-start:
-			default:
-				progress.Update(progressOutput, descriptor.ID(), "Waiting")
-				<-start
-			}
-
-			retries := 0
-			for {
-				remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput)
-				if err == nil {
-					u.remoteDescriptor = remoteDescriptor
-					break
-				}
-
-				// If an error was returned because the context
-				// was cancelled, we shouldn't retry.
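-				// (Context().Done() is closed only once the transfer has
-				// been cancelled, and the default case keeps this check
-				// non-blocking.)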
-				select {
-				case <-u.Transfer.Context().Done():
-					u.err = err
-					return
-				default:
-				}
-
-				retries++
-				if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts {
-					logrus.Errorf("Upload failed: %v", err)
-					u.err = err
-					return
-				}
-
-				logrus.Errorf("Upload failed, retrying: %v", err)
-				delay := retries * 5
-				ticker := time.NewTicker(time.Second)
-
-			selectLoop:
-				for {
-					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1])
-					select {
-					case <-ticker.C:
-						delay--
-						if delay == 0 {
-							ticker.Stop()
-							break selectLoop
-						}
-					case <-u.Transfer.Context().Done():
-						ticker.Stop()
-						u.err = errors.New("upload cancelled during retry delay")
-						return
-					}
-				}
-			}
-		}()
-
-		return u
-	}
-}
diff --git a/distribution/xfer/upload_test.go b/distribution/xfer/upload_test.go
deleted file mode 100644
index 16bd18733..000000000
--- a/distribution/xfer/upload_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package xfer
-
-import (
-	"errors"
-	"sync/atomic"
-	"testing"
-	"time"
-
-	"github.com/docker/distribution"
-	"github.com/docker/docker/layer"
-	"github.com/docker/docker/pkg/progress"
-	"golang.org/x/net/context"
-)
-
-const maxUploadConcurrency = 3
-
-type mockUploadDescriptor struct {
-	currentUploads  *int32
-	diffID          layer.DiffID
-	simulateRetries int
-}
-
-// Key returns the key used to deduplicate uploads.
-func (u *mockUploadDescriptor) Key() string {
-	return u.diffID.String()
-}
-
-// ID returns the ID for display purposes.
-func (u *mockUploadDescriptor) ID() string {
-	return u.diffID.String()
-}
-
-// DiffID should return the DiffID for this layer.
-func (u *mockUploadDescriptor) DiffID() layer.DiffID {
-	return u.diffID
-}
-
-// SetRemoteDescriptor is not used in the mock.
-func (u *mockUploadDescriptor) SetRemoteDescriptor(remoteDescriptor distribution.Descriptor) {
-}
-
-// Upload is called to perform the upload.
-func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
-	if u.currentUploads != nil {
-		defer atomic.AddInt32(u.currentUploads, -1)
-
-		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
-			return distribution.Descriptor{}, errors.New("concurrency limit exceeded")
-		}
-	}
-
-	// Sleep a bit to simulate a time-consuming upload.
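-	// Ten 10ms progress ticks make the mock slow enough for concurrent
-	// uploads to overlap, which is what the currentUploads check above
-	// observes.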
- for i := int64(0); i <= 10; i++ { - select { - case <-ctx.Done(): - return distribution.Descriptor{}, ctx.Err() - case <-time.After(10 * time.Millisecond): - progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10}) - } - } - - if u.simulateRetries != 0 { - u.simulateRetries-- - return distribution.Descriptor{}, errors.New("simulating retry") - } - - return distribution.Descriptor{}, nil -} - -func uploadDescriptors(currentUploads *int32) []UploadDescriptor { - return []UploadDescriptor{ - &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, - &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"), 0}, - &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"), 0}, - &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, - &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"), 1}, - &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"), 0}, - } -} - -func TestSuccessfulUpload(t *testing.T) { - lum := NewLayerUploadManager(maxUploadConcurrency) - - progressChan := make(chan progress.Progress) - progressDone := make(chan struct{}) - receivedProgress := make(map[string]int64) - - go func() { - for p := range progressChan { - receivedProgress[p.ID] = p.Current - } - close(progressDone) - }() - - var currentUploads int32 - descriptors := uploadDescriptors(¤tUploads) - - err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan)) - if err != nil { - t.Fatalf("upload error: %v", err) - } - - close(progressChan) - <-progressDone -} - -func TestCancelledUpload(t *testing.T) { - lum := NewLayerUploadManager(maxUploadConcurrency) - - progressChan := make(chan progress.Progress) - progressDone := make(chan struct{}) - - go func() { - for range progressChan { - } - close(progressDone) - }() - - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - <-time.After(time.Millisecond) - cancel() - }() - - descriptors := uploadDescriptors(nil) - err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan)) - if err != context.Canceled { - t.Fatal("expected upload to be cancelled") - } - - close(progressChan) - <-progressDone -} diff --git a/dockerversion/useragent.go b/dockerversion/useragent.go deleted file mode 100644 index d2a891c4d..000000000 --- a/dockerversion/useragent.go +++ /dev/null @@ -1,74 +0,0 @@ -package dockerversion - -import ( - "fmt" - "runtime" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/useragent" - "golang.org/x/net/context" -) - -// DockerUserAgent is the User-Agent the Docker client uses to identify itself. 
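-// A typical resulting value (versions here are illustrative only):
-//   docker/1.12.0 go/go1.6.3 git-commit/8eab29e kernel/4.4.0 os/linux arch/amd64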
-// In accordance with RFC 7231 (5.5.3), it is of the form:
-//   [docker client's UA] UpstreamClient([upstream client's UA])
func DockerUserAgent(ctx context.Context) string {
-	httpVersion := make([]useragent.VersionInfo, 0, 6)
-	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version})
-	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()})
-	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit})
-	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
-		httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()})
-	}
-	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS})
-	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH})
-
-	dockerUA := useragent.AppendVersions("", httpVersion...)
-	upstreamUA := getUserAgentFromContext(ctx)
-	if len(upstreamUA) > 0 {
-		ret := insertUpstreamUserAgent(upstreamUA, dockerUA)
-		return ret
-	}
-	return dockerUA
-}
-
-// getUserAgentFromContext returns the user-agent string previously saved in
-// ctx, if one exists.
-func getUserAgentFromContext(ctx context.Context) string {
-	var upstreamUA string
-	if ctx != nil {
-		var ki interface{} = ctx.Value(httputils.UAStringKey)
-		if ki != nil {
-			upstreamUA = ctx.Value(httputils.UAStringKey).(string)
-		}
-	}
-	return upstreamUA
-}
-
-// escapeStr returns s with every rune in charsToEscape escaped by a backslash
-func escapeStr(s string, charsToEscape string) string {
-	var ret string
-	for _, currRune := range s {
-		appended := false
-		for _, escapeableRune := range charsToEscape {
-			if currRune == escapeableRune {
-				ret += `\` + string(currRune)
-				appended = true
-				break
-			}
-		}
-		if !appended {
-			ret += string(currRune)
-		}
-	}
-	return ret
-}
-
-// insertUpstreamUserAgent adds the upstream client user agent to create a
-// user-agent string of the form:
-//   $dockerUA UpstreamClient($upstreamUA)
-func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string {
-	charsToEscape := `();\`
-	upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape)
-	return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped)
-}
diff --git a/errors/errors.go b/errors/errors.go
deleted file mode 100644
index 29fd2545d..000000000
--- a/errors/errors.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package errors
-
-import "net/http"
-
-// apiError is an error wrapper that also
-// holds information about response status codes.
-type apiError struct {
-	error
-	statusCode int
-}
-
-// HTTPErrorStatusCode returns a status code.
-func (e apiError) HTTPErrorStatusCode() int {
-	return e.statusCode
-}
-
-// NewErrorWithStatusCode allows you to associate
-// a specific HTTP status code with an error.
-// The server will take that code and set
-// it as the response status.
-func NewErrorWithStatusCode(err error, code int) error {
-	return apiError{err, code}
-}
-
-// NewBadRequestError creates a new API error
-// that has the 400 HTTP status code associated with it.
-func NewBadRequestError(err error) error {
-	return NewErrorWithStatusCode(err, http.StatusBadRequest)
-}
-
-// NewRequestForbiddenError creates a new API error
-// that has the 403 HTTP status code associated with it.
-func NewRequestForbiddenError(err error) error {
-	return NewErrorWithStatusCode(err, http.StatusForbidden)
-}
-
-// NewRequestNotFoundError creates a new API error
-// that has the 404 HTTP status code associated with it.
-func NewRequestNotFoundError(err error) error {
-	return NewErrorWithStatusCode(err, http.StatusNotFound)
-}
-
-// NewRequestConflictError creates a new API error
-// that has the 409 HTTP status code associated with it.
-func NewRequestConflictError(err error) error {
-	return NewErrorWithStatusCode(err, http.StatusConflict)
-}
diff --git a/experimental/README.md b/experimental/README.md
deleted file mode 100644
index 3253cb5c4..000000000
--- a/experimental/README.md
+++ /dev/null
@@ -1,82 +0,0 @@
-# Docker Experimental Features
-
-This page contains a list of features in the Docker engine which are
-experimental. Experimental features are **not** ready for production. They are
-provided for test and evaluation in your sandbox environments.
-
-The information below describes each feature and the GitHub pull requests and
-issues associated with it. If necessary, links are provided to additional
-documentation on an issue. As an active Docker user and community member,
-please feel free to provide any feedback on these features.
-
-## Install Docker experimental
-
-Unlike the regular Docker binary, the experimental channel is built and
-updated nightly on https://experimental.docker.com. From one day to the
-next, new features may appear, while existing experimental features may be
-refined or entirely removed.
-
-1. Verify that you have `curl` installed.
-
-        $ which curl
-
-    If `curl` isn't installed, install it after updating your package manager:
-
-        $ sudo apt-get update
-        $ sudo apt-get install curl
-
-2. Get the latest Docker package.
-
-        $ curl -sSL https://experimental.docker.com/ | sh
-
-    The system prompts you for your `sudo` password. Then, it downloads and
-    installs Docker and its dependencies.
-
-    >**Note**: If your company is behind a filtering proxy, you may find that the
-    >`apt-key`
-    >command fails for the Docker repo during installation. To work around this,
-    >add the key directly using the following:
-    >
-    >        $ curl -sSL https://experimental.docker.com/gpg | sudo apt-key add -
-
-3. Verify `docker` is installed correctly.
-
-        $ sudo docker run hello-world
-
-    This command downloads a test image and runs it in a container.
-
-### Get the Linux binary
-To download the latest experimental `docker` binary for Linux,
-use the following URLs:
-
-    https://experimental.docker.com/builds/Linux/i386/docker-latest.tgz
-
-    https://experimental.docker.com/builds/Linux/x86_64/docker-latest.tgz
-
-After downloading the appropriate binary, you can follow the instructions
-[here](https://docs.docker.com/installation/binaries/#get-the-docker-binary) to run the `docker` daemon.
-
-> **Note**
->
-> 1) You can get the MD5 and SHA256 hashes by appending .md5 and .sha256 to the URLs respectively
->
-> 2) You can get the compressed binaries by appending .tgz to the URLs
-
-### Build an experimental binary
-You can also build the experimental binary from the standard development environment by adding
-`DOCKER_EXPERIMENTAL=1` to the environment where you run `make` to build Docker binaries.
For example,
-to build a Docker binary with the experimental features enabled:
-
-    $ DOCKER_EXPERIMENTAL=1 make binary
-
-## Current experimental features
-
- * [External graphdriver plugins](plugins_graphdriver.md)
- * [Macvlan and Ipvlan Network Drivers](vlan-networks.md)
- * [Docker Stacks and Distributed Application Bundles](docker-stacks-and-bundles.md)
-
-## How to comment on an experimental feature
-
-Each feature's documentation includes a list of proposal pull requests (PRs) associated with the feature. If you want to comment on or suggest a change to a feature, please add it to the existing feature PR.
-
-Issues or problems with a feature? Ask for help on the `#docker` IRC channel or on the [Docker Google group](https://groups.google.com/forum/#!forum/docker-user).
diff --git a/experimental/docker-stacks-and-bundles.md b/experimental/docker-stacks-and-bundles.md
deleted file mode 100644
index 1da2b8f58..000000000
--- a/experimental/docker-stacks-and-bundles.md
+++ /dev/null
@@ -1,182 +0,0 @@
-# Docker Stacks and Distributed Application Bundles
-
-## Overview
-
-Docker Stacks and Distributed Application Bundles are experimental features
-introduced in Docker 1.12 and Docker Compose 1.8, alongside the concept of
-swarm mode, and Nodes and Services in the Engine API.
-
-A Dockerfile can be built into an image, and containers can be created from
-that image. Similarly, a docker-compose.yml can be built into a **distributed
-application bundle**, and **stacks** can be created from that bundle. In that
-sense, the bundle is a multi-service distributable image format.
-
-As of Docker 1.12 and Compose 1.8, the features are experimental. Neither
-Docker Engine nor the Docker Registry support distribution of bundles.
-
-## Producing a bundle
-
-The easiest way to produce a bundle is to generate it using `docker-compose`
-from an existing `docker-compose.yml`. Of course, that's just *one* possible way
-to proceed, in the same way that `docker build` isn't the only way to produce a
-Docker image.
-
-From `docker-compose`:
-
-```bash
-$ docker-compose bundle
-WARNING: Unsupported key 'network_mode' in services.nsqd - ignoring
-WARNING: Unsupported key 'links' in services.nsqd - ignoring
-WARNING: Unsupported key 'volumes' in services.nsqd - ignoring
-[...]
-Wrote bundle to vossibility-stack.dab
-```
-
-## Creating a stack from a bundle
-
-A stack is created using the `docker deploy` command:
-
-```bash
-# docker deploy --help
-
-Usage: docker deploy [OPTIONS] STACK
-
-Create and update a stack
-
-Options:
-  -f, --bundle string   Path to a bundle (Default: STACK.dab)
-      --help            Print usage
-```
-
-Let's deploy the stack created before:
-
-```bash
-# docker deploy vossibility-stack
-Loading bundle from vossibility-stack.dab
-Creating service vossibility-stack_elasticsearch
-Creating service vossibility-stack_kibana
-Creating service vossibility-stack_logstash
-Creating service vossibility-stack_lookupd
-Creating service vossibility-stack_nsqd
-Creating service vossibility-stack_vossibility-collector
-```
-
-We can verify that the services were correctly created:
-
-```bash
-# docker service ls
-ID            NAME                                      REPLICAS  IMAGE                                                                                                    COMMAND
-29bv0vnlm903  vossibility-stack_lookupd                 1         nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662                        /nsqlookupd
-4awt47624qwh  vossibility-stack_nsqd                    1         nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662                        /nsqd --data-path=/data --lookupd-tcp-address=lookupd:4160
-4tjx9biia6fs  vossibility-stack_elasticsearch           1         elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa
-7563uuzr9eys  vossibility-stack_kibana                  1         kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03
-9gc5m4met4he  vossibility-stack_logstash                1         logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe                        logstash -f /etc/logstash/conf.d/logstash.conf
-axqh55ipl40h  vossibility-stack_vossibility-collector   1         icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba   --config /config/config.toml --debug
-```
-
-## Managing stacks
-
-Stacks are managed using the `docker stack` command:
-
-```bash
-# docker stack --help
-
-Usage: docker stack COMMAND
-
-Manage Docker stacks
-
-Options:
-      --help   Print usage
-
-Commands:
-  config   Print the stack configuration
-  deploy   Create and update a stack
-  rm       Remove the stack
-  tasks    List the tasks in the stack
-
-Run 'docker stack COMMAND --help' for more information on a command.
-```
-
-## Bundle file format
-
-Distributed application bundles are described in a JSON format. When bundles
-are persisted as files, the file extension is `.dab` (Docker 1.12RC2 tools use
-`.dsb` for the file extension; this will be updated in the next client release).
-
-A bundle has two top-level fields: `version` and `services`. The version used
-by Docker 1.12 tools is `0.1`.
-
-`services` in the bundle are the services that comprise the app. They
-correspond to the new `Service` object introduced in the 1.12 Docker Engine API.
-
-A service has the following fields:
-
-<dl>
-    <dt>Image (required) <code>string</code></dt>
-    <dd>
-        The image that the service will run. Docker images should be referenced
-        with full content hash to fully specify the deployment artifact for the
-        service. Example:
-        <code>postgres@sha256:f76245b04ddbcebab5bb6c28e76947f49222c99fec4aadb0bb1c24821a9e83ef</code>
-    </dd>
-    <dt>Command <code>[]string</code></dt>
-    <dd>Command to run in service containers.</dd>
-    <dt>Args <code>[]string</code></dt>
-    <dd>Arguments passed to the service containers.</dd>
-    <dt>Env <code>[]string</code></dt>
-    <dd>Environment variables.</dd>
-    <dt>Labels <code>map[string]string</code></dt>
-    <dd>Labels used for setting metadata on services.</dd>
-    <dt>Ports <code>[]Port</code></dt>
-    <dd>
-        Service ports (composed of <code>Port</code> (int) and
-        <code>Protocol</code> (string)). A service description can only specify
-        the container port to be exposed. These ports can be mapped on runtime
-        hosts at the operator's discretion.
-    </dd>
-    <dt>WorkingDir <code>string</code></dt>
-    <dd>Working directory inside the service containers.</dd>
-    <dt>User <code>string</code></dt>
-    <dd>Username or UID (format: <code>&lt;name|uid&gt;[:&lt;group|gid&gt;]</code>).</dd>
-    <dt>Networks <code>[]string</code></dt>
-    <dd>
-        Networks that the service containers should be connected to. An entity
-        deploying a bundle should create networks as needed.
-    </dd>
-</dl>
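To make the format concrete, the sketch below models these fields as Go types. This is an illustration only: the struct and field names follow the list above, but the exact types and `json` tags are assumptions rather than a verbatim copy of the deleted `bundlefile` package.

```go
package bundlefile

// Bundlefile mirrors the two top-level fields of a .dab file.
type Bundlefile struct {
	Version  string             // schema version; "0.1" for Docker 1.12 tools
	Services map[string]Service // the services that comprise the app
}

// Port pairs a container port with its protocol,
// e.g. Port{Protocol: "tcp", Port: 8080}.
type Port struct {
	Protocol string
	Port     uint32
}

// Service describes one service in the bundle, per the field list above.
// A minimal bundle would then serialize as (hash elided):
//   {"Version":"0.1","Services":{"db":{"Image":"postgres@sha256:..."}}}
type Service struct {
	Image      string            // required; full content-hash reference
	Command    []string          `json:",omitempty"`
	Args       []string          `json:",omitempty"`
	Env        []string          `json:",omitempty"`
	Labels     map[string]string `json:",omitempty"`
	Ports      []Port            `json:",omitempty"`
	WorkingDir string            `json:",omitempty"`
	User       string            `json:",omitempty"`
	Networks   []string          `json:",omitempty"`
}
```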
- diff --git a/experimental/images/ipvlan-l3.gliffy b/experimental/images/ipvlan-l3.gliffy deleted file mode 100644 index bf0512af7..000000000 --- a/experimental/images/ipvlan-l3.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":447,"height":422,"nodeIndex":326,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":9,"y":10.461511948529278},"max":{"x":447,"y":421.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":12.0,"y":200.0,"rotation":0.0,"id":276,"width":434.00000000000006,"height":197.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":275.0,"y":8.93295288085936,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":14,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":272,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[82.0,295.5670471191406],[-4.628896294384617,211.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":285.0,"y":18.93295288085936,"rotation":0.0,"id":268,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":15,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":316,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":0.9999999999999996,"px":0.29289321881345254}}}
,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-204.0,285.5670471191406],[-100.37110370561533,201.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.0,"y":203.5,"rotation":0.0,"id":267,"width":116.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":28.93295288085936,"rotation":0.0,"id":278,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":290,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[217.5,167.06704711914062],[219.11774189711457,53.02855906766992]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":57.51435447730654,"y":10.461511948529278,"rotation":0.0,"id":246,"width":343.20677483961606,"height":143.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":18,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#434343","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":55.19999694824217,"rotation":0.0,"id":262,"width":262.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":22,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Unless notified about the container networks, the physical network does not have a route to their subnets

Who has 10.16.20.0/24?

Who has 10.1.20.0/24?

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.0,"y":403.5,"rotation":0.0,"id":282,"width":442.0,"height":18.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Containers can be on different subnets and reach each other

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":252.5,"rotation":0.0,"id":288,"width":238.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Ipvlan L3 Mode

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":124.0,"y":172.0,"rotation":0.0,"id":290,"width":207.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":3.568965517241383,"y":0.0,"rotation":0.0,"id":291,"width":199.86206896551747,"height":42.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Eth0

192.168.50.10/24

Parent interface acts as a Router

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":29.0,"y":358.1999969482422,"rotation":0.0,"id":304,"width":390.99999999999994,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

All containers can ping each other without a router if

they share the same parent interface (example eth0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":24.0,"y":276.0,"rotation":0.0,"id":320,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":48,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":316,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":317,"width":109.44000000000001,"height":43.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 

172.16.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":318,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":319,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":300.0,"y":276.0,"rotation":0.0,"id":321,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":49,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":272,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":273,"width":109.44000000000001,"height":44.0,"uid":null,"order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":310,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":312,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":85.93295288085938,"rotation":0.0,"id":322,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#434343","fillColor":"none","dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-191.0,222.06704711914062],[-80.9272967534639,222.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":167.0,"y":25.499999999999986,"rotation":0.0,"id":323,"width":135.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":51,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Physical Network

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":53}],"shapeStyles":{},"lineStyles":{"global":{"fill":"none","stroke":"#434343","strokeWidth":2,"dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"orthoMode":2}},"textStyles":{"global":{"face":"Arial","size":"13px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117032939,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/experimental/images/ipvlan-l3.png b/experimental/images/ipvlan-l3.png deleted file mode 100644 index 3227a83ca1541ec68e06b0aa105e22fdf5ae9e6f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 18260 zcmaI6Wl$YmumyT>5AG1$-Q67ydXV7m8r+1pok>`#RfTA-*)bij0K-0C)TQ9yW|^N>XfP5?S3O{d6!$!B8y2koK&0EoKg z14FAa8wP53UX5(L=%bDVL&q0#^aP6T-8%nWjl|N@0|HYEm_x>-zYG8X+2Wz0n@ zzI=J7-uZdIPqutrz|x1Ai?3;VhTlje(*-0XDKBVe#9*5bMa}EV0b?Ns0MG@0Ui2`_ zyA_7pwKNt+WmVFd);j7*KV7ZMKpvgIJVpRB zk%B`-b-EM-07kw-z$Bs2hOsF>sATd?h@Xx{3=r`1M{l@jXOaH_z4MhL$3kWlz_!oll8RWh!Q%ROqdsGnb z(TIUiKA{RJOZ(q_NwQH7TH06B6ls}!%=IAY1^)X7pr@=O1Q$j*e|F|%#TA_w-Y;kC z)_`zJ#`9NCCW{OdMugwWB?QLTkvyi0WxiQ_#T0~ZQXY15j6v3s1PYv2ei$9RP7duU zqHT!9#~0bi?!lGe=?X6%oIH~_-Z`^`G@SyH#Q}0r7>^JBu!Keq-(wCBenOz|2O&fP z%g(=xOm2N&(JGPhAZ&+bB(oa!l?)rZAHYn!%X6=Q?12*Z=-5I<(n;+L6OzT~%aEvO zgv8_ogMJaz{p`T;w{DzN-7~1pVqIlvWlRHg>_wsK z+@e+a#w-GRB!9D%P4X!uI$n1QEJo%7!P9sat=Yi+8?`ar93aXXi7MWN=hA*yn@Ikk z4&`q8r987}hlINhTR;^mdxMx-+>@>TP$>)3LgwW~Dq;mQ#%#p8{3(LFu9AB9%YwlX zX^Jt4C~j&qDj?O8cx?i0hwDmDi>9@%F0a|XWrlKxLu-fY0YY#dF;{(Xz#B^()XVp~ zA+#qOWF3Pblb9U2Xb05_fp~})4vZ# zE$5YiKsXBY*+b>07f3O)+SciM@ArFu`gqLyfO499UTM3S_#0RA)ci8kaC()t{CdH{ zUsheO{k`w`&G72YZ?(N1(zlZBufdwTu4o5M#we0nv6WmAo=8Wu(MTcf4Y?&QG)0oU znn9uhM0uu(8;#a5mXez3LPqk*Z#>J&vJ<;CvtdznTbY7$yAnJkFbEtY;(+bo<^Jk( zA>&D(D;*-QXyd%8x~=Fqj7_gcZx1elO`pe&sgB2$F=5#aF7~q%`{`%x&~7&!Hn|YE;!;Q?f*HvOpP-Tm=35u|Ah?>^rxk9yuVL1k zkP^ZRJesL>xg0b!OlMvo;5~)DutD19qHL%dXWPo&|3$Fk!83qR$Kc!-p}nmF?BkD*xxZZfVH1jrnX&-rzd4 zB9>wgQ1hSXV;Ch*6{l5Ws3&5^Z!QG)EfD;%2R=$vYzyZBIc4%=<%&9Uj*)QAJ~z^( zFcF@R^!oic!@aLO^ zn?UzwRe>!I+wm%NXK&$s=!@y=yA~n?K4GTE9nLwYxBrB5XVLJwnD6h{Z+ixERd5+Q0=_af_Z{pPmwPMNNVo= z^rA8x<0ts<5;8ZH)PoX%ne?R%?CbFx1>|Ft)?W5U-c9Y~dz5En8}m-QISYB@`4mY4 zW9t(O60{?YD}w}=snv7$F%1kM6P(L@8S?sxGEPWj|6uQpuv}y$PXwxizk|Yb$fH2j)Be<57<|=?) 
z7=toefVrv94SujOJ#Sg}yqaIWaY&Y??*6k$VyYbK8$AKc3jb%HTNw$Jb39d2mjxM@ z=d=|(v4Gd_E2cLLyx|}PT6r4s!8}pN-omkWg$xi>=)Dcr#<%ZPVn6f(SlFy~AHLY}=vShNZvs(>Pq`tkJX`n-95d-;rIln`$j| z`F``ctP$Lp%wj`vVKkAJffxJ%mR%E**F(o4QG1_%i2C!cB_M0?~vd%-A{K#TYrfuY6jxF_V7z1G^W^5 zTkPJ+SUdJ+k0#}#=jb4>-k?DVlkLplLG^rRCC{vgKNjI2DZP{(l3auvN}-9e*HX3E zY|ZkKA5d6X?|Deh9z~GQ@vQf=zMECF6tFx~Cf96uRL2Dyr0O3vRLGz;L)IUN?B zsq%We9EPv;iZn;9Zw!hiW zLd0HaeQ06T&$dp7q5FmKY2{BO06lC!TZ_Ri!ge{a&lmz7vGuv+$C zbi2UsYwN#=!F=TImu$daixwP`59?9biqDER2Aw-gPW8$CFsdmt!!B>PT5w#|C)r}_ z2;_M;_b&-|Y=^zpH(i*;wxxzVY?Or(o&kELz~rcAsz}r4i~dI~HKcqu_#{jl=wi@s6Il?Ufv3kf$F~w?T zXS0y$z?7NWvchoB$7{tI#Ro0uTOhEjvHzIfn}xQ_e1hz7neMVY0$xKoF~@7A9Z6Uv zjoS!uqN>pUm?DoL+82J7uGh=@Y{gT@)8l)~nTHM}BDEce8f#*F&?`G>1XN}0*gkzG zYab%K;%OL+gr5|jq|5GB`|;AA>;DplY!-@Wne1udl)5|VjAOAF3mu&HBp4$r3eWWu zGbk%;Co*CYpfqG1SCJ$%SuWA1I_p-mXg+p95oO_zl%Vn>hFagYp4ls4X0;sVKXL|A z{9GC$LuGOv_`^xGS2dPKM`&)S#B4DZIYEPdjfYgGbCPpwzemhxK>YV6e{jy>o1q zL2V0HFsggj|2)H%xFm=oCa1Iz;*$uC=POKh$r;s;^A6XJ@FF^4mVHxM=l$BmB7biWn!)HR#8Sen(!Q7)C~Ladc&k82OJh>_J?`mN<))eYFWR$j6;7-lu|#i}e*ftl`m>F$2P~R$ zHbl2-&`;>f%_-Duu(o5oF#7o8cgir@y3)Tbnrqf)bYz%&RCY36ECGEAl%NaqW=Z(< znTQ{!!A7)0jX3!%h`PeC$J1#=EG>{YX zf7soO!SffH(yI~zflPj>oQCo@t-lf&W_Z92tQ&ZvF0L$8@B-K(#Ts_P^WjPyv0c~- z^`vN@8u~lK9P`oGLASHezt;*GG*p)HXp+nMY@%4K|HK%qF7io#l|7}S_i&*S7nhUsDyo3PDMlyrL`O?WgU7i!5kUL%_&l&#LbkI zec7`)NhjISeE>!LAyZl@GiVcMQl9)C{q3fOo4?KQB5WK?^6s`oooI@N)o8jP^LKL3hoE+ zf2>c?7s=!J#GA#Bh7D}hhj=I)B!AA^aDSP$wKz9iHAqj~_0J=vviS8#3ET+ThUMYT zu+U#iGfvpVV2rZ+^kxq3EvtV|=*j&3jf|iG5|z7@YE1rCg~eLD;>eYV0^CU4A3-FcXaU4u!CqR>5Hl zd6qua$)>X~Mo4r8?m$#l&>lrw+J|9rK_+EaV|k5i+CJw&@{6QIb9UcoXT-@`LqPX= z@2LgNU;N_jmXq4$R9!QMHq>S?1x%Y5foxeE*GN+gB0nym~A+YzVc9 zU;Eua>iH+B215L3o(BG*_|J$>^?I<$Ih^ZSZyAk`Z(hG^$j>pG>YAN9X-AWrU2TtR zy?oms@-**ya<3-)vVM0v^AHg&4w5HV!?=1k$Qyxe-u6<-NFj9hq`v|6kt#j{|E{9_ z5mekudG@#+N6?+Y0vt8yEfdyhoD&9CN=ba8bfU>rH|CGFbFw$K|t&85e9*)7!M`NgD<&I4y=aaT$oI`wC|^b5mEAEkDqraetCI@ z-+g*2Xo<5lR;PBwMnSn1%pzOnWL-(ZL&Iw(wL0CAAk_YhgLL`<(V!oE+=6=Cq$Q8} zEmZ3n?!2)Zfrt;EIlv{tLhb<7ooMUl(oJEccYX#@?z4vb9Fl(L`x=o-4NV0TQlv)f z_fz9`q{Lo`NhyWhg{gMIl)3%f&fl6e49qG)NS)IJFh~T3otpcK(}T#uIuQPS51t-! 
z3wF8b-KGC1rEQC79hVisWmDd*0kueu3I~dvL7>}j>@O;qcE^#3$%N4j3VJzuZ$n;s z%ZjphD|29+=ZGDyJL3a?L$ek3D}8+cDS|6VooLE%{DIgSYfX3A?K=ZFltvHJTz~oD zG`L-6cY|6tTYckE3K)(x;F%tbyne&%i3pus>yR1YWX^;>1bc-AB_ESPd00(M2c#!h zQ?E1L?8xmUSjDM5IPe2HM<726F*gMwG8Cpx(*808RW#Hdz0fRYWP<)+7;AL_d=b@d zR>CkiFt3y5YSHER+rM5oUi+zEBt$$EFf_3^V&p5e{{1kZ^1)paM(iYxRPCknIPGuq zMVh#Ym|YQu<8SVpX-77=!cuiq7Q&L(v~EhF{tGM%Au_474&ZX`qAZqOO%j#sI#6crI0s6gy-PogU(m^H|vAak_JK;TY^q>7aM_&(BRmQCRV)If8U`d z(#A2~)7(WLxgH8sWhf&*$6UF%8e1J?M4emU)3>jx+4)9mDI(yRdj8!bzoeR^@}&&W z8{XG7N90j6?hg{g$l=u2#ZJW6C8)4)hT4P<7vM62l55*lO4Xwf+gJql&_Zow2(yrW zWXZ@x%QKs>ISd}n{-$$%X!Jktd{Zh)2hlLlbZgOH?E zBTgBxF*YAQ6uK;4Z)c8hb-)H%|2@s$Bde$U`^#=GHBGz zCI0QztR>93kKu{T-Xq#iY*_%N3aL7i5p0W`x3?zBe7W@e>AZ0m@{ftO<%^T3<3sxZ zH}`TDzmjQywsB6-R0+MQf~477*(IqvA@DkQ4)|zsR|RG4lCU62or*MyO^jX->0X$1T2hUkIC#y*XSfYWY%=xE zjuW;-ifb}QEQVGzFmGhmF`TVaiKE%G)T&KHVj%ohxT-9Bt0&u^O3Ej*luP(^yms7S zOlW>GUo&d)6`7&g(NG7lSN;LQ>-1?J9heLiODulF<0$vH2Ko!`w9$nZ4=$#O|eAS^G53K{9f1SzpXVlAG{i-8;oMhzcstd1R;i$4q{(g5JYjl;B-E>j5?8o_3$B@{1 z9Ghdc!YH9HjFl@-p35w5K-!5-2onQ;j8y_zS@m$uJA?m??<0ZPQkp~gBl^A5TDG?j z>`m11Dl_rq8sJN^*ik~>_F590nK<58GJl8t6r#GxLD3Kdbgo!5<6A>rU08s!-5mi+rT-1Lfs+3qYIZz-) zcm|_FsVog%T{Z9rP?%EyZ;qYAx9mh~XwX8t`)^^#NczBLsoWkv+A@FW)r~QmOz4k0 zqM-`k>IFKf*~FtfJ;Ql0!<3hRd7HzRMp?qd)03tDRo<+@63DwPP$yWrD}@S$~xxJ1zN+vzE@*%OxZUWp`mZKMoZiq^yniF^$sROaAF ztYBOQ5|TB~pOHCO$@^%13XwMcMp>b&PRk%T_JK;tLaJX7o0i>|ywKDkMc*;ZM<*H1 zmVBrLEaRm?0N;gc37I}}%kW6f;#;<{_DAj0I_bHHJ0#=&$2mVEPn#^?q!Q9ckfgX9 zP%3_Ur~@ifD|Mi#To~j{kJSLz*YfnV$c}jA#Ds2TB?CfxduQiO?diuVsJbd6F_5O{ zDVbIEmL2cxqCxdAp_T89V^8_gWXgj6X;(j%%sQVdXDd?&rdbwHK!s?wP{P+3vB=1w zo)2c)9&56^${ZZAUh?YU;Mm5&lAI)zv+MfCz%>VL5WGl><^OgrzsQX45!U>7m4B^U zr;mCfa+_~EI%T@<@@IN{`6B!zT%%-mJ{<6qlSt|Q=U-kmpvrVjv-N5ZKy8YhoSH4$ zZZo+v&13fqqkV06z#%gZXRMnIWcgy^!92-1k!~z2Zm6HOT1^AhO~CP;WQ~^xiB94Ur#J9jM?w@ilch`WKCRsASC>?DCfOaa`ep&u zN&XkzuJDr#I~N-Qe77>Ob?0Qx9avkA#8@Jsw4X~&kr3Jd2+civvp;#?43FNl zS9uG$1$mH#!-K`EWU{f&`wc*dDzcrjB=ktkE^|dimZ$K`6UB50%H80{eo0tsqLmEN zVAhq+mblRz+x520I3^1vI(e)c40TY9{Y!ODmq}-~m+~^{C@~j3`yP`XT}Lv*LrGMR z=FOzdYrIH;P^(>kh7>Ks>3+XGUreeR?9=*R1cn=Kg5t(3cqwX4F0mG998NYSJYNK~ zn!zPCoRaecBNzCtRzy8K-^sZR}uw}W|XmlCUlpf=V}11|M3J|&SfE` z*&G!j*JH{+vfw2?oAdT9(|YaOUYERd`gXohy>oi}$?I4Twz9=K@=OENOvUu+jtfs7rmT}Pfc`gbFw0>?M_G$M0*YhDffOcykB zSUeyro(Z$65FVG3E@<;H@a!vt7qH2B6hVoJ__2H<=3lf?<;0P2Q*ExWQ=C%dZ$FmO z@~j^uP4GIH>?F~Uz0p1|7v5S)rSqIg4&-itkqf`#ZdGNsZ~rk*p$MfSgoo{Mejn_19oQuxL+X}`WXEcb*=12X`_`VjL9XMyel0D{7 zU2&clVO>RzUkHoSrL;#GAnAdNu~H*ZU%~H^n&XFZpJLmpos&nv5$Ir?`ak;-z@v=W z11=VBE>@YanO6!+`;2-t;7omez=@^Rx&iLv8u(W4zd;>=rqZ)O;q}7tY#RowJqfzsqj+eb#%PvYZE* z@vCHaR4TT%LW)K${^f^GA{f6|vdnLIFD6Q@{;SIb{2JW{?nTNM_9~O^jW8ezEo~v- zq$ORU$H?>Gswm;)uhT*wJFL0U5brTnl4;poes2RxUQ*};>2o+swK-xXFGWH7|Yvj5j+X2zTGhw!g@4>Jt#ouOz@UE zZA;qQ2x6`(ZJ{R_=RhB9!5SE2|TG=Nssc!4(zW++6latl&==sNAlf znZp)cO|xFX&E++jiwoWu$E-B!@v_Pc^!dLSr_a3mvSzs0>t+_cDuXP^349L&*oG+_ zJx1Onro!7kZz7MdE4pcFqtjs+?VtDvB}*y<;Wxstw>x%}K7Y(yR5rRB&@;SN1eOy` z+o8s4q6>d&4ocSLT$QtVXjXrSBYQyT*C}5g-k&x{R}g)m0u)?-)POVAN?Pj*F-w;N zAGttpdZni+MLW(hq>_C4CvWRXTQ{B4IQZ};t zfCj7(Xt~%y8XVYU)JZDS?#t7JK=@^t_oIjpOq7f@w z;`p`C26J;3jE#E&7UUdR$$7tB83jcabI@LoQb~`h2GVOxeloh*Uh70KJZOXIt#Lsd zI}m>ETK!y7A{nYF)bd7VjkTqz=nlPo${!xY;d%~37Vem@V5w|a(c-lkrfA7IudAY( zo*8q*XzqbdOG67Z-QdmrO+FgYE+6(~5IRj9&IpNF;%Vo8f@JzD^SxT`%g-Fs`gkTh zutYedybNXt1dEgw01vV(#leHm=fo;8&#vHoz$YtPHo;Uf%p_*V5Z9dyg$JREiCNB5 zA zEO^oEVHdHWj%rt9em85-_mr>9sYRDkP5+LAM=L>>TlOCy!8dviR3$H@IkA%SqrL#4 z8vDx+B?7`b^L4S?8>SCWA6Gx3&(Fw~*VjrS@w?Z_*P354tSs&07D(zM z8aU4Z5@MaWMHDC_iNzTO4t#|f 
[GIT binary patch continues: base85-encoded payload of the deleted PNG omitted.
Text recovered from the image:]

[Ipvlan L3 Mode. Docker Host, eth0 192.168.50.10/24: the parent interface acts
as a router. Unless notified about the container networks, the physical network
does not have a route to their subnets ("Who has 10.16.20.0/24? Who has
10.1.20.0/24?"). Containers can be on different subnets and reach each other;
all containers can ping each other without a router if they share the parent
interface (same example, eth0). Container(s) eth0 10.1.20.x/24. Container(s)
eth0 172.16.20.x/24. Physical Network.]
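The labels above describe ipvlan L3 mode: the parent NIC routes between the
container subnets, so containers reach each other without an external router,
while the physical network still needs routes to those subnets. A minimal
sketch of the corresponding commands, assuming the experimental ipvlan driver
options of this era; the network name ipnet_l3, the alpine image, and the
container addresses are illustrative:

    # Create one ipvlan L3 network carrying both container subnets on eth0.
    # (ipnet_l3 is an assumed name; the subnets match the diagram labels.)
    docker network create -d ipvlan \
        --subnet=10.1.20.0/24 \
        --subnet=172.16.20.0/24 \
        -o parent=eth0 \
        -o ipvlan_mode=l3 ipnet_l3

    # Containers on the two subnets can ping each other via the parent
    # interface; no gateway is set because L3 mode routes internally.
    docker run --net=ipnet_l3 --ip=10.1.20.10 -itd alpine /bin/sh
    docker run --net=ipnet_l3 --ip=172.16.20.10 -itd alpine /bin/sh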

\ No newline at end of file
diff --git a/experimental/images/ipvlan_l2_simple.gliffy b/experimental/images/ipvlan_l2_simple.gliffy
deleted file mode 100644
index 41b0475df..000000000
--- a/experimental/images/ipvlan_l2_simple.gliffy
+++ /dev/null
@@ -1 +0,0 @@
-[single-line Gliffy JSON omitted; text labels recovered from the diagram:]
-[Docker Host; eth0 192.168.1.0/24; Network Router 192.168.1.1/24;
- container1 192.168.1.2/24; container2 192.168.1.3/24; pub_net (eth0).
- Embedded command:
-     docker network create -d ipvlan \
-         --subnet=192.168.1.0/24 \
-         --gateway=192.168.1.1 \
-         -o parent=eth0 pub_net]
\ No newline at end of file
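For context, a sketch of how the pub_net network in the deleted diagram is
consumed; the --ip values mirror the diagram labels and the alpine image is an
illustrative assumption:

    # Attach two containers at the addresses shown in the diagram.
    docker run --net=pub_net --ip=192.168.1.2 -itd alpine /bin/sh
    docker run --net=pub_net --ip=192.168.1.3 -itd alpine /bin/sh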

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":45}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#999999","strokeWidth":6,"orthoMode":1}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"12px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1457584497063,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/experimental/images/ipvlan_l2_simple.png b/experimental/images/ipvlan_l2_simple.png deleted file mode 100644 index e489a446ddd255ce9360445f0f895acad31ae214..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 20145 zcmcF}Wm_BX)-MEt1b26e7k77e*Wy;ZSn&jRC|X>LyHhNz@S ze8i0cV6p4QB~YT4VUhO25)Fd;Y4*1#ZpvVXQxpL@g=es)ECJoB_ zyXF_<{ZZ+Qd~qMX23VTO0}Ci$EJ^VH|K+bA>SiF`)Q`u6!a+qr=091`>~XaCToMYq z%#PZ)GO53SJ}8O9N}r9aMcoYQQM|NynYTCxUG1+$oqx-fFj)QU^!q?7qhN(_vcsMC~IKT z)i*HpeMViZn9ole+tgKap0V#v_U+Fc?`5d5fT!ue0mC!l_V&Quf=75Qy*au+MC7l- zeCh8?un+IInI*_z|7wZv5M{;bNI>`**5&%Eq!%q41!ycdM7^OH0GJf z#?QYSMbkM*`&<<-gdC^9X2vA8C(zKOe-NE(1VgZ#JZJHk3m!Y(6RbSS8hww*G5t`t zSz6qUA{66ryN~oW1Yk&=qIz5u1wRcy@S@AmY|BSPjXc*BJL9{2X~VRI07TaD;6-ut zO0EEWC8%$xX;5lmLA}upP)&ZthyU*FAaF+2&rpG)o9xT_1osxTGeRG+9>^thtqc?{ zB|`lf3d;R$sPp%6>)a0*^ngtwg*NIi3KA{J4B||mL4-1puMng1JxKL(fVHc34DX*n z$GN^|;Y&9sgHA+;==1OdA9bj879%e!v#`T}*Y}NN%|r=!#L|g;Fs26;L82C|Cajit zRyHwNo;V|V;drzYKZ6;1f-QgFA_$;vTvnDT1l_U}#f@x47rwW^oEo)$P5ogQUi1B(|g0`n?TBSX{|gNu|)%Tz)QCSI{btITmE5X zNJYEcoVGIdrxZV+3Q!)X3`=`{dEO=?rBf>?%E4V>a6Ko+8Q(Ehs-3|UsW2|LwY z+ud(#h@OLgnDwq)Eh9VIH_-+@=cg?)vhTqZfZ&O$WOrJU|rz+4cMZ+(EHf1j|RUrYkwMbbK@;q_D?O0qC2uIm({e-I z8Prm-->~Qq|HP9=?zdH`wQB(L%j2k&?KJRsCxEXwunnL_6PvKP~^+s z<&JOFVhBJhSqz^b7&WD#NSrR&MVv3O8~oHrJbjrXzJwAg*4_ET`TVUoJ^ikQjL{Xe z-=sBrf4LS2v3wi0yuHo6hj_#qMRXfTAOI ze{9uYtqWbo{thk+^c)|}@Po(6y8YSRB+y{)=V7TaS&R&4*F8cy_%1@N^=(e*U(-$? 
z3?!%Y1~uN?g)IV5b{jGs9sc|2dHu1YQm9%BCMlWo<$WExAKkf?I+RS&iUIm{3O#EK zEu`=PP8HK%HGMLXD;bI+(KWnv)qXt%-ta;fv$DFD+$YNxdAN<|(kIJaV_WS+13kS|OHuZHDuqsV&rZti zUC4?bAHu=keS>z6VY;2p2m&XfvjA*E`;9KVeQxFmagN6l(U{T^Yo%hId!GuUa<5a( zKPX0X@MV%47lw9uad*W|r@m-`{|G#P9W26`izdRAr`AHA*v3j1r5OCgxr`LJ4MT|k zj4m`@Dz8*9m+owX19@VE>8dLS5daXSA%mqb#i2xcg3%&X(;4rn(=4juVbn(M`Ac!4 zW)IBabKS1NDvIV>*k8^Mm%!{8AWDL|=?UO6&BN0l?&sKhk3f!D>DvWlFBf*=xOF)* z2@cX7J^$vdnUS?XYe$2n7AXREUHE*A?#iK>f((Gr7@K)8Sa#|VW-K}qpd=1~C)knR z`5gjXOlRNJ|z{xF1@TC!z z3@>;Tnqyqf8cqhVntIkzbKfy$56 zU2=7{wMW01Y&g5H*3ZL$kUfu!2AeS&)CguhTh@1&?Bh!oabyfQL*4o2(_MPuQ{LV= zJPhY!>4H5oQ8E(!#5FgBH^4ubK>RqsF8^#&HvQwhRnft(#0Mv?9n|xKCgI1SFH602 z5uNdVl_7?Ly6rHeSt40E+YcE$OtSthv8%?maN&iQ9@Jo=T&gN!Tuh)STa6r4xSm(v zZwEiOaD42XeYh*Evi6&DpJfo4A7BL@0T{k;5#&_y&{jrgt$F@PXUQait4|7=NGqWr zZZb|OWde0>UmILF8dqDNsuvVWNPRXVCc0TaU;NbAh~}GnIeY5Pm@JOyWbvbu6|C7u z?lbg9IfZbu@3RA+h8ZhJ9mrw-S1aGe3|9e!it+SxhjfY>U-eqB(J*|H>+GfIU@R@oDe;$GFfr7B9f*qPUV;w0wJ!K2l4 zL+-mP_AThKY}3b--xlc+*k)zhw$WQgEU5uUKjyQ1Kd}8;K^)zRv3)kIxS;OP3b?tx zmQV6<2m^^#S5{VbbdZQOdVj0z=?Rka5sm=h@i_5k-)(<~3m55nG1GA}LcpxobsQ|> za=8E0Q%HQQ|2TNJTadKBAU#bP&g8J1SeO;zrDx*YJ)4dJgkt$qL?98@-V`fywjh{w zBwX&kEHmfuE3U7qa~8&n!Qh%Dw9+K)S+A)6o+yBMp@6rW_$A!C8>W#jL;TU+zPzjq zDNOQFuPizy=Hf_ENojn1{6dMDnK_}xcwu3|>HUU{)_VC3;;j8w%gGO1z?Y{$0l~&_ z#K1HFOsEf1jJpA7`&imyiA@azrS|;gHabV5SV#S}5guvgs~@}DeY>p~I%QtZvP@~z zi)m?8s8Ptx&5e*evkY{V;IT4Y0BDvKB3O|dP>Na#r>>iw`EKA9f-_l0%=^;e(eRZ) zyg?Ci*ApjlmrH&U^!c&w?r6>iQBg?awNMRPA-k6>6!2H^gFr}v6~yYXSp(yqS&SKLpp4OR~pXGlE_ zU`GQP#!S18EqK(-@A5PADxELAUL}%+Ly0!))v7)6QHks2X5SMl8<)Bszh0%V2Z z;BC0huqcb&C-Py48><`jO2tEQn5Ua}^p!TfrkKnOs_;ngt!<5jejKx-+QQapMFJ&1 zbNSZ2gRsD?gvy>Ql>J8@#)FoJed*f4y>@Q!s?b`DvIz*}Dkqsuv^8*I@9ZB2T= zAb$NWBEB-#D-oLX-%JMi{+Ddbu9&1GbyJlxx1 z$YD4LfWX=M*$k>D`@s>q};wwm3lwVnpv|ublV+h!=f&V0w?V~cLakqc_$8RwGp;IL=-RF=K z!NAqiqcbuLEf^&a{i{2vkeOq*Tsd*rP{OB)7@odm@@}%w+9)9I;;hmAplO7ENj{QW z;6DvW0!WT7QknJoaG~6=x;TPafUk#sm3y(?mwQpmWd+_C{^5nDJy*yzUjU%>2h)(~ z*Kh1S4EZoM90XwM3vL0|ou>rmGG?ET zgX|!bKwsE4S&$YCY^UATu&W*qP#s6+)a0KT9yMSfnmV}r-lwEFoDCP!Z$M-IYo~=U z8fGZ+!k>F{4*iN;KK`^LfH#Yw>cd~Zy$f7X`qwhvij1sXj7}3@wyyA|S`|&7K`b(& zxAVXZb;x}yc&3k7FG;=SFKn+b;GX#iHlR!Mb<(d(w-ltOf#99%8z26EXvGBb8t~te zygyMBv7J)+z54NBrDqA_kBxI6*606e^AGNCRoDNjuX_hk9FXNMe?+Fo!ue-Ph)X2E zh!^W$FZWnU@53-4&O08qkbfO{`_TCI@d20869L$?f+|MATFsrI{`cXjhzaymVS8D> zr9WQze(CSI$CrOrIYph}=KjS0&F}hc)Wd&F|4b;%_`M>l6DPhEn6E|+L zN@t|Jz;X@%ErOVseoV8I`e*Dn*5Q!p8 zJ}36X%CP>2Xpcsc(?w4UtS4ze{6Lp&_jA$X=p*BYb6AMOQU|(lEMwJj5W}%TiQWHT z)w6&M6iN6=UEIVDJhoVNQr4`|*Ono`Q|R9bo+#k=cDuuv<;9zyrvI#+tR3E0BVWvH z%dT_lGJg}S_qDA=@k76|Vxh1AlPrJ6k}s)%M!ohCM)Xxh$@ekabu{OR)!KRk!?|jt z8aRxF$41X5TPih=n1_GaY=6yYtgV{Uf&V+{4P*ZI+TU$9oO4#;nUeP3?Sxtmf3{{@ z&>^7e$7gI)CsI1ZXAGjWKuKk!@!D4h^kH{|9$q&HBt8hm4n3u<4s?ohe_=H6bc_aB zevAb4dM@=pylfrh(sNV)BeXtThE|Z)=32VDF7_BMbkHy=`fqU{W6ZM8`*BNW4!i_u zEk(LhK5d=t_F`;eH!$EVUjqLG4Cvm>)cKRw8CBuM!RG6)k>ks|p=<(zC;xdvu^_DW zFL{cSf!ffrn0taGV9##G73eaHR%qiSS!=UeO#fv0am}Et7aB{;bJ6UrA(8LLd(&{9 zK)aP7WLT`L#vLA7F>>d8D^XAW4`{)^YEOA+A;(%7$$dS zZQJwMI!iaVB6Xy>zC)JB<6`?HsA2Y~38QJ)3&WmTE#+anjCS2E;_+ImC^n_=1)h4ZpPV`CxkusY!ON})B=qUH=k+r!BXxp}C> zrkyKz1oukDrpyGd6%NBCxAI18pR-9}K5?$_P!6q`wGhf%0;7$cS2eK&6d-4e!Eu-A zk>n(XBmw7KLK$m~@|0Z5EVN^Hzv0Jhdcl2WQZ#HAuhuF)t(RQgpL}N_&+fb^^ceT??2-7<1H*5jSQ~#&KD4C5{$G= zOvfmmYmL*=Y>G$@dW&XRe%X(Tr?V(4`Qg12wS$S+vr-bXx7c*%JW?U`Jy=W)9*;w{7$2 zT;rd3&aqgp?X-P$=Hu2h=prcY9F27Z$kbDFP6{8=3Xgw6ZPwDYv?Wdp|3$FmOU|hc z7KdHdy7#w0^v-MFpaCDpf-4#S>Aq*ki#VP0-GQ74fxD-O6(_kQ?&m;UXrn(o zCUE&Xn;CJ*ik(a@iUuW8UHwf83FWmFLGDS{;@0r8HB$82C8)HuIFb3Hp*5;5Ik96T10b` 
zb2&BzJo;l%NOnCeh|qw5RW-jZLfMOLE5n~P)zv2@(LMaF94+WNVeJ-69Cu$Ev{;fh zsYjfCnowV@Ob`*1GM33*IJSTbjW6Pf04`SaNSqrmuIG4Qj%znm3R{))PhSWRQ_ZF4 zvll{b)YoSQg+jscrQn^()>581-UU_{sp&XlK+s=l*}3^(=7nqC6B}u)tmng`;*<+n z_-N~_&oYlR!J0pt>G7gloP0Z;;p4($=a=hAQkK7dh_zJ;EYX;jGTn#dOV`^;*Ad|o zlG4L_umIG2wCu;izsB%VF~;yLXM=qEBV%Pz`S z=g59bi!L;7w<5z}mclTkS%e4q@14v}eCr4WJK+i0akU^DT{@0)XP9(IxN<3T>7UNm zCav!keM_a&3b_KN>5;cu*@}Q*VCCpemqNjby1Ke0B_(~h`v(UCK`)(Bg#qv+rBIh! zmFEqphtk2#bDo~Y`r}zVRnNlpSdCIME|f!hm<~-+Jk@a}H#-|u%k1uSjWAY_wop9c zgbn}*FWp6kZ8kJC{O)m}`4!N>k`Nas@b)IX#`?{(7QI+rs~!@pry&1wNq15zuVllrgDT(P*7rGViL$jB*pruuz<_K z0PmisOiXnin)?MkQ=)JVKE8+zao#MM@pRgQ=es|zrKP7O&F*`utB)7M6bZ4hvGMU1 z(FOV}oI|8PyxwjBwPL@nqLr-N_2{~YF1F?To;5-*FMPZ^Q&Uz(MMceT_7<}(aCf^u zH+Plz<$29W`s20$=60K<*0H*2@)2gg)wILwWP3E(@Amj0(5)|a>tbtoRtqMI^oMRK zOgT;H#Zwpx0Xn(xV%JTR_u218+v(rk0V{8l%V*Vec%M+FG{cQB(|!JOOuvYTUU9eM%@NtStWi59a9L*_U#y(wTk= zVq(S8xvs7*LTCA;>xpSHQqoYj0k$`+Vaj#P4^BtE&{R(yp8e^eK*LA|nhn3*wK}hT zx6Y)q!Q@>;rx5k$=i-8s1*5O`aVaxwelRq(vjxou)@8&f3Us}LQ%+q?Hs^iGs4$aa zVB>L$+tF$m%J#zgFhvn?t>HZfU}U$iJrYbq&T)~}j~&>${OcbSB<4#r`52X|B>j%R znD-k@g32sVE93hq_#;OIMGZd{5$Q@5O>&HQ zXQFmLg(XiaKLu8j&pb1pW^Y^>oPnX>7>XNYI3T02dH0|NM?Sd-7WgsOxfo|BUP*a^ zBjOuV5ALp`!Ny5+`21R^*U7Ty@gfJe@JdUoAr#w};~Whx`IIiy8a>vR^vCOx(Bq$y z8m{=gS_9L$yY!RRCs9T*CoOt|PAw_iIks}Q+})fbPp%f`ZgY>^pZH7ng!fK- zx=Ycjz*gKXS8iBjCOwU*c#>3bDk1qFgH@_;KP9EZ35O;9(Pj%JXVL?GA(r9tVM0;R za5aZTmlt$vV!%hIB#j$3``A&QGfgwAPj;bS90*qxaa$h6{_rDg(4R>)v+QheLY_E+Ka_ z-6snacjq5DjabrSK6mQE+iU+BGsdcuSEa>H!iGIf&| zVpmg-+ohv(!SUjtG4T-JPa>kb;aXDTE?=FK*M56aZV48ALB@%Uex+TWH0*Niu~4oW zE9M!ll(ArCUrzjT4aU+Ke@$hKOVS9xSyollOVI&!)3Kp}@#rpo+={8+wHGCPvT7{f ziWU+YzG;?Z(hFv^GF$VL`>1wgMP}+kl^z$5IwQ9gd6lq4ElVh!3!=YW?uqWIR1P-e( zb)gatU4<;f>yivtJ8oxX$nXPGIl-Fnt)@fk_Qh*&>x?BCZmc6*e&L zvn(^Zw1BqSet$}g zt*LS4?z@3gcXqX;1IOalflV2z{VA@zV0q$huVbnVb3w-5V-+>SX7 zoSflMeMmetKwEl!)5L+0Uw9fFH*gq}`JeJlSUgB{;#lZV8^|0&-ME)EW5O55YC`yU zrT54aHU)3xU|Sx-BO>tpAk~+R|ZI!X-W= z^ms^J!H)EUq!h%=GYApE@S_IU>r7JN_&V#k?>iffLNkL3Qld5wx)zHgw}(@Hbm6M* zDt_}n**q&ANsPL^mDbEmOCf^9>OfZTN#QJkhD2pcVHQT{6y&#iQJ)~87{ozoMaFAV zDE$e*T#Vpt^WCi6TpJLG0->Nb(My1MopnZt3l@|4`X&sK`dK8OEm+Cp#2Uah`AMW- z{ltLZAOZo=dE5A!7b)fBj3N>(JhYbHH-_xEj zN6TjP7-^jmqS7?CGg%HVtNL^4P#VCnlWzn-^Lk=l$A6z=4j^45p4T~3+I`9+`Lv6&q~9~b_$*mQ(C}9|#gX4wiJwgPo*+dTayt5UI(TBJ$+Cs~|9X15|%tnViju(nQj?B<662bBTB=}y08IzTJz>OZ{ zC2({rJG8g2!VH6@1lRRL{kJaJyEa|%z5}q}ps~2gz`_rCF&+8P`=vpnj02dRf{-f* zAWVPv@u#MWFrGI$auVGqhsaj*Afsk&;mFCk6Nfp7bhmxgU7k?f&5-h(shpAJRBUPF z!n{VBk#T)rNc%mQ(xBrD7wNRNJNos{Prguh@{Q4h_1+$J<#t_hlkH{WBK+BnqYm&F ztEmeGwHO?Y~_7q$H%TR(T>v#Z!!=cDEr~*dAhej`#KPXbG+U7ce)1F0Wpk% z#MJL%Q--q<1%?f5iW6es)zSf}Wc1!wrul=8Lt7kgc+y$wF+9F*%lM?_pYL=&$l<@(Jjj(eKT3F9X?J*6ewX-py#=Jj=R zRoB3|4!1=*$7g3!bfmToCHH8EU|zR)vuYp-O^6A?sSp9)7cDfN(vaXFh(TAr=K>pG z{un#VO8zO%8-Y%j)7#Pg!%UYhIL=%3F)QhJ(Cw1#wz1)+86ED&F7?L0*Fa)iBbY#Q z0=mBEg7f!CVF*dp#GQ&%1IA*lR-(}sfmkjlC<2sCoiK8hmNQ|x;8Ee;{vbmNR!LF()|Q{2H#_Z2_Yr%IC!-mQeT5^&)h1kVxk$D14MjjG%5 zD|JkP)&V1EJ|Mu^csd)~N*cUel8v1quPe6$HF_Lg*%Sbm4(!10x@%zbM>?LKuxOPG za)`l>#Paa$?h;7oZOWg&(fO>i2tUhm)o;R&{!sb07Hc?m$@ikH|kfCWHU*tE$) z(Z1gw@{OZ>Ru@9Zu?3nHT*uP7qkbm|#ULDl)YJh&Q3;b>){z&-;?C6d^kMJm(ON?x zFB=^7+wl;15wcx2{hf36edl#1i#3G!WKuX}-{n&T*uGCz9@o%;WD+yrgmFGxHO$@O z+}}Z++mB!OP2kERCjPqZ>BABZxB;jrr<26pWzK7ZUR$fJ52_FOUhN&+QqGGrz)U|m z?AVyaV=+Ts$=HLWM0Zxh(JX<@Rf3-nGFnT6UZy4vrF@be8V=Q8GQ4dPbwp7Lkvth> zJm)5+G>giI&LJ^k^;6HsYFbHo^lYEcf2dV6(jGTbMyp3?YPafBW#b#Q!nW9*7Msi1 zd&Mt9L74E0uLO`yL8xZkwvhWSsulJVhA#Jm(^(Kzvgz@t=*8vzTrFvk^B+=$03WZj zRV^d>MBBTm2c}5s`2yU9Xh(=4T7frJ6VfLy$Ci+BtQdTU3 
zd5l&taN}ZRG2w%1x92AITl1^?!BSp|fRA%u_!5E+EB|Lp%ni+E!0%0xi;}t+RGiD7 zzM`~SCP}>2?eKYF%NCiZ#2`R}{&q)wSIp{Asy>C$kZO}yMLs^^;#GzKbS7(k36!uf06Z#|e zx*tAU%1811d|yg?vlD08PR~m&Vb{?O`WG9`)JFNb>-`xHoJ$#1dh#AmT5pVvS0`e8 z>ITWWXd_kNEvsRCPku)X1vCz>><^yk41wTBFa|~A9uTMi zGU*kw3ArXIqqp_#5x){d+Yk1qFh0@VSj$}sC+k}c&tCnz(%Zyl+>H!4t&;l-v^QbC zuFL~^61eSn)YEj;n4T$AMMx<)t19B@yDtn=DE z%d$cD$+cFO%Ok5&N3t2i|DZg6WO(;4Z1R8zUarsw+Gl|DCZ^F!6Ku917=nRUrDaQC;((|5bk3#EuDg(C8dM8zi#N?!^DD*J zl8^t#L-=KQk)_=!qeF097**xENI4nB-Hrk&-+2EcV;|mD$>718qhD&d!sVi^%Z;`e zG->sKZb)9jCZ&4z41z82_sbDph9hBScaTc3ZkzSzw6`LH{Bs@7xCLDQO%m)8o1RsJ zb5OG+`3JFuX&5Ys8_5YA#4=lqhulntMDjTMpJWudYSN1Q&zLa5VGdCzO(VnNW zSaxrcLSi)I@azIDV_5_V5eU<Vxka##r?>5(^|}#>ROKt{QnCU z#DwD6gpTc4zz+kj!7D{9vADZx**ku@&1Dh^vau4`aBsh2fM1-u+iJnet#27HXt0=yjrZf_2bdD{3u0wb1 ztk`VS+>C=nKm@mGQN$~|q%BT9IpW2_ma-laOSD1cW5eAtMxF~NX`&b=y?39O5!V)yk*YUvpXP;-ot zp3XYt^|@o@^!JC(;w8$X7uU!_Z=qk~1gPZ_p6SY63< z-Z5cBjpCHm^%bGrd+%^>m%+Ey)TNbp+o(t3zs8x?f#xPncu37YNd~SIgFY{F$8W22 zn^@u9pNUw1c~;Dzdg*pQQ$76{{X5R@-U$9#HuGkriW#`A~*vshU#7Z zbX?V($VC4>nR04uLJ&LfcFwx^-=S1yC`I^&Pp_{2#NRj@w>_V9;u9fSNUga6heP+mrf=TB-R7JO=w;_ zzhdVV@Tl{l`|E@v3Cb6ExgFu)!Jyi|OIFapuhNF#6S5o(-b!p*-wyr3Tyio({~$l} zCz;NR(U(s$EppiT?MsvYrl-}?CGz-ZDaMC3b5zLNM5r=nx8v2iA;wK^*>I+KRKe() zWhSc+azN zyPPrUXx~AJsk34(T885e&2hgYseccc`*+-?2Fs#S0gLlUqdpWCf|boccW!7GQ1_U z1hr32NThsRGG|^}I9g3ZV`!FqLnZ&=MGDWXc*bqPGU>~RG@&tG{`9m5j5X}-$;xnR zmnNj+^|Q9sFA?5$3zKMy`n*BVzZ5X97##Ra_#bP$Y3}qPuP6F=Sq?)Vt-(CIC#KN5 z(x3Za=uloqyq%ZFXC%?{ssH{aK~3Y@`6R#R zc2?ovyuPvGP5Yu|rRO4gj#eo#R1@Hs^j3a%b`{^LD8YSpL5a1YQWfnB1zrI5^1aRgrX1`-0#0 z@?^eE6Xy1xNP27H6A|AoYiVf-R{@9*dAE)G7C(U(6IfqN9@gP6?&3Tx)8$M{_t#kc z_+1c1>W}{0E_+58F~{^6s>XFT*GEU1VgdYWvmGYU{#7ak&gr7u8~kCQpQaqsE#~pGnDU-zx%p2coe&^WxpC^{J8yg>iUcB z%7KQjsT?Bqdlr`&9xqW?QAb=F-~Fo>p?tUcAn#FZ%zHfHa75l%zM}?u=B`eQLBl``W8T#~K}l2*i);+X6tjk1od`E<Y=KV3#P-k7eqGT4vqwhBlo65 ztTaME%Y#Jh8FkdQJSCx#Hr{%JIXeBPR7JPYBK2Jl#-%6PY5G~ph}q1Wt?czkwS`X{ zyc6%}J&qC-5yAmc{s#k1o`37J4Xe|z&0j{^Wbsn}T(zBQ-+Ay@YOPaH42kKAZQ&~o zB;#$xe&4N1i)V@I25-gNut0?(nZyyoHsN*cK3%6vsLeQdDv^v`n3=ihh{qwe2X-y+ z|FzN;nV|12mLS?6gb7Nm;H%};AHZnUJYw6~Ak#~YsV!^wU)p9i(c;2-41MTa>HF@U zb|^cRS!3_pl~O{)sndA!Gs>b5d+xKweeuF;Mp*FzY9WX{^~bLnFzN&Eyo??MJ1CIW z)+_Lb4_dgOBTwXzsqGPs-Rn1hlBQSq#x(L?H}XiEh<^&NuH^7bVU_2$c%Lf=rdmp3Ika3ZNCpvIcWJ>I;p2pp4KJ*Y$bb5iMrlIp-YpUQ-XPY< zVsD;u#DWo`$DmhnSi+;|o5sRgY-d$gVu&8I6~imjCdd2~-T-1k59YaGA&iaHIi)8SWJp6&U?3zxQ1yobk<|N-nQ1@dcLG>1g#7Lk$$dV7a4riS zs>4Lzu_K-6tmmAPPXBEKlk0x^yd4W-isCyB$!GCO@vaYg+0aDT@fI)NvHW!XA7(0O zL!G1qYjm@LGk1*sHw$`UO3h00EEth@TUG(p5`PLNevikg7>4$TVq+^&&`#BYx^9O}}Dq{44aJB916HfW>L&MNk!s)l$m9J4V|-Yjd2Usct1c*rDSyF z8J#b$#-xF7lKcFs$@;E6d{7&ZWkLp|c*1^iFZew;q;1E8r39VGtvpq1!O;q59g-EM_5M5wJ&jy95n3e11Bjc+k%xM~pQm4C;D9XpYL z^zkQhvUO1=+S^a~@*MMXU(6^8QQ_}bWszEHHs2mlU?DB$hnTbZNvl_Y^{5tmf_kW7 z7>PD>P|{RpgiOT=f`4xi$L>O{N~$sp?L&)Phg>H%Xj+-b>4(QIgl*oh9FvxZmlx=I z?Uki9PNg&Gs%X*JS7fAzIUEVc6U@EnG7n{XI+SSSojGM>U>dA9yO$6a4*Qq)&8XB> z$9LT%ZlUkovhbG!Uf2Z$$|El>$JtN>f#bCMWUTop5^VCCKN_#;98u-Qzf(q>r4UR1 z)mHJyRTTa0HPtjfHZt6Qf|2~O>#xC)uqyq*lkW7O&$F*$B8HKnQsSnoiO(MJPDNmW zXXV$3TkYN3qkZ^lu&f#%#gN|Is@cV|tbrAx*TBWamFItnJT*?UKUf#?q0kh{_H=*W zMvb5p%Y}uYRt_-5e|rc5DAlW+XY=FgYW5nV z>+tU`KO&)l9xAHYuw(I(rp4zVZ}y6}XE1c$hJEF$M<2&Q_O+Oso6}t`2_5js+?WD; z2r)vtP-X%_iA*!^>!RCJD-O_vypBKbr0Nk$?P00TaXSz~|=(@*#;%gqky z)Kv8hX8FEZk=v#vYx1k_F;t1kd@(keVNT3UEMf2P#U zF6jI`E?gig`BmUU)wh~r5DN}_3Up*M3WuI>+1#B_7woHJL1}Jnk*es@^USNzTNT6h z=N*c+xTK_H1voS`R2vo<8Mz#Qjg74dq@_*6QQmr8>aZ{|@;n*4lisAb2Xuq!(0CZ_ z8L6>kG~}feh>PX5aj|@XUnFyL^RNvxjD?G7i1(r%X=}EP+FUn+7js!$&YIhfY64CL 
z+x_xRsL06Jpg37^hzPr8pZ)Bn0j;c*=7V>`l&+-kJR5&>hr>I(3VUj_csveGTCSez z+>Hf?&@i54Kzz=u6QP4Ri(O_@9zL!|>6zhQ!8OaJDk%W&G^rh0qe!RwFTGZb6|Z){|6HO0lno z_-RWgnH#>kQU5)Ji7Uw7c6qY3>+@5cQRFwJ2K0Dw!cyeL>YX(Nr5|aggseJoPnQuW zDDtcLf^<)D=Pg+$MNK0Xi^&eIAH{5QF@yBZM-9N{0YuD#P3x*TOs;~3nh6m6$*cK& z&9K`5=9>)tP3wb6L&t9E<%yNzpm3W|7i+tg0*{Z^^801*SFx1uj}8YFwj1h(xR|yq zON-+tRS6c7#x|2wb)VLnsd>Q@*h%W$Bn^|rEM_dJI@MY?Rurq&YY3?@QR>cztxq`qR!X0xEi4w z1ZH~?=0G?)ieKqxuZV*f+I}y}$6xV0Je30Pr)gnefX97=q&%OuV%=mp1MKOt=g&RO z2u>v>)?;mT%BC)(P;@fXhB)GgTsF(7&G@ieJ^uJETKAkNks!L=(bSMeNNPk>kb7-q zeae=F{aU2IYSGMu2pM6I_Z&kq&C!k?3|$J<*npK@v>=4TXEw4 zKNc2K^j-Zy!zaxrc~Aw{e9_h3<#nD-;w&7&qRXGLZTf3mxkdLSoNw1N`tL1@3c8#oY zg3t3_PY>%}g;8+#FC)t02H^-fe#REeOti3|FnBq;+rDZ{R?qhT1-F_jwfOlv2xTZ+-Q5bkc}5Rj=YJ(kfT^WMiLmwaxlXw1X~SQaIquj6l%# z$*_!JtuSpLwk-+y{UxD^G+S|wX#FH@m=|OA?iNC_?F#$Skc56=*8ShUUFGT6h_%h3 zDuW+X(^+hcD`-^2oBB zED1w>$or-H8^9bPN(hqe@J4%M4Nws{27yY|*jxlrE)++tg3=2+JbzZ2Ht-15TzK2( zEq@uGva_@>V@*r=f#R=^#tFA#S;+~JCLezy#kdhPvTm=@ZAard>^n4z6qqcAIr~g_ z#zlSVwb9|mvC)lKK8gH<45w_q2$fRdOU>ZG%?fwY1k)=DIMr@iid2LJV%i*cvHJxC zeK-iiNwsjoCc9#5IWhHip{oDWoEzKd$WoG{m^-&B@+J*vHuEXhEED z?|T86vw&w*or*ZAt83}swy@B_|5L@4heN@2@iB+lyphifqZgWQiDiWG7T8 z`#xoC*(Qx8WNT!X$Yh%g4GmdJV;}n(2H8S=qxX;R`|h9TxxahQIrp63xzBT+=WO=Q zvv&xkTVy$y2nDO7>hr(8kj?NYZD5}*E(}&iqqr>BZQ{l-<4a|ZLzRzN`e|QDzCgw6 zfaMiSn!lqWoAY!Rog+XkrjUS?%ah~rWqZN{^AZWtEs-h$=E$*ep^NvYb}u!eHgLjl z{IRL(gGx$XK1gG!i_vv3rw?jw(0%yz+=9LHr{&*^N*uX82v}xIFilM3EqE#Ex_W4P z42NiqPMkynkB`8mg*LM~Cd9L8X8-v#;clUNVMJsUFLrmG$>g@t-Q(RLtt>j0 zz!E`aE|&d;%2fEwG`Y7S27mk{%Ej!}8I$ADwDz)&CvepPARixJ0j*klw1HqLiaLz* zu$|mTqo7W7AJ})o+B`9L53V0O$*IzmZ68%Dbp~8kk>pQmK-R>Fo7=$|wW=)mcnNEX z>TF#uy8Y$~Vt(ohZd9CJ)b6o**~|?SuF4B4ul}C80!~_3Ujr2*?{(!fjSjobZm&5~ zJC1vPed|ZaULhXuN^`*yK0CYn=B#1w6>oKqS+kdSOOWb9dlLjuw&-)R@h#9y)D{gU zW0NLSpF6K|)Xljy0ay}!EaWCFnNqC{*3O+N1Jeq-gekxBj6F!KmdH&0^>9H?hFgTG z!;ODkzV#q=_^Q11$Kr{;nJ>lfyp7DnmQWJ=N#oqo2t;QslVI5Ji2?o}na>QCp@T#V zE69-}5-}tT9hPod_#wgw& zDk*mX;;kBl4Sl)0bO;`w!1Fxbnt752v~PYN%)Z7$>I2P~==T<39(;w5N9(v3WIkmW zd59mlO}#Lz&mZF5o_1Saa?i;A;~6E|(~rvlZV)Bw-t^v_H)E4eu@!t>dDZS+`FE+t z_OhJ-jR9Z>_-<A32giG6D81%G&( zRCAgWi$9i+g-%x_`00^VV7b7zsW-YV`Mf#*1REgaT+!4Ryu^By*bsYkwdcq|pf!iV z7am3P(0ku(1#X-A4OHIQ%8M^b*rV=_67D2WU1vD4y8B3Z;o;8#LIj7kG+5-x&K+O3 zH8!NdfWP}KlT;&Xo@HjyAn=hiLrmu_sm$A_6N5@p#^a@7n11aWHs$hT3#-@?FCB(x zj8Fn3swk#Zk7YotnXl<$P~Is{!cRoT1jX-4l}(eC$BWIS?URCyWfF|LZ0+a#fAJ;i6MTs z2Q&}r{8_BXkVcz<19BRgENIV zybHBb4)gL)Qb>;N-i`i#D{k_u!9|eNiNu5H{mf|@1K-jqn0m z_1xEkjfz@j{&L|h$U?(v-m&(p9$rsHu892_Yc2@g($tEoe`6EoQ80uiFcnrRsHFuy z`1ekO+)nk8FdibDWB0x45%^w@{$!WtkT?5q2BTnh$-mH!+%}X~bkijH$PS0M#oC{z z==v~Ms~TwhA(`apZZqTa^2DIJDLIAx$V$KB_v~)yQX5);1Eyok`VkvmFiBqhFRpy% zQ+4Z)?GGtpYFvk_8>Ug;x0HbJex+umjl!_4SRf0HzQDKBpa9(RJ*FuoYoE3(inCc# z+%Knz9vYVG|FBj$8gf7a-ZB?oA0gaoJD9=T1sXeV`z^%Z8i9Q9pKNqgU9p`Ja$QY+ z2kj`#6k|MRtzno5!Xe0|Y8Wd}EsBX|!aA1; zo+O^W9QW8a#8Il?W+-QS8wfADdFkCioB-boKcPGHGDjrlkY(!|=G#L47oLntB&ad8 zdJfh%-H!`F)_%Rya`|T9Acr2w}sk15+(1H z=E^P#LrFjL-*YYN|3Fhy>)7EA|UOxZ=LwLZUJ;8NyHp6N@5M+40)g1xpq)1b#0CaR41Yb znbX*_ismgYzu4FFOfY%r63M%p>N!iTh`6uuiW@4^gZJk#F1v$8X!JDx;5x(B1aw^B ziv!@%#~}@6r?BJH8Am7P9M|}3rsl54YzxNO@cn5>3Ln`d!;SwRHE21Tk^1x~Pgs*EnrW>_iT99xEcEPiiYM8oqJ9GBrt| zbUs@Lx-}L7tKmRYcontainer1192.168.1.2/24container2192.168.1.3/24pub_net (eth0)DockerHostdockernetworkcreate -dipvlan \--subnet=192.168.1.0/24 \--gateway=192.168.1.1 \-oparent=eth0pub_neteth0192.168.1.0/24NetworkRouter192.168.1.1/24 \ No newline at end of file diff --git a/experimental/images/macvlan-bridge-ipvlan-l2.gliffy b/experimental/images/macvlan-bridge-ipvlan-l2.gliffy deleted file 
-[single-line Gliffy JSON omitted; text labels recovered from the diagram:]
-[Title: Macvlan Bridge Mode & Ipvlan L2 Mode.
- Docker Host #1: Container #1 eth0 172.16.1.10/24; Container #2 eth0 172.16.1.11/24.
- Docker Host #2: Container #3 eth0 172.16.1.12/24; Container #4 eth0 172.16.1.13/24.
- Host NICs: (Host) eth0 172.16.1.253/24 (IP Optional); (Host) eth0 172.16.1.254/24 (IP Optional).
- Network Gateway 172.16.1.1/24.
- Caption: Containers Attached Directly to Parent Interface. No Bridge Used (Docker0).]
\ No newline at end of file
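The recovered title pairs macvlan bridge mode with ipvlan L2 mode: in both,
containers attach directly to the parent interface and sit on the physical
subnet, with the gateway on the physical network rather than on a docker0
bridge. A minimal sketch of the two equivalent network definitions, assuming
the driver options of this era; the names macnet_bridge and ipnet_l2 are
illustrative:

    # Macvlan in bridge mode (the driver's default mode).
    docker network create -d macvlan \
        --subnet=172.16.1.0/24 \
        --gateway=172.16.1.1 \
        -o parent=eth0 macnet_bridge

    # Ipvlan L2 equivalent: same subnet and gateway, but containers share
    # the parent interface's MAC address instead of getting their own.
    docker network create -d ipvlan \
        --subnet=172.16.1.0/24 \
        --gateway=172.16.1.1 \
        -o parent=eth0 -o ipvlan_mode=l2 ipnet_l2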

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":32}],"shapeStyles":{},"lineStyles":{"global":{"fill":"none","stroke":"#000000","strokeWidth":1,"orthoMode":2}},"textStyles":{"global":{"italic":true,"face":"Arial","size":"20px","color":"#000000","bold":false}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458124258706,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/experimental/images/macvlan-bridge-ipvlan-l2.png b/experimental/images/macvlan-bridge-ipvlan-l2.png deleted file mode 100644 index 13aa4f212d9db346f307dfbe111fd657406bb943..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14527 zcmZ8|1yEek(&pf9gS!px9wfL7?(XjHfe_r?o!}4%?(Xgc5AMNT154iS`)haSR-Kye z`MUe-K7G$Qb?Z)ql7bWpA^{=*06>uji>m+tkZb?|gb6&FFcuiA2}u9| zCuqei$M4-5nwr`%i3Np4vgR^5xmi?p#UH=*mV3XyA4*G0x3#x92gWff1cLwoYwwVe zXz=#-mVViE$?s==*J21!?}^c7DO)2a0KlX5gh}1OzpT$YG814}Y}0%tmN+Aryb)z3 zsT@@gAoG`$lpO2IFpREFRt13lD?iI9=-GG+2moqFC%hy9I;nj{27uocPT1D10075u zO@yRFl7errMnFEDh#5#PPuh?-x?z&dII=d^&poRym>Ga5A8o_}h!g_|nSE(VHt%mQ zPqw6czuO|90az5wa0^O8kUNShYVM8{SpWbzzUn>s&g!NPQdUl24Wmw9x|Dc7tCD5i z`~h`8htZ7%KLEg;77+f;xPQ6_tOjVPO#d7)KeD-x!mCj40sbB># zBGK_J+`o&7iG6fe=KSsDW0j8+4tQ zhH*-^spHNug`oyE=L)?pv{pax=*n0-F0Ov#MtGq;M)d`C_0|)mHCU--2WMp&h)U&P zcQv?6T)kMcBJ6c$fMY%x)hr|An5WbgL#Op*Pvno^B*6%cGQulpIp?=Ppr)&ez$$6D z*}MDmX)#sm4Bby&%JvaxP;wzHM8DFF^FHjHwaXuDtd)8S?sxf2d3U>RT&%lJ$$CKm zXh*C8HSbv=U%Mg`XBgyBD%Rl}U!)etqPnI`2p{fA6T+dIgA+kT$S=YKz6gfU{%qvb z7e`jpjWyGn%7K-x9`5=yt$bko+n7@#7IU)OAp}+uF5#D0@BlvI_$(!08p3V=l>$Z3 zlwFz4CPzXyYS1KhRVa3_pd_SxNF|)OT`e>lX_!!Pr15$;eD}>yYE?otTcpveS87|& ztkT$vjWL34w}3#Tgw7O^gpwUU2|(|iPy<+_ncLzhsmGAV&thM_)Q|PylCK^T0QjcK zo)J4y05vSxJZF_EhzlM^A^oT4R?8Dmr zwcQDogw_q@WZCF0mOTbPNpjBRXO@O@nrbl2Vm8jTFDVV77`yjlsDjtincO zM(x$`)?EVqN$)d!m{}ZTC)2AeEw+vI>c6fF??;Ax1Z*Js`7TrxV1fJvHlbX1Hp{=wWxaUkp%r;hrW+LO`2E=(bg%#N+RzoYBK2xv zWVjjX{L3{G28PiMMhX<0`Xnn1r`@t})K`Pv)@qv4v=R5^9oiqikp-o-UU|L*Ja?lK z^~t|nwjE>6J~DcyXdjyY;MzpznqdRM$Q^AYD;i&eSwO;!ow^Zq%U0@9iokAYuN1=0 zgjyk@V*PUkjr8X{l@-*_=Ov5&jCwY{A}H~ahq|QDLtcg)p;~3h3no~JY%&`*vC&7^ z>eEyEgAHMBKM!|f zS^386lj~YFezTObA2o}(5mchyM-Ly|toT6%F z7)_9n2@5gyTM>li#2#N+?(o9x8uvAM9D;G%>ym)K>fPrXn@mRJ#**k+lXkp*hum|c zq-1Zrm(C;xkw(JwzZP&ka;|2lw+qcoTme=rp;XgC*aTN#3s+cEC11-J3o1v9za4Xx~HF`t_^?MsU8C-VhCW<=qT-s%dTMDFKtR?Shvc)nrj6n%d3#Y_z&7J zrn*Ns22|<_(Wc?fCoX+yTbklb3JVSk{~bMgl-KmFC;*#;@%qQDYeOJCLe=MOv&Hw+ zO<(DH8U2Dk^4Basq;cIAD+E;=;f42~rYU3FLb!|ErqsXR$WU_m&VGuc)+^xQrq)=I)A(> z3eq>1S5EZYnb3qc&FEHe!-=MDnrX#aNvg?bf)@Gl85B?bP<6YHD_N=&cfdll0*f672)(R19dd$J@@N{w_z&pMbh5kL5@`Ex& zq&SW}KA<5D|7<)1kw7ZCF3_b&b?Vo#7D!g(=aw2`run#=*WBbN>sO)(Bc-=a0u}hBb$_t3wwW`DS%BKn1z*%{>8}Z zL^-J*dwD-znMzHcwswYp;Xpdcs)spLD?%v%r%!Wm!3~J%y9`ry3lbe=yhWi&noL(O{~pRZ 
[GIT binary patch data omitted: deleted binary image for the diagram below]
-[SVG text: Container #1 eth0 172.16.1.10/24 | Container #2 eth0 172.16.1.11/24 | Docker Host #1 | Container #3 eth0 172.16.1.12/24 | Container #4 eth0 172.16.1.13/24 | Docker Host #2 | (Host) eth0 172.16.1.253/24 (IP Optional) | (Host) eth0 172.16.1.254/24 (IP Optional) | Network Gateway 172.16.1.1/24 | Containers Attached Directly to Parent Interface. No Bridge Used (Docker0) | Macvlan Bridge Mode & Ipvlan L2 Mode]
\ No newline at end of file
diff --git a/experimental/images/macvlan_bridge_simple.gliffy b/experimental/images/macvlan_bridge_simple.gliffy
deleted file mode 100644
index 9d77e1f6f..000000000
--- a/experimental/images/macvlan_bridge_simple.gliffy
+++ /dev/null
@@ -1 +0,0 @@
-[Gliffy diagram JSON omitted. Labels: eth1 172.16.86.0/24 | Network Router 172.16.86.1/24 | Docker Host | container1 172.16.86.2/24 | container2 172.16.86.3/24 | pub_net (eth0) | "docker network create -d macvlan --subnet=172.16.86.0/24 --gateway=172.16.86.1 -o parent=eth1 pub_net"]
\ No newline at end of file
zq+LN?Upc#fQ#dN*cngv*Gb#sM7c$yi_~OK`SS|C4(Z ze1;|Q19kR~5(@scq6V#KLLyNN(>C$@U_G4bb#5GyPvbl;X!?mf-)D)^m#~nDZrj&g zU0s7LM0U8WzN_W@=@wdI?PZrcw&cG2gl<O4AY<7$?=u;z3FW12}1E3rP5Y23!f=L=_tz5aBzb z&KiQ{j(Fv9A=!DFGITofw(~a3UUIlQ;NPy0y}7;pIFdkaercHf=H^q@>dOn0(X~Ok z<3jPGPJ8Jf242>$$qd>!k}W&~(c; z#Putl3^U?c_X0`G9@KqJdpDbV;DZXeV7x6#!U5F%i%eOCZn_1cfBeR`kI(y)P-S>N(?dY%yg|rzZee2$E+v+AQRB1c*{I~yAnPIO^U z4`rX3M(#*nUY?D!^WpEX_I8o^`T4^3_Entr+uK_rBBI_B<%~gQH{0ce;n#Qsz*yG- zREBOwN#e{zHbt^!@Y93+q&^;`ZY{S5e%smFMjyK-l8efL`Juh;&o$QKbaeo#GgwrGhlUE75N5q)esB=3gh-{g zL>8yH{QW1Gv(@=+9^m!jHVzPVVf(LDm~{`1v0&|hB{+Z*f3Ac-3C&+k3}Wpy9`ZW_ zg9|^jz?6}`_d;0j>3SpP_xZt*0wy+s&O}8DMKd22iJOSw0ocq3=BZNZjw<+`aV3!p3TquYdhq`hx*H>uLaFUqxYo4iHH+_a^&S zh#kQ}%CYin!E1AKurm>M?>HbH{tFSnzY)LzZ-xU9)k`S6#f>7H2oD5=9oj;P7BH^G z0}m@+VE#SI_l-}0cqDPoxS8Ss_wzJ^SL;cf)&2kfr|A%{`H!1-1+1rPk7#D>3*ahQX!n|$UFQ<~{`%lC< zohU>&|Kl9QS0`bxlW!UM)7q4i``nuEzrJ4!=|v2}{bK7wte);q$m@)rRa9wDaxoO9%5<7@;>-|_V&0Qdf^Z*C+z1ne z<3dS=Q|dC{=NWiaIeq1*$_F;drVpO|k8^-W0b>Hm{qdniDTYGmq0I0znSLG6q( zhId#VHYP>ViLomGN>A~Yj!y9bmbupZ<}z2oS=vPu46LG(6`>81ShtFHK*|MZ-hfVh-(#kMD#CrN38toJ4 zK1D!^6d9=6m`^|atcIXqh7onaHWP2+Rna{i4Ft#l?sI?d6Pczed@G$&h0+Wpo@T*~ zDo(ZhDNdT4$hz0%01|y;^WixEz_2ublufP;|D}0}G(y$$Yu)W&w@x ziHq##eOlk4$M0S5uMc?!;0lpvhqM^xJSmG3vqLx+vf;Rl$$I6zo;$N#H6_@Fn7^XGtEP_0n++n2ZSMb0!9*(;fXM{uM>TjkY` zgzQ>mdE>n98Rri=FjKfnVdgs(DuiH}5PTvST%>_S>W^^5D6pB7u;c*%v!Im=_CxRA zefBxTzNf~EzTLAQE;cW-da#NoP!v4zOv%#5lAt5<6~5rhFyPv(*wF{o3wy<|}KDZtqc8DbY+z`LA4&&AF~ zRLb=7u>MH;ztv-`NH3#oVdG-B%wN3ApPnNSqVe^^k4x)~!;x!@PD6nakx4si4pXYtg(i0Gg1^z&%*Dz zh=yEoml7Ui4K|1a)YB6Uxp5EAt!TjQJQ$k z?6OP+rL$YS8YoKPicc|th{=)vB0FR;{;ziTFHwYiRtV~#1LwD2L1BhsZ4OxPqP?DB zI`Sido~6%Tcd@({n&?+d124wD1&!MRA;WEGf=%GQLV3+Mb8&EZdo6`G3@4$!sVbos z12!5?B~2e}wQN7Gk3N?Xa#G=d)f93KzSLw!J>lSqyugBdPWAx;@>$TD`SwUvCJEP` zyzqREfpkw}wluW_S6B-5=8p(Vyqy32P&umrWtbL;Bav28Y6%UoM zNL!B&R@0CegZpBRzv_pu(iq)0!73k)=0aF&-WJUfyG}Aq%97{bk$!&AfS^Q;eaX>2mit;3*#9cVfrj8Yr(yo=B)A`!tg^;_)aSA{{iz7JpV2c#cIh4BYuc zUKtsj3st6&?He?|K~RCo|C{CeKj1|B4CJq%EAxLBX;{o}iJvqYZI>=LXSO<&-RUM=uQ;B>Q2z4_zUy!QCiDp&SH9b26a4r z>(veFXzWGbkYmgRW1{d*P#f9xwz5xs;zC1}@Yt`^DeUO8KaD(e1MIy?*NY}zDYxWWj= z2>PsX>^H2F-(yGx!RPNTWi{Sdndk1arDLT9He0qIe7cRk>wF4mh=;0Ca$e$`yqR4k z>W=8$u{R9p&(|?s4Ew~~xr%xJqbb})t}*~LO=!Y46o3OHZuM2)l4GhjCV;Hm!Ru{q z{=FL@7up00ag>=G)=oc3TU(N}!R@6j4MZC5vX{6VFmpR->y53% zeecl9)bf7A)|T1|1w0GAhiuQ|pzlxF4}lj?qDX{rt>ZORNWiN-#e|iBhwVsXu+wH| zR>0J|fQJj}v-P%6&yM)<49?yDa8!`{5l*d1S4@te^Um2uhxY{-I$!K_KSh%QU9W#= zu1ssZrl*=c0)VFzaSP(cxG<#Ts^ zd*nk4E}`VAX{R?fA|}P4Jf(jsLI~WB1b7&j|5%vezGs`lI1xzozNiFRk1ji3XxL|G z5ZUkXsO(3OG6Kg-PNzw>QPSn*2<7f*5-rGO7Xboet<8|f@gh5m8kG<|44XJ`r6Yd# zJF6rUg$gn5v$+}_4mc)V8Sk7j!r ziwzdSC=2VNE}G{vNvSWM=NsNMZCcs$GfT3m?48si1-SxFPayaOt`@B92!#-5i zb<>=C$;0G*B_5sUNaputs9qbBfK1=&CnW+5A~9_t6|=1bszT;vamBT z&0jrd-P^KEd|AGx{S}8on$UB$)l={lKVsXCbBby@|BhFUgUiwmh$KhX3txI7jNsX_ zrlo3Nh^VtbKsNG@S#;@?j8f>aXmu$o&NiYaCLZ7us!i^dO4 zq+&Eq6d5ikR7)LJlH5Y;JhWN!wh$$Hk_D7tF%)Jr)5B=F;HQA!YNVdA=Y6LKs}T_s z0EC5fc>F09c%oarLbPDbE|HjlJo_K`Z|@ zIrrz48yH!0ohR8j zLUyi7xx0@RQ>I=ZP1lv@eBL6bzNopy5@Vvt6Ixg*0wa0o&I{ehm0UeU#i zVCs*Lr;o&|bW!;DOGI72oS^aY#g(4uw9xvzO8+E{Ld4xB)f`G`((oGHJXI?-P;6`t z>NXl2Aqoi+&H@DeJ6!k<&^7AzjZ5m`x0jFDy4_M0+6KeHAwNJ&^&dr@3EXQQbjG}L8?E@evi70)(oG-HRwOnumy_IWOHQ~_g{9^QG0 znzZhx=p7dh_VPnpUTm_mld=H(8QFfd=eIt_x~jrsG56XN$^E8@RhRhlp5y=t$oM5P zXHw?)#{hbvp6|4DrN^`o%10a7K{xp3b+e@JB!wn%=578<)*7~LRPl!pVt{0pj&d@m zyW4xTQi*5egqQvptXwoG4GzJtwK^)J!k_W7k}wa1u1b}na|3_pDFlhSTpB3YjjqTB zlWdE9dw4V>ULt0KF9~@qfuq)q2GOO9*Y9PB?bRqgnEAQvlVwOSmd(06W+?=*W@C4G zU($FWNzL&cCGfX3tumS9FgOg@di+&H1oK!O-5i)!9b$$Gi_g00e9pB#MuV_?* 
zo+R&TU(0Ha(GI6q2#x2Of83D<1_!Q|s!3;;+NsT_g8ec)vuJ2a%&T;hTBL)qZF>8_ z>C$oscBqGk>e@YEl@dH7y8YcQbSN>thw@en&^;{&+fBCofZM7+ z6Nqr9G)dEoPc~edgu_D!!Q}I~v@6CYHdiA^Gp{m#p@3w=W zJzqXLh4=s<$KLwit8|KFrN!d9wp(HhjicYgBqnmzMx+%OSPgR=93Fb9I&8DAZ&G|a z(9qpp?%S=_zTYPXz^RH}ChywdRl?Yh;=M1kBQW$K za5SJN%oB3dP+K8K;hMyf=j84B{)2F;3wxThZqoEpwS?7ZyHiN{aahtvU%e361Hzjh6-K2Xq~ks~fEV&s#8 zb?(c(gCAEhG~|U&y*K=AY!q4!I2hmF1Me*jfD=k>aLUD@tjvX44)H$(2lbO)!&jQ~ z_>Jy(Idpwt<(q~`Qm9_D?AGviu(+d4#pfT02-_rC-*!a;1|87t3SnO)#>7@L<=eL$ zGFCW@yzbcaz1+8wlQ0OV?#}3WC%t&>lUg>Q!07D3XTk>N6s|p@%xhuGn=t4b)BP5|9ADBu6=&yddTs zF`i@o)g~rp$U#_{dV+Uzwkp64U9gGO7k0)*r|l0jR-YrWQP(QJW(y(om8rG0brO3} z9BlQiIkWWf9(KTCv3%lB0{fPWDi8KMEH7)b2IS@jK_H*;N^%(xN!qZsdEJB+beiS~ zon6s9m%n{A?eUVx{v7pvu2kL)6YawB#>MueIGdQ`p)BST4eoKpm7~c!S?he=oc@i( ztWIC>O~LO9B^H=iuz(~mg50FuPRfE3#-Xr}t`z%SME}G@T#7-9g&LcWCAMY3j$t6U zy@aE+Ef1#kC2=8OP#*>yQi$~_bFJw zjj!I10Y3&z>e>@S3xgyOeru$IwU#8494rGk-aO@o>_xZ$yzPa0u(biiJ0Sbev! zskg<8i;D_W7rbZ#^_`KBy?_+NSY2>&L%JYKItg3|g93DOXQInG0OBFP8JET9;+?(p zcR%4GJ=o4I$g&8cz&_}BM@{a=I7r!W7wT9A0BnHi6a8rQyPquY3_OIPs3DOR3Kb9u zqZ*2}Bst2!(}21q3=oQ)f(sRc0GI9eJUlKCpM6h@!VOsC06HzN*gK3YHUfQV#I;DZ z_oMr+rK1>VXv@s4lm5oNf2|(d+;-%{n*38htN$i zQC0cP9>sGshCkV%54Z1<7viPzP-;X{r#OG+WKfm&t&Lmy) zy5Fp_n7U#0{BcX(6cLT@wHNdcMimMs75d%0-O!pBS%s0hI`Wcmjuu8V?j$5ejn&rz zry{JYr`M+26aVg%6Ujj!X9yvS7CWlsYV22Dz`ICk?8l=OCV!r%%sSzyL}!VjfilLU zi;C#k<4C7R7(yV7AaLHc$tZ-?qN~Fq9{uCSkBG)W5xWv`|3@7^cUhqQm5uO<5%0z8 z{=TJ-^fEs7gYs1y4~KfmuXv5H09Pv^zO(O5?4^KsQ7FBFp@0@@nd{=Vg>v z{D_w`Ix(AK!oj_SORE1#gNUOT{(4<{0rRd_>PWlSBQ*r53{&!BPVL&q-}?;+^6>&< zi)ZX|8zW#a6+I4e1M`M+h!GPWpLL4^S^o4PDFe(*$$6@ai))&RB)5}9zDd4M@6w69 zB9XTHuRS;lRqEvbB2r8X!?Q>c$B%Jo7JZTiwrt%%)sYR}PI2gLX$HG8W3$~IuM;g} z1z8#o$eKOp=l|a%Ug`ix2wh{!N$imr zOq(c)wkjJuX9z($@h$D>i7iHi+$uugqg)jR)rQ=>JlVw%QYks)00&U?MQbH^YI^#3 z9cj5D{QfXMx_9~c``E*UT(ZeA*+?;SH^^^N>Y1Za39*C53J_J@{!fyLioDhENR(69 zLlVWIlh|_IT{%KRw%~YYQ{9jejNa%imY^83;jYw4+44#^@bv>6;c-iYzRKZ%e6*Y2 zg)a*X?fq*--2DLS{Nm>UKd~+@mjXz5m?J4SK6-<3n8PXmg>AP2Y!dq>Lwq=lcD;2i zXA=@YYOrsQQS=lcyqKAm{y*#|v_MA`KS?y3T)ZRU6j*|rWCdXvRrlHXFO!mz=128{ zH>!E2;yC6KH#?*dWpE#GuK5vNBsyWBxptqIV{*EC;d6V@$vjrQSwOH}O=CjF;)@Uv za#<5^?Ms9NTX#C^enUv%K}umi-R-O2A1hbvQ#3B{rE%hCSV-)%A(_06f3MKgC}lk& z&Cft-K0HxjGYM& z*xk5Zd%gb@YUSF6zHj{DbirJW(f8)25Xga%omvpwBrQnjOJU0Ip+CVC$Y*7NTN-jp zz>ol-SR5xnkxuUcV($aaaO;!{-22)8Av|l~nugpVzgx93?;KKIcX$61*?XHovg6rm zcd|O;{|H&R(}2Y(F1TDYOwYG9*3U}{7wK$wBr%PWLBcz?7P=E}UA@~xZ}{>n{o`xL z_Ur_o^jGXqNDgra#<8HTXKn^t!u|)(z-J$0>;;Pre)9`{#2Et$dok#DbQ?DdAH)}b zLy9bbO}`>9J32b0w%GH+|cJ>l8946;$g={>#?C;3zvbwA-#x|dY-f;&988oVCL^WYD zGM7@9N?{WARSx^wOMy6qI(CJAEA$dSc=WULj%VQ%hR** zP>_7F0ihXzu=1i3`6y>@Zz9x#jV-(G{H`yf40|Y!371&7a(jP#(S$s)|E>Hp35k;c zU&)@1fES;yzsy&%_zIAnI5L`^XD`wtnyjaBCOAW$R1QE`%8PjV@}%a)C8~cd%6Xqk z+%>t=!k&Yi%>UgLRxcrfQf!on+90G^Se1SaXv!IdTP3rw6<;Ds<1pquV<7+q;evvT zmq#XAwp8c%P8@J!&LEEl6t5$}MQUX>*0Bbteexksd+8R^Y=B8wsA76|^Jd}Uok1Zu zsA4U$j_Ch%%8?Q6KL4G9kud$<+TAgq+KlRzNP!XkdBFrR3!NAXy@r2Ru5{61QwwB53U+LnGi6@EC6?XWJ`o)FtjdyCJht)vY<2;XV=Cp4&EOg2`VVz znBhOZl2fX~-#6T7QcHJPxWl_n%9Wx%4bL19^%zXLsp@fqm;XQB4T+3xkkeVBj)gGM zIWA!P&q?jR4|uZrG#Cf+eJ|rV^wbpI9-Ja1q*+d3W<`_SRB?JzaOq`Cg9*li|L%Ov zyAZ*8J))Ek1HCPXpZFjgK{^s~)nLM!$Ou72BgZj6tJG{qqp24QPly#5d1oCC1$Aw*x%)pr&h*JhPkfHv_gSHG zQaWL1B!m!BIo)*ck1N9eSz@~5cH2nse7v#Mw3JLuO$`wpaQm(+%X4Aq78m5t4(cHl zVjAq?+&|JD7@6Yj)Dr5~W?+Cz2d|nC9TTtt!b%wtkyObiLg)YB zyocyo0=X#L)5BDrXbE^K)slyJyv@jX$)6LXy!r4Q%WF{XS4VZR@vsQsVo0-D7+!=G z4yw1|fATbREP=CgcNpm+4q}#~gYeL+o2&oY4|yQ30sFt9yN7^xsIwQ=!ZfiE`ANQk 
zr+No?s%sn{WgWJ<^UMMW*Wv2|=>}^8DNAEB(9D_#!`KrAB7+L!-a#!Z*kX+cBsS1aq3`y3m#(nI|LFCy1W>4=Y{=p{bqBmoCZk?b9jKSs`*Z$<{;2N&bn* zY^3AAV9zm!pbltpk846MPqctWHp@niEmdf@IIGgY!#drt%ocM(<7QAn?B3)wM)4%K zlTSpf;6L4j5aQZIJ1i65aW5NrjSHeop5jYbKUP>hJ7PRivh0D)yK|Z$zL2|yap=Wv z|EF)@)}QrReU0Tp)#PwqYUha3>sXUB-woskY-|aR2W{M9NVB-4p`ge*qW+;vU4pN{ zSWA*au3hB?$oo_qL>ut}BcN^DsC%e?I^&)UE^5!TOv>r&lVS$3^HoK=<2Bfl_H{j_ zy+{M@_nv;)sL&y1)t^e|SODJKYpG#=fjIstwC6sbrK1WOjvoj*@?)3Lj}F=NARZl4 zZN?%JJ+53n{Wpn6bbaC5mMOZ$00&@l$Xe0ucSjTEO}t=HJyj%^Q(P4|f;+dQ=RexQ z5X#{Wwy2BK5`Ow9 zPN-qrAX_It_4_2&n?t2)w>5j53sQCwBdk%(?2f@MfR^E73|^S7)+$tlG@S- z1@Bu2)qeaVm81Pkw6#_gkX{h*f4X2ZQnm|6$cg^z_}G5+-0O6e9{#)RdH;7DfJ2lG zXk{io~czlMGp zVJonfIauLeJGPQ)fFQ^n#1l&qBCDOhnGYBb1YwGF6={&Yi z6q<|3Z5SKR!@EZ^8{xb^U&rRyG+=`Nj&DU2QIAl|SNFcR8PJ>q;}R~ii59+JpMz%% z%r-0nbiW^7x>I>4I$euBCEn%Cm=qvQpN3YToY&qPtsxs z9oc0iCpmE-NEM(s31<|Vc@Z8GAq|z6<9tV1J*FLYTCAN%3ppymgAWzQON@B1HV(!r zX-ko-z?wqr|7L^0T)~u8JcU(~O44`Znk}8gXG?Htwje#W_Y;gzzz4tbIY}ePpPvP^Ez63d|ZNrOGABT+&$6-&poGN&4x`36V0 z6nBio*r>xnSdQo?V=JPqvbg5fDNfRRD_1clpMtz2jLfLOjL$^yJ^<(2nx+M`E#ef?pLJ}g=nrTaz=b^yf}s3Zw7;++KPfgy^<7RfpK;ze&(rBj1|@D`6i=s zE-A6@H%RvzUR#cf=a)1F5P9IitE$h{?PV2VQ;B(pD*LXc0*-YEcK%eR?*kDhpl5N^ zKOvbooZ%;hf|krkGfa=-5PnTSUhwcQP0sAkw8aY0cUd9!oUEynX`{Wu*-AO}dciZVvUH9h!?s{Ut;F9XAq>`t3lIeR36-_2om2#XvVq_g`dmGaLI7Wp=9@^Z%D z5#A~^sWZwN?Nly84P2HUiwtFzmR{Z>Y7E zxxVeD)G~EiJcG{f+<7P+n~#0sc{K9{S&pnYrgH3S-aU){NVB=Ie0X1BPNj-+>+Z#W z@L}>O=ipL;dGL5nl71FF-u05|O|D)an6 zf>ogG%e`D2kgs39aHaC&qyA7%KYF)|3ckv1->=qgoKHL#Ri(UTxQn&MYt40KTFeHR zUI=K)y==0C=Q1LRir6;cNJynJe%2xf-&{OWCf{{L+}TFV#kLh-vz=WX*AS#nQqYfgx~x}0IYT2wS=2}2fFs6S z`KEZR&+D#2?2-Jn9Aw!utVD061^n2(a%SJXk}`QXHXD>j3$qgL5Kl~(;jA17YrT%D!~hOJ!J_ui@`m;9Tr_<(sS zY>5;pV*mu+7qdtV^8O6-Xa@n&$(kN5xCw|=UXH&o%Vp6lr=?}yWXZO$yF8nVjmjT? zJ*%ru;;5V?(xU0DODvk*~j)|XKG66f45>yH(3x8%M9CBisVKq5HO~n zg9ptU5mkjqW0q#$r9eG6xilf*0>QO5Jd2zyB&7@pTauD*V^az>^^j)Q0xJG_>LA=Y@|Kq+WDJdAji2T_z*? 
zGe5LNA4U+KQxTDo;YFyO*HfYd#m3AqpN4Iw7GSaKSu%^M&%Q;v zKvG+VAFvzET*(z6Ojxj&!(n%Op>6StttTZGi#~0Y&)g<8KFcH8MLa7x;@hR0>+fck zsO_b=vMRiK3KKD`9a*r<6hjXF#hX9ku&;=L!9Bf`4gk^dCJ$~fkoaI?cpsTM znc9l>BNyS$yIi;d)!ZK-P`@lpsRc)-hJlWPl-(uT?T&Rzz~M5Zsn@TRv-{wpwdu-6 zla~DJN8JT&LF}^^^>3R-915)Zsao}BIQmPMFKR}n%*gAK&4$Prr&31Hg^cqb$JhKw zT+7zU16*d=Xh@}(Pb10X9tLn~U&6j3+ESjpx`L-A-^hBNuTY^Ff{i{Z2KDK`%m3|r z-t~R!{3QL5uPpy^gaOe1_r(va8CO6`GmWEv6U{0qVgs==-1aKr zb}VOi{pMNx(Ez3-U#PwF%b-2NE)AyST-^AIqv`)o2S_xXa+%<5|b~Y;~BrgltQCVHR(Q_N6?X|X6lY(1*GB`1kZ_m8yTju?iTtsLL z0EEy!Gp=nOA~XbCj=vu>fe`HTOB-Pzvn5ck;W}b+T*8$j*5Ae1Y=CCHImdcIPPYE(%n#8;WsKYDm=tUJc=kqeTt(^}r@w&bs{|1m2YeOBTdpk-Pl6#0NY){+z0$ zINIbn#@0OzyS?0ZXcRW76A9nXU!bHcMcjDgJNYXu+9NCk1HKN=-k z6#Z&ij_3FU2Q65dhHRIv$i@C_^D`x?HDoSIK@W!6+Q%Zrq1;}*u|ql`IIR7}3M=!HS?!@QdEys$J6)EI z=#$(S({pI{68Y|)fq7onFh9(dA2|wWAI`z3*101 zX^njC8_$;k4M`qG_tXs0WbY2rs7}euI#%(PdIU~@J#v#HIazH9O6(px%XM5ku7tv; zcnBA&^0*|%MOim~RV}H`k7JZP{S2Dh$bRcIYGh=CPvELyGT-EanIA)5(AHvUm)hXo zMNF{JU%kWFekN#``&%nWVzVLHc`0@uH2Wgbg=yPJr_+;M%^VfWJOFN!jj5{x_YeonD&3KeTO$FVj{(~#`^K(#hdK?BGS1Y*^?4MO* z@?-4IkNxQbj*^)p|8Y=3UR)oCb)d#9!4KI)Yq_ytWKJXg2nBT!3`E2AF@AZGECx(W z9IsndS+L6}!!kmz@umvLuBwZMCST_I$rBC{0L9S@t1@nJl_;ZEzOg@{{+TLdS?D!v zS?a(fu_WRF)&hr#0hlK)VscBBv_LLfM!6hK6GFtri#{81A2ue$1FI?c_L6B^%dut$ zz>uLDPhUvKp0=w|k|&>vO+NiiXn^Gc_-Z?F0L$vK5eCEP7e8?@ z%zQ84KA;t3ny2pF6xk_5H-Smsc)z#}(O*d-I=ds9qQ+5rZ&c+0TDg$UMU4rrJcRV# zy{?;{gI-2EabioQorhi<%geM3Z5g{(D$X}r^F**-a=TBB z*cn1UwKF0k2b4**vR~0+)b`(&xeDVduAZI)-3(LYT3Ll1Bn^O{ZA-J(Y^~y`UY{?u z=InSrEqWSs;`KX;rN2X*T5$_{AMi8J>yHP$@ z21xRxc4NMRBrM>dX|_}8kNU*b)(QS}M+noO>=|NxC075awCbDoPaS_|zW%!jc%=-n*?h!C-ZS*rN`=)QXS#C=D`s8F z*~-;yr+7Hn1TucJ0JIZU(bUeG_5q{3Lk&WC756iN^7+ERS~>zwDfDYt5$OQ=NH=K{U%HH6JvM!0f-AhrKs%*b;KtWA#VhIXnYixUH&?skO@P;_ zrKRi`^U%o@ zolj*F@438ppwC%$<;-1PiV7nVZ(KgGBoe&NRBE03KuV?7JzVf<;6FY2SAgfSy!D|vPNGc9ukd9?YZp~`6k+E1zIU!u=`B%;rW)t;txlf{L z^+T^rfAxEPw@M+O7E;)|`_#&QO-?E9_Ti7W))=HMc6T>DX+vdf9`8GziB;6Hw@7FcaKX-oH-ePM%ZelL`$KSV_S1Gtf-f$1_cwb+)L? zQoMHnGLxD9H0n*po^hx-)jC93)7x;DWRD=FKk_jh>4mS?&skBDy^!hOvce8`04>(? 
zlj;&ms#rAY{M);`kY_PWAjg>!6YoBe^KDD7C12W4F|;Q2QCKsO#*x*D{d;>a6Cag@ zeissnGUik$#}9!uK$#gv9qxx*HKhicz=lo>ZN~U^1zrBQB&2#*GFTb3=(LeyT6>ZD z2al;9ww6*hH@D5$VYBPo_;4)ozR8k6_%js`U2jgGyrP?*g`_lPlcs44xt<>*G-V_6 zOk56NoRiQ4xe3%&EhddN;8wWUPW9nv@hig_jXdY#4vPvVRCu!M=Kuw*T+PHr`;J7F-CE8LS7)r7THe!q-Vlb6rYA=-<+@OudX z#9Jt6677b*+$u|YqKqhJ%5TU7oqzw5gX(T&EG(Apt%!cpC;R$Y^b)tTrYL4a+Vt*F z{pY820+3E}aVsAH4#g3pf~aKc1fQ<56LyXQoe9ZTpC(99^;c~nCs)eh1COZ@f-*B= ztx-p?R=;bk&h+=7P|OgWbv}9irks+-W6NUzKW39Txet ze>4^e7QPRB$`k)39hGKt>2_FG6DPaI_g{A+uC99CSjEs#Zq-mVa#5N#r(?1a>d`ip z(I{@g_keZroJf5L-u-vV{@o=0T`Gju?&X6VD>nF3l-EL^Ek|&z;>E_P=cJI4lS#9B zZVr>-nZ&&}U}sljBgd#GXA@S$rqTl&bn?D+5oM`rQ@vi1%Cr0Oa}Zk(?A^0e^%y-T z1jhS4aA@PY#zDAbnNib{Uy8%fbE;Bv1+qD&AGn0u((hPWQ3zVE-lsTiUg-?eF?2`# zd%7YKf5<#UTxkl6drjLsw9#J1lp?D{`;o<0-~|gD)6gTDM;hB(`QX4NgE$=cq3vNH zz}c{Zh|EC@>*upe+0Bc3eRd z0K5wFi2QT@QFp@+BzT(0&nDj#!WtFV)y251=`z^1J9`iZb`K`wPJo(VLjjA3ml#3m zqldJ8J^v%1oJw#DS9M&*aFW z-Y%>%1V3w8!rnw5xx_ghRpKy~E0lcEfG@*bL3FV11Q$U>^uhkhwk#3f!Q7mLUNgqg zHiCX4nP0z(3tXiz(_i$4Z_f!BCAYXT>d`~js7;p*~i|=r!2wLI3yz8cN6P_1lBV1C_ zMd%r+RS^w#zC3S+-lz(OsVXPPD828~Zx%rXISpdt*z4qOS_qaYDo*5JU9jnSTmC51 z2U5ZsAkoS@7`uFAzhNz=_3eU9o)L96>bTmZNzdau94Rn6T_}TQ-!;bUXOiI@aZRmE z4XjedFtMYdi=sp$g92HIJGMN#zOn@y`vJV+{I|!Z7{dKv4z*8&^5usDW3I-&J4t&L zJY$`O?CBduxmuF~K>EofrN`-7=s&?!shLMO+>E_HsIsQ$Utz*IQaqo>(lr6fBRn`~ zOGPZd$T#UdMOgc47*!PwKt2%}#J1{UG=Dt#Rk5>*RH?~(yh}8I|JXe^5JR(U3inM< z{B3LBsT1Wti1T%NrAShoqlYJf2jDhJZh)caU81(NlN&dHiu1F;Gs4G2NbA+dv`FjK z6dD~y)Oq}G=peWp<2YZ&nQ+)7npsH#)x6N6f4#3;8ZRP^{|*;3<5qCjAl(jnUm#rk zp>I@TD0`6}IFGn2ir7!^(by0i61^&sx*n#nUJa#F`XG4NY>^oc_kkS&PVR6^paNs? z!>B7HweT)=4Kv_Pdh>30i!xoHFA{AhuzrIEHsyzO+s!5nP$ zm(K9TnX|oZdvE{aXQ%7}4VJ!Al9=Wy2Baap{vB#f)A`h`-BQpd>di0fUDLbCqLS=(po1DqK z(9ft?M@7sQSCU`RyT?1w>MYn#2}XIyLAX_?XVs#hN{R&l8|0A;@G!Gy3>@$=A#<=+ zVN){G!1ngEzDgxaG~G?4NN5~Kq3OpkBx6Tu%(3#fRe7^$ji0& zGY)Lvs~x2(q|0HD1oN-;f}EUuhbO;c65QR|^)Q0{?B~fgrKrM)yZXAwt8Tzn1YF!sS?jrMpmvRoBqQDYlc-{pv?l&md>aG1qXqD)S z)PKX+#`Q!T!%z+>5hcKy_0-p2qyWHoURPsVF=%R7eiL!C9$dj^2J@=Uf*Z)od6|rY z8hN*W{)V_2t&!s#l?dzO^&99DO$PRgozj@T?~ArK6P9nbt!NDWO$WYTm>l(naiYT;+C7n_ zVA)X7Q6Z5z02@@LSh8l8G=D z>N|}eZmH`qv4^2t4_eC@`&qWz8?tFL+?f&c&j diff --git a/experimental/images/macvlan_bridge_simple.svg b/experimental/images/macvlan_bridge_simple.svg deleted file mode 100644 index bce931f00..000000000 --- a/experimental/images/macvlan_bridge_simple.svg +++ /dev/null @@ -1 +0,0 @@ -container1172.16.86.2/24container2172.16.86.3/24pub_net (eth0)DockerHostdockernetworkcreate -dmacvlan \--subnet=172.16.86.0/24 \--gateway=172.16.86.1  \-oparent=eth1pub_neteth1172.16.86.0/24NetworkRouter172.16.86.1/24 \ No newline at end of file diff --git a/experimental/images/multi_tenant_8021q_vlans.gliffy b/experimental/images/multi_tenant_8021q_vlans.gliffy deleted file mode 100644 index 40eed1727..000000000 --- a/experimental/images/multi_tenant_8021q_vlans.gliffy +++ /dev/null @@ -1 +0,0 @@ 
-{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":389,"height":213,"nodeIndex":276,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":5,"y":6.6999969482421875},"max":{"x":389,"y":212.14285409109937}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":64.0,"y":36.0,"rotation":0.0,"id":216,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#e69138","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-12.0,33.0],[84.0,33.0],[84.0,86.0],[120.0,86.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":190.0,"y":32.0,"rotation":0.0,"id":254,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#f1c232","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-142.0,16.0],[54.0,16.0],[54.0,115.0],[87.0,115.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":133.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":226,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":15.147567221510933,"y":139.96785409109907,"rotation":0.0,"id":115,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":29,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":116,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":17,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":117,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":26,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"
controlPath":[[1.3318647887324033,-1.055138662316466],[1.3318647887324033,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":118,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":119,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":120,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":121,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1 - vlan10

192.168.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.0,"y":82.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":71.0,"y":4.1999969482421875,"rotation":0.0,"id":187,"width":108.99999999999999,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 - 802.1q trunk

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":282.0,"y":8.0,"rotation":0.0,"id":199,"width":73.00000000000003,"height":40.150000000000006,"uid":"com.gliffy.shape.network.network_v4.business.router","order":32,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.router","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":55.0,"rotation":0.0,"id":210,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-8.0,11.0],[-8.0,34.0],[26.0,34.0],[26.0,57.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":12.805718530101615,"y":11.940280333547719,"rotation":0.0,"id":134,"width":59.31028146989837,"height":83.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":35,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":64.0,"y":73.19999694824219,"rotation":0.0,"id":211,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":52.19999694824219,"rotation":0.0,"id":212,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.386363636363733,"y":108.14285409109937,"rotation":0.0,"id":219,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":139.1475672215109,"y":139.96785409109907,"rotation":0.0,"id":227,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":55,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":228,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":43,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":229,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":232,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":232,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[1.3318647887323891,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":230,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":231,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":232,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.r
ectangle","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":233,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":54,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2 - vlan20

172.16.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":259.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":248,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":56,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":265.14756722151094,"y":139.96785409109907,"rotation":0.0,"id":241,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":73,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":242,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":243,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":70,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":246,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[1.3318647887323891,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":244,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":67,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":245,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":246,"width":44.395492957746484,"height":26.378466557911768,"uid
":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":59,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":247,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":72,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container3 - vlan30

10.1.1.2/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":31.199996948242188,"rotation":0.0,"id":253,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":74,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.49612211422149,"y":17.874999999999943,"rotation":0.0,"id":266,"width":275.00609168449375,"height":15.70000000000006,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":75,"lockAspectRatio":false,"lockShape":false,"children":[{"x":68.50387788577851,"y":43.12500000000006,"rotation":0.0,"id":258,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-31.924999999999997],[197.00221379871527,-31.925000000000153]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.50387788577851,"y":38.55333333333314,"rotation":0.0,"id":262,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":40.7533333333331,"rotation":0.0,"id":261,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":42.88666666666643,"rotation":0.0,"id":260,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e69138","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":73.50387788577851,"y":43.95333333333309,"rotation":0.0,"id":259,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#ffe599","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"lin
kMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":248.0,"y":51.19999694824219,"rotation":0.0,"id":207,"width":143.0,"height":70.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Router (gateway)

vlan10 - 192.168.1.1/24

vlan20 - 172.16.1.1/24

vlan30 - 10.1.1.1/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":3.0,"y":88.19999694824219,"rotation":0.0,"id":272,"width":77.99999999999999,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":76,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":80}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#e06666","strokeWidth":2,"orthoMode":1}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"12px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1457586821719,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/experimental/images/multi_tenant_8021q_vlans.png b/experimental/images/multi_tenant_8021q_vlans.png deleted file mode 100644 index a38633cdbc23014364bfc611d650b2a17dc72ae0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17879 zcmYhg1yEbx7cE?9f#O>wA2N=k^5TU; zo{GGTp7-KW?!7UgA^9LY@*H}Cd+%K1(B{(ASh93{t=X3Dy466wQ~@&Pj=v%}BW_Tv zbkUAB&>_O7chr-9U4T!T_5s~WI_wo`Hxeo-?Y#yLmK@UoPhtsg``T`mL=N9B!-bsW zgx|Q&gpY0y13~m9EY?WzZE_R{@hs0lpRCMaMHVgp79Xn2Ku{Ma|No98>dQ?kYv>Nv zsWm8Z-;KC6HhR6Lr!VeNd|tr%VOY^m zkwVRr&<8Req~^;ok(4~qc8%?}ztG$_^;BLz`tzB2Oc({;nbWga)0iI=_!KOa4M-C_ ztTazF`Hr*|1=0#(;FjKu?@{<00=iad-;Dj0b?Kb28_=j{!D5U#_8Zd^0xL57Wr;kV zW7Yogi>A0O_xXi?B{b%JsyPNb<9T4@0^Ua9m{`f@Ovrp&86MOYtf-^It+zxU^_2T1@9mNroUAc=~sP<&MwscFUeQ~U<|7u8-^w-37_ z?-JEMYVoHK&{lO$cTS!yzsMU2rqz<^>|956zA~~i8KsrX0scsyr~V=kQBHaJzO|cU zTzmPi`1-h;toXlFvEO)Dhttdt&?@q>Kd)2Vquk7t1{Yd_oSwgUmj1whTCDfD33ck9 z8RJU_(H9rX_0l_CkUw=EALt!#nWmXQAB_t;pO30$=Q^sZZaP`|Lj;nj;8ja2!7oV8 z60r=%w|IV$d3b~n<=QD^cSzQ7Vpm-Q+IaizNe1w?1stu&`ijkMd>wcd20ZSXmE!1I zI8Z;Z4{&eE;orfX6?=uD-Zc^jXFe|_NhE{gSt~z1m@cv{#2Ot7Dt%%N{W=MM*IAB6 zO*h~mm)cFI9OyBP>x`Y6Vxo^NBYlu5a$oT%!`QXsCSLEni?ZE5>9IAgThC|y@b=K) zup@H_BIEF_%5Ob&C2gXbHMgR~wd38|>Y>X)pg^&S(#@Qp{`5}_L)TAjPNKc3{R_Im zSTx*!+wnGYZWnBY3_>`pJ=Dj!#FUC~K^{-G#oe3eQeR|JSmIRTf|KTmOp)u~kKwNy ztr$AvQVd!r-YN-dJzN1!UqQ7M4I_mGlQj}eH&TSJxxDcg!oN~+fq?KYq*czb)8De< z@bw6WQp{wZ*NveGzgX|D9yD6qy=^=-BQVoy8O4?}f=3qpAOvaoMw*+Mnf-ufYlTmy zHG6OWpo!J2Z1iIa~O*@BFdJ2=tR+=&$w0&?W~!n&Tou+fz?XbS5x z?)E3!mMk5G5K4sx%>E`#WAZ0sl3d^Q$J@mmV$t>0g4BG4_>bM64d8oB1YJ65SI|}XnodMBgWbaKzxJQI_?a(SC6hSaSC6PjS3Sys<~Kr(di<~axaTe) za%Hw;r*#TBwCP%3Uo~;Q*)Af+HL~`&f5kcw9!*fzA8$9hki&vnbf2gx1HBGmr|LM$ z`@(Q`VlT9|BD@v0Osq%F*-}u*{5BzxKyc2M9V4oaBegp$`47kKyy)DwQ~J0LHDgA< zWb{EidaHNwHT|vMa~t-2Gd1GWO{fiz5JM=PAwnfkh0g9Ip9Y|WhQKM@UhRN&zPk@N8r}r7J%{fLmcR1XAlPz_PNfc zN|&SLZ2SxtKn_ed>cZM?j3(z@Qm?d4)VR4vJw<`8@OM1x#Ov9;B4#0E z4CVXs=_V=k1>gaGm2iPJZer)vq&w7Rk~oroQIV1gDlmHf{dclgvQ{qtxXoV)uLOSo zX z`CSXah0|4bZk|}UxVTK^cZ*IjP{%t|mFVyBDa8>G@=YnA?} zgD-#oF`fg9bI3JQN;mGX!GD)EonafCDf15VB z)D+V|i~tOxT6z3HYWbn_=c%>V)#cXDDJOGk&}$O{_XS^I7lH zZEVp1(PsD0r`Rw_kove#YX1(JL{n_7*`@jA$u>0>>W$>ND>KqKq}Ht`T13M%;lh2>dyc6VNPYTEIH1cr}Y|Ey8a zfu>QWO3t&dD+t;#Tx*{x3V3SpLH znu1t~d;FK-b@fQC*lQb(x=k21XO9pwH19Lwo9W8IV#p? 
z)@G&1;m!p8%7LJ(odTd%R+6>fYmz}chCuq(_Lj@nv-j4-x4~*4?9!r|4BSn}f|#r} z757FHa_Ywi5}df-E^S<|xZt8dV3W^5h}cR~{!AaF(d@Kv{RC)ZGv(ZLcp z>sU1pq%7n3Sj+l@fW>QXStG|drHMNll$Zv(q8sCI+I7Mt_aIJ2_zUE0RA#$Sksm8* zLS{KD)hPk)JS<899bTtfq+w`e_-!V{Oi_1@LNp>r$i|;qZ}Bt(@>wcUfsK!QZ^9x9 z_e;2Q>_Fo&8N^H-8 zA7?b&6>{I6&U#vRSdcO|)ETV+wGWAZjIbLuHj?RRL+P;Y)3ybw&ihi0u9rr6AKMkd zba9PkWo4O&L+UclrXF?+$x&-~LB=;Ky?qNcSMUjAX|ZzJY{J+s)~5OZUeu}bcM`9>7?7d0=Zy6~Y5~94 zHqIZ;oY`_Syah{NYlV+)xxLbXQR{atN zIkMpwHgL#z4=J50!Dc?&wXv>)7$HMBt7CZu2jv^_Kczn7(1g|>M9{YLw$Z^$q18?$l*f4Wg2Z`&Fwa|Jt$vzDtSl(R}P5tzI;DyAEYm=!kN>vXVM zxLuUebn3b{#i>s;%+;=b+%mbjhkDqg&Ov{*dEWQmfOm%?U3FWGbGg9R6i{sE`9>gklS7l4J#-RNg@sMFv zFyxqk_j_}|yMn6^V6`9&O%fuBY+4vDglCVl03$43_eIrxG5UqsL_NYGr@W}du2sAA zJIPSItk%GGA%bI~tWpaAC_?AG>x01TfDfNEe%T!jTL)_W>F}(oo@^*Zr>gIt#{R1VDuD*$^ zS|^}(ASNZ!>3VV@mM#QKDN4F}YVQpcARb++%x$%xZut*r`?qmc*7{NQu*dPp?ZHdC zGK#U56sz=K8-&)azkSK-oU`JM1VP{p1EX(}lUClsuKt+=C$BH(#vn=zgRmJ9#-%GE7dn}3|Q3kb|bt{!lG9d=icF zdhScP!jO%;3j8_&aZ$v*7%YL#;q=;{czRtFkMoHc2TCv-1?mGp})8NX4wIBz#d+Vuzl-*M#%(@81e)fgR>XUs^{(gVPM6UW*HV=EHlZO`8neF-+F z(hW;?u>OfdAFfaBd=)8hlv6)C$zs8c*(;jvPyJ+{nks%9>~hZ{=w=++HL-^rG8)8P>9tK$gml8CJrcltW;q!$snXiI{NYh0I8LFyWkilaW; z@n(xXY**z11v*RJ#^7q5{uOjWhH>_QU#F&-eDbtO;{!f6^+>2y56M>*gcftib0;aqY@-M!r#9+Q~ykQjU9PcrZfBmixm0dA| zhBYEv-$N4$8B4%5A5C%1tN>`RDQ5z++>P}43Hn@7$|t>C^=!&3$le`FvShp=u8k#7 zN{MeDNG_)7ywzuBI{@ej5#4bW-sm=1J7#Xb2>~6(o2GPgJ9sxse2D6Aj52kP@q9j* z@+i(ceOU6>SH_J~3H_eldKBhBK9+Tm{KgRYTmH-MquX>Ta8?Xf#F{aE;)lhj z5!JBC9m4*o$!R`{J7JY@D{}dsTJ`%eJTtIh&n1BGbQ*d@tGTHdjH%$z>l+@Sj*-d` zymFLDSD)iZNZ@Bf+=#r`t+hilrAJDneHKuurwiYSXq=pOt$0OS#H6qBhAhQK{u_xbl*gJZZGdYFlkn>Ys&Qn( zYVG5UZw}feXdn&wr!XSdi8}R6Tw|RQ&9Ym6-uHE*0$l}4C*OkIBu??oL7hwdL)IoSBB*R8Npahnzhc4b!tgvLS2o2 zTBl>?8SUah1Zwob3#$ik~fqlk!&e$)ydh zHnA*+)`U5eU8uTZPe!Brw&=-=HbvV(@BX#&|0+KYgLt&BlwC_xy0skCzX znZLO9skp03F}sKHN+&)nN#%<2>#Mvd_*H#E+^iZv0DQ~xx=X$91zn~-pRjz7ygbO< z@;e~+QgB-%6w|9zCs>}dzzLO@ROrm{h(z|W}+I28RQh#bHKL>C$FP( zysKRD(zYLRc|*-ixJb>w_BK*rh{@oK(-X?@sUctdRn{kr-XU2h2^J)?hId$t!WyO> zkMc9?-*Rb*8+%?u0eP$@UB)8jSZ2YU-3L|$(|n7;y!wvKFW0i--cL^O>lgj7YbrWb z42H0h71M5^Gv$0#*&=2<>CG;7au&oY_jAvE{5hMchsalPrjTvNNmS+#DMY^ataq+u z3rh$K|HZ)8_jZGcj5@$k@_U8PMk%!8B&=#ZhPQySrAbD#o`r@v!d!yVh6f+hE*Q^d zI*5Hfp8Q*dpT~zCS+a+aeg}pQuvOqft9ANcf;zSFSI{u;Kd4vJZkfm_%vo&75ekrq z8C{%Xw`uX^w@f-Jh6Ney?6W-R=wQxrwrmlkqBft+F-p4~MF(Jw0uUxr|3$#GLfkXd z^SCZBT{q~*+D1`bAPO`LvG0K0Lh;%i+&qAj9I~?fSOA5Dwo+$2xa9|i+5vB8uRlCP zI-J}J#@zBkU2d*+PdctPS7)>Do0@R0rM`#1f@+2vJud)l$+T@PD*v9}e{SPgYKcW- zdM;)b+%>wc?Cyk|eY!m0Gu}DPsXxA(d$QOq;oitTot7=Gi^IO=6@UhT@?39)uPx-1J`Os4e9@z2rmQrg zpVTmE^QR4wVm04w=`l`?oI})yow?Z+nMs(KTZDBIVxrWM(TB-70AfEliWk0edWuNq z_uDOmr36-nH!}F9_7QLpa)<@`J!fhipPE#LgRRNjnI@;9douW>DUQ!fr5uM@a(H4G zK2~B?@qIt5Skh2=qiPEWWOmA@M9yMzDaXgasOsE%6N50@Iz>@nbiA%E{RMOnWo)$e z5~j|PX&ZuHp`C1p5(1^&=o=e`OTUHQB1m@wMWfeb9}O`1bzVGtEy32|Cgd=#b&f{Oy+ z6*O<2DB!v2`F=S2`Res_Pl3Zi|3a=|UUfKXeVj(wImSB)Iui?yL*B?2;5va-VS|iq z939WEAZONQ1po}Iz<7-Jdp0f$4-8xRD7?_AwqIY5<>L@CGj4tkx~4$NWc)%sPt&N~qvk=b27kYF#3R=f3R6 zjhe(b)T6Z)GfZq-;9;tOnuk{Lp&c%8)(+7g^HwInMm-Q+&W52CNV53x6^5E`lg_B7PDr0APkyIFh-025+IhOK zYF&SH_mRT$qg zLSFMAkW+789n3H3{Ef@PhS>F}E5GQ@`t6lu(_iaVfcU`p(Nentf zA)7yK1krNUjpj0rN!h4!U z-re*S;|zDfG~im$Vcird>%{i*a2RwKRao_B z>B|cbi7kId^Ck5$qXpDyd~)snpTS>Wb||4{PrjTCM%T%6?i}}#Zk;XD*SlI~FyJ-w zGo-~ChiAPO89IyC9CnpbE%gC?RBQ`~8rguj5h$5edOsCl;4b(M@?wze*wp(B21On@kQuRuP$T zcaphyj@XJnKaL+G6ptvpT1FCA@qE~5g&_1)^5Knek^z zK+{*rErU0tCTxq|A?-;pFu;j5Ftg^x<7WW`G0L{>LLYFBcqMn;;eh z1^`+c@er_h3gHM<(|+o-T9#*bNSnd+L zQcN&90+Bz!+c@fD0ARjHi=Jb#m)9RKg4+w&iQ!!)kgIhDg1em?gld-MPfY@D{wURG 
zGO4*N*h%?q^@QU#HX`bu*1$VSGAiRJ-T;6!$o>U^F!hhUR9QqYnry0u2Gv-`SHK|j5pYZb;ubgZ3QGOtNlN&%6+S$qx&Zl25mP znzw_lw_}Ifntm^_qAaj1&60F^f2p$oa{2>i*Hi0=dGq5wn(+25p4ZKS`M@&0gkY9q zeZL!S^N0lhz41uR$Ka(gx1jru8vpG@>qMC;NNm0F+I=2kgMUp$ew;w<(L#Dn$?4kT z-zn~Ex0OZs2JQv7|HhF0i#Rt7Fgh4L*BmS%d^;MRFLUu>`l*)nwkyn|PxnDuY zGpO}ul99p=s+#_DQ*_U0xa_;JpA|~y!=yR5YJeW#L7)!K#S$Ke*a{m*tlh$Kx*4J} zOOjz$Gn?T@4R~cVH&YKYTU$!5FjVGmcU(u@x#WPr`pK=`D(^U?Oi#*VInO7J6-;$| z=M8R^$6tnW=Ke5VdzPGloQtjD|J#ZA)~0~NvxAfmcih(CQCX5K_F|t2_{ibWB^~-G zI4%f_nIrD>g?+q$~yTO{oSoY&%qqirY5sjACfNXvo8zz

b*{Hq>az{aEwl^vuL-ETW!AsvC0U0rmZ$_<6OOxU4rH z6u*cCCrj%CbZzu?LCU+fGt1_K0=ic0aiH8Tv(d}mzpAB)%@%6-6`Lyn zd;=aEyurlO2oEI|CPfQU*p&b-f0^7f+aySv?;0IYmhE8@&azM@s(W+tZ9c5e4!{sX zfiaPRB-x>31b(-@*OX3SfPO={;5>XR!!5^M3})_?`eW3RYLFL4dn&gCRlddq!UZp3 zAO=>+?ALa2%k{;xrD-8Kl++6_#b$F=O!pn7J1$t#B@!$C5)BfdlU5HN(yJrQ%j!dq}rfM(a$o8fv zTp~yzd4j8p6OX6R;`GAwKclI1Fc=V2d+jG|XGUoRj0-i(!+O4aQ6eJQWJVRWoER@>i z1n>F0IMxw9Tw>{kK1i)xicm&1#D5gu{o>fIFIqAt;KD8L+_HrIBbg;wPdL84HblQ)C+~sF>=()-`U*es#QL*>k3l!ZIIv@K;?LQ#Ty`{4Un&{&BHwE!4CHY8s zb9=V6y1JT|m)G4blMWT&;|nz-WC)pfM6;rDh2LXBA-h@)ttSbpP#BK2EA@3v!))k_I4*8zu-(cSeUL5Bkr5o$S)*>_9hh!9tbP63Ei-@-I*%W`U5q{cz|E*9v&Qg5%&dt z8!AqBkj06l=i^H)WY~-BjUhDa&H65JxDC=MS2hEuZ%EK2A?7il=<>wG#%6A#1e`7} zo5c3NGd9MIpFdt|q97+9fkL^>J9E;~(sFV}If_$KQp(F&<9&f+WOeBp)6DcfK(mM& ze>?Fg>6w+b&jHPHL|?21Nq6=$)R_j15nrFPD&Eh{H+?w}3v;%(RKEj$S!Mmq@NT8ky{N_Eq*cDRbAP`cP^8&pSqLb}@=d)^ojf1TGW= zuDwy*%gkJ?v!NjzhLx@$bKNNM;+5*d$YJ{zRg=C4TI5VM%m4OPr0(G0(9qICd@hXL z6;g5pN&P^^55gwB`9sbgRWKHhJBqdJyXfNrVPqfmezre`PZKHj!qWT@RALZRSPem- z5^p>^3k%wi;p4v4Z=Si@6@b5-guorPcC*_oI4o%WW^tQ)ge-JewU zoM=ex0>Ge^j5r+xnPav+yv)pKieCIzX%!U}w>)7m)}-_ukaFHzgi{wn7|G+5d1`x{ zr0MDDe;+en1TZl%d6)d(P0|sYq|2Usd$A8zE|HZaJf;mTq1MY{WzY(4=jWC?_Z)ui z@>{sQ6=vuSQ-CWdD5!~KaGABg4jtowUSD0oe{hF#+VlZ6_-BZIr3cSO`wR8M!;{5P z&L`WkhATD9p3M{ij5184NkCuY_wR=axPKtim(fa;f|ateva@{@{tu8YWZ2ay=;maF z@I8Y{2+_`OGnrC+00CB2FlwY`K?fuLs|<{990C|AWwK^5>2yB&!-s3o^|_v60eW*D zIjtPRgt>3DR2cA-q_psKZU(LJb|M7&gXFocbeWR(9K=jXv^Lt>+AmBjhJYXLEZjwN z+nG?T)i-~9YWJt}VS|-;lcg9Ywm)@XEfNnH|K<+Qy|wV4V^-iM73T;2p2BISVy&?R z+1aH=A{%|~ffA*ZDM)@p;7A)n;Gi}8<&9~TNoV)C((aRbW?*bgQDa%hL7|zz%%BzP zuZ%##mOR&z{*e+*!tvud{eI$Xe0+SIGM?-7tqIz>evMhj2x3da=m*aUR&H)TZ=~T$ zd9w*K#Vqo7GbmxG5D`KfMypV%*cRFuFp1Th#7&c_utafC6s0V@hnSEh6l=z zyt|W)LV62R*F7WzlgwF660sK-1E$zj*x_n>RjMHUIn zT8DC98iDbqIji4HQ0M-xXPE&3r(Lq+XgHPm=VWNyYhSJuNN)-bXyYYA4KX}?A-WZN zb>rEFp@zUGz?aO1V9cg zUQ<9y0QWk|6g3&X#u-3#Q6+3YbxzWQ4Ypf`Hc&YieLT8y*|r}!5T0qtpq1Oc+OD4C ziyrZ5{q-B}`y>Zb62pg))goLh&hEpb72S9zFOnb6KRi8D{r<8C1+?27}e0s3aI$1Q*m6Z`| z$9mVvQ^)P17IbJq=TXIentF%@*UeafK^jUZM zY%xzfxFw$W;#g%ScC@RLJViv{{DLp_=ziV&AZ?_bDw%!_85R^+K&$c+cC`b7;gf^! 
zdkf+K++yzA!7U(VlTN^zS(Dh!w$apvnxHq?H_9PLvkrmBv6taxYGR@0E&bY$Jxn`^ z=%9sJCCM_P>1ycq+bfLYH{4p6!&4Tkb*@^gP_cSk(^iEz7E+LhakO54^8zbDujJO_*>BO_v$z zj4LH+Ds`DR|4;;TMfm!oSDjZ-&`LOgx+g|WEv+_0b z`B9yb=ef5Ik~F%XVxI9y;y5N!Nll=ggAuL{8xrbj76_0td5R|R>l;2Cfzn=3!d;Tg zH0#m>fYa6$rzGjzU3HJ%hU1Bn!%yO6*k-7#&+0A@tKgsc{ck9_A!uFh7yJE-kH^XH zthZn0>y9|4+em&Hl=vTnhm_+q(-x^$JEaDWqgv>riK>>G`411%0U!7QNTNZop)^vXVh`0e*yCO;tD%bx59ow#0oplpi`6f zY~gW~ldgiAr6oZJSpww3S40G^TrRSUENHG~msvj@=;JJ-b1j`ZSji?Ee zLTl2nAyhWMHZ3iUzksZ{!}?{2zVfqu5>Y~5@~9WF(^_a;b~E&z=QvCjw|5WNejE?; z{%reMvCHi`dto8d?R}82^=I3&WL#s(LB292YEg%0BERP|DVnt?s>gftiWB$s=Nbvd z=SUbyI<#ttj~x{S{u2T!Q2RFLG*^wk1i9**7zqh7h@5Gl>9xo#|6N~n$3bb;6VgFN zUzJ8^$nWgB*Ikyg?flq8)>h{1cvk?-rpolPY!0(GM!SOzXP)Y2)R$YiF3cd026Zgt zC$-2(vYvx-rJwev4#xy9N*bz{jirO3CfZV~CgeytlXU)Ef;V1Vt6J?s=q7 z=djU<1Y;vHrhH(A>JE~{)8;^F*eLe7O7b%gL#p|Ep;tpwO&~;P>{6;M#4_XfS--bT zEYZIdr}5mCRMMM&O}DlQ6Ni^>a7I$R%&KRyH7)Pr_G{OY?5)>8+e3}RjDg&&Hhxi12ACsFKf`f z7oYv-$yT{+T$4%p1=MQOd3No39a3SnDEYVOLS7{rc{wd@iSWE>-p@m$%r#o02ziO% zxSH&+4z!y~N?Q zoNX(k9_6Cxns2M!L>{r^m_I8Tcf zb}|n5Q3iXZAGJYRPf|ojKYn^}?zDp~9{^7qdg++dm&hou72(25PwkVg{v+vvH1|(V zZTY~N<{za>gyBEcYJXDDYhUqd5=9N`TIFpYkmjC}Wv0L1QF^n3MhFlZVwMZM8>jvG zc8j=_#^DgaDeevn1;A16ClmhZUAZw+BJO|7GvJ0_=j9@hPU0u26PbDX zQwosiGyh(|WsVhbh7B7VSM#^2+bq^7=g9m=hABYDPgecGAu$(OENNcD;k)pY6W93} z{o=)u5!DB6WD$7xgnuu7CS28A_MgzhT{@DDctf8{@&8fwUBv4B?w2@UQwv)(U58s+ z>L-e-FMaZbnb5scP*iU^!5_+F|`*or!y+O+GPRrl6W9d9hq!*i@F(^6WPi+`|1oe#XHM#V8H?3VEHJp-9MA2BWM&*oupsHc!G$ zIl7T+C&KU#W><18`npZ12wP^0?we&E%f*<#SDY9E*M>ei&)MOq5yfCnX@Y`f~9Nw$HcbW(91%)zevdoylqjmA@%P2At{~42)X&EiW?+*+-4v4Ycp!@dWvw zDgBqLm`+2=~Fdb&CFawyt!AknMLEA8XK%Aupv%7KBaJv3dUU~F zDnhkLRK0fs$mi3UOTj*Aqy7ON6&X^UKXq(73j#;3Um*(6a7w=Eb&`O@KbAb|FG5l5 z@1^u!`!0@ondd;Z;zY&k>0abQ{OZ~A%;vBnGk7d#l4-^k?+a$ESC=ES;2A|LIw+sG zov|H??43x6y?*(tHMoJ0&o49co4MN#Rz_Y6vX6e z-(5!8HguEgP@N24RWa6d`e#;0fe(FSLq%Bx^b6%W3e2L;B^-)861&F4|)pMSa)fA5d%70C71M6))P z@6i^+GmhDfM`)-_;NQtQtm#tp>BTTUxJM5ND88t#K~=eB4*1M?e{-tjjU*7!-i@fI z1OK@F9K%`!K$E_XgkzH6ncXhYx+T$6AO;}gM<%e&Bg)$WXxf1q8@o*ynQp zYOfb;D0%56T7))UhDk!_V{bZDXuC|Hatfm5`Sk|heaUkQ4+GhwK!Ady$P(q>(9pkb zXk=u>^>o|eHA?VUb+Q?^S-X9b&=9V~`K1fl4;eWigu`zNPn6y5&tIe;*}Ov(ut@TM zQG6T}*(ppat0YdXMG1(TCjEyY5rKi}HS#1o;&8mf4~Y}wxFj4D1pp`?RzP^Q*`u1^ z+7CH=y)dE^(W%v>rJTa3etdjF^$1sc`vS!2@rb?P}6)hsEi*@mn$*rsZ z*JfPQrK6anuKW4(vrA?lGMd2=a|$k6h-LJAAtTMlC75cSiZj&6P7!sI%c0aCFCn&_ zaw0M}v%sR}WI`hFLD>DRdboJN4l-IZz@+>2#LXSmU)?JFXaW!aQqmm<)w~WAnW$&8 zO06QprL)YG)PHgvYRjT^Cx2nrbs!rfMl1Wtp(E@8wGf-nc%bO#NzDYpej0D?x4Vl5 z>m)%{(S?aP5eP~xF_48ah}6gc{lcH2I-?jlkMGVt+md@3Yx}V)kSbG6R&v++l4&6SN}?GZ zRdZTx5P5hf?(j7jkpe_LZf8E*#8~F#!JL+s`~$ggf;}>kXtlZ;G5`Fg%3Njq=b<&l zU>uvr=7(hE?<{hSeL-8xBbhI9>Yytcel^R1t$?rf&cX~r`L^i=o=O^IOom_Dd{qUJ zail2X-MQHDJkJBL`vp2MOTG)`p?pgp>l(&y%MI_{_LSQNCk`w3YSyDK0DjbJ@_kjH z;~sf4NKJJ26B_xmzpX--4KG(+W{l;!bSMkmne469Ugk|SEIl8^SpJSU@$a*kq(bN30OYWo%wjL35S$m2|v-vCEV z9;fQiEDv%U61L=z%FBOS5sN7^W_wf@xm}`e6~Qx(V3xYgj5KlI_KNU67`+1Y1GMbU zJCM?69K(%tleG#o%!G9113xSRR8o@6+FDyr)PDtYxdf^ND7r|>3+m2t;eOlHAbwrs zbw8)tp{`?#N>Hy>Vw#I7(z02B0x^N^}%SwYLlWp@+i|) zatj!bQnjjCdTV?pg>rnxS44WT0OEQfm>^wgO2=?0i6_}Y_~e$TTEI%|SPD1@R>bq< zg2`!ar%7_C%Kl<)qLZQSw{jsf^Jc+~V#S-%V{$ng&!1vV3q-tD?feCew$g(`diU5! 
zW=HzyUb}&!#!sNpq;x+tBf*c`0eX-_G$KL-Wq=;r!YaE6?I+d zJP4v zYwZ20rc)i-xvV)D@&Z76bEK3vf9%rU&<65}(H%_@)zH<-)#%-2Dn595eqv$TAnBkM z^?WTC_!ofY$wQ{V_+6uP$VBR2BBM`Sg;4ED@K2Etf1mSu{D-HS(;C?LO{??a92!_V z^fEQ&{q6@vX}el^h4NG4kf=6kDcv)sPjqb5d5K=xaT0#lv=c#($wC>ad^E@~Mm@Gg z*#Dobs?9oLE`6UK;Mo!1-gn+^VhULP-@Na-+)QVz4a;9j(o@sY!kuLImJ|A44rgn` zIu*tSH};M!Wk)1otS&LGhX;^ipQFMHRsVIpn(Nv(l(f-B@=!QB7Ef`Ftz2%qq+RF( z?YerZIHz%hxh0M{&AUy=+ar(f#0%E`)axho@(%&|?>IU;FY$xcE`Odt=|Q8V#;qCF^rc~|!; z$03QC1fa!sN2;rnk3x_e@|#I(4czUYba3DZ(ZWR-w~yzV%!jnS zI=|~Z2r@9^K6cOV9SQpw{wF37N%lAT=kQO~$#=au6eB#p?KtpWTcCmd!iM34ElUM` zT%NWTfA6=o639Q!#xt%X%4G|l@4IemhMEa^QItX%L_qY$Ma<64;qt#U2v=WU9$EjA z9y*uPxeXDO#I3#AAZpSPWa_)nnPJ12t%$=BrW6EHBz%c#iB`PC#H;UpUmx|IqEa}R z7rrqO^$_=QQo2H5f_61(&4qg)TW+IrnFGPhZ{~x4OK4gEcN(vBVXo1T=ck8fCUp6g%z?@m#P*^>KMXZd7=E>JyreSU^X zm&dee)0)ofFnxNm&F9I#?R$;Wa-}&AZ>SGmSY7Xaa?bw=^DkUntS0V%@Z1ESqs_;8 zB%Kt!IXVxTp73@|usNa7{M#zezDcJxbX|GuADdvInV)}7{OoD<@}Bi=_79OW>$m?g z`@eA2+2Y#lhUZV79p>-|Dl~EXVb}it@|S0quN(C$Xw0z|zW3|pzC7=e*S+%Ht15O} z@PGbzli_EFo6LXr8F5J6UioSE_0JD~Z_+CHU^$2X`F*{(@yrtv}nS5&!vBR_42NAzV-A%y8Q(dUenH`-^_q-7HNxz5MeY)8FOa zr*FOXrR@Ixx_i}SR>BV~r~Y)K1J1J{=R9QKWpYm{_9*M9=UPf z#iNJjFFzo6UDuJ@{{7O=r)w`>J9m0ZzP{XlyB||7|J(VuaDhQ-V3hUo-W%6agW2V_ zWJdk46ezQe-oO8ud;g7vb(?Rxn7;qL{e#Wx+}mI`=qZ5}=ncW#H{L8@?Uj_5D>q|nKA5%jk{U=wX7;6)2FF{BBqTqj@vwYd~H*~w%fp*k;|sH>+@1|w*I$?a-V1P zd+48%X}y>+<)EMxODP*S$b{vqQy*sV>MiBYSk->?!91O=^h+_kcTZn8ykpo_c_!IT zX|G7RN!Ox^y1HxWv*+ip+-container1 -vlan10192.168.1.2/24eth0 -802.1qtrunkNetworkRouter (gateway)vlan10 -192.168.1.1/24vlan20172.16.1.1/24vlan3010.1.1.1/16eth0.10eth0.20container2 -vlan20172.16.1.2/24container3 -vlan3010.1.1.2/16eth0.30DockerHost \ No newline at end of file diff --git a/experimental/images/vlans-deeper-look.gliffy b/experimental/images/vlans-deeper-look.gliffy deleted file mode 100644 index 4d9f2761c..000000000 --- a/experimental/images/vlans-deeper-look.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":566,"height":581,"nodeIndex":500,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach 
Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":-3,"y":-1.0100878848684474},"max":{"x":566,"y":581}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":-5.0,"y":-1.0100878848684474,"rotation":0.0,"id":499,"width":569.0,"height":582.0100878848684,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":103,"lockAspectRatio":false,"lockShape":false,"children":[{"x":374.0,"y":44.510087884868476,"rotation":0.0,"id":497,"width":145.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":101,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network & other

Docker Hosts

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":157.40277777777783,"y":108.18042331083174,"rotation":0.0,"id":492,"width":121.19444444444446,"height":256.03113588084784,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":99,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-126.13675213675185,"y":31.971494223140525,"rotation":180.0,"id":453,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":57,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.4915197649562,-156.36606993796556],[-121.49151976495622,-99.52846483047983],[-229.68596420939843,-99.52846483047591],[-229.68596420939843,-34.22088765589871]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.82598824786317,"y":137.23816896148608,"rotation":180.0,"id":454,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":55,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.05455395299924,191.93174068122784],[291.05455395299924,106.06051735724502],[186.27677617521402,106.06051735724502],[186.27677617521402,69.78655839914467]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":332.0100878848684,"rotation":0.0,"id":490,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":97,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":9.5,"rotation":0.0,"id":365,"width":141.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":98,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Parent: eth0.30

VLAN: 30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":342,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":96,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":332.0100878848684,"rotation":0.0,"id":489,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":92,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":10.5,"rotation":0.0,"id":367,"width":138.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":93,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.10

VLAN ID: 10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":340,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":91,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.40277777777794,"y":126.43727235088903,"rotation":0.0,"id":486,"width":121.19444444444446,"height":250.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":88,"lockAspectRatio":false,"lockShape":false,"children":[{"x":236.18596420940128,"y":158.89044937932732,"rotation":0.0,"id":449,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":53,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.49151976495682,-152.05853787273531],[-121.49151976495682,-81.64750068755309],[-229.68596420940125,-81.64750068755139],[-229.68596420940125,-33.27817949077674]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-179.77677617521388,"y":56.523633779319084,"rotation":0.0,"id":450,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":51,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.0545539529992,186.6444547140887],[291.0545539529992,117.79470574474337],[186.276776175214,117.79470574474337],[186.276776175214,67.8640963321146]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":447.0,"y":150.01008788486848,"rotation":0.0,"id":472,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":87,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":473,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":86,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":474,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":84,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradie
nt":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":475,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":82,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":101.71008483311067,"rotation":0.0,"id":477,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":80,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.30.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":350.51767083236393,"y":87.47159983339776,"rotation":0.0,"id":478,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":79,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#cc0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":94.0,"y":155.01008788486848,"rotation":0.0,"id":463,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":78,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":464,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":77,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":465,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":75,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":466,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":73,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":80.0,"y":109.71008483311067,"rotation":0.0,"id":468,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":71,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.10.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.51767083236396,"y":95.47159983339776,"rotation":0.0,"id":469,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":70,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#38761d","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":341.0,"y":40.010087884868476,"rotation":0.0,"id":460,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":69,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":417,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":68,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":418,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":419,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":198.51767083236396,"y":41.471599833397754,"rotation":0.0,"id":459,"width":175.20345848455912,"height":79.73848499971291,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":17.482329167636067,"y":14.23848499971291,"rotation":0.0,"id":458,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":61,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.20.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":330,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ff9900","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":279.0,"y":129.01008788486848,"rotation":0.0,"id":440,"width":5.0,"height":227.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#ff9900","fillColor":"#ff9900","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[4.000000000000057,-25.08952732449731],[4.000000000000114,176.01117206537933]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":56.0,"y":503.0913886978766,"rotation":0.0,"id":386,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":48,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Frontend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":420.0100878848684,"rotation":0.0,"id":381,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":41,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":382,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":383,"width":98.00597014925374,"height":44.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.10.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":384,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":385,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":382.0,"y":420.0100878848684,"rotation":0.0,"id":376,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":31,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":377,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":378,"width":98.00597014925374,"height":44.0,"uid":null,"order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.30.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":379,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":380,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":214.0,"y":503.0100878848685,"rotation":0.0,"id":374,"width":135.0,"height":20.162601626016258,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Backend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":376.0,"y":502.0100878848684,"rotation":0.0,"id":373,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Credit Cards

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":627.0,"y":99.94304076572786,"rotation":0.0,"id":364,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":25,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":363,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":342,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-183.0,310.0670471191406],[-183.0,292.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":410.0100878848684,"rotation":0.0,"id":363,"width":144.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":218.0,"y":341.5100878848684,"rotation":0.0,"id":366,"width":132.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.20

VLAN ID: 20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":297.0,"y":89.94304076572786,"rotation":0.0,"id":356,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":353,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":343,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-13.0,320.0670471191406],[-13.0,302.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":222.0,"y":420.0100878848684,"rotation":0.0,"id":348,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":21,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":349,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":350,"width":98.00597014925374,"height":44.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":351,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":352,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":410.0100878848684,"rotation":0.0,"id":353,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":332.0100878848684,"rotation":0.0,"id":343,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":203.0,"y":307.5100878848684,"rotation":0.0,"id":333,"width":160.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 Interface

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":303.0,"y":240.51008788486845,"rotation":0.0,"id":323,"width":261.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

802.1Q Trunk - can be a single Ethernet link or Multiple Bonded Ethernet links

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.0,"y":291.0100878848684,"rotation":0.0,"id":290,"width":497.0,"height":80.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":543.5100878848684,"rotation":0.0,"id":282,"width":569.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host: Frontend, Backend & Credit Card App Tiers are Isolated but can still communicate inside parent interface or any other Docker hosts using the VLAN ID

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-33.0,"y":79.94304076572786,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":345,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":340,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[157.0,330.0670471191406],[157.0,312.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":410.0100878848684,"rotation":0.0,"id":345,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":323.0100878848684,"rotation":0.0,"id":276,"width":531.0,"height":259.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":19.609892022503004,"y":20.27621073737908,"rotation":355.62347411485274,"id":246,"width":540.0106597126834,"height":225.00000000000003,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":2,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":1.0,"y":99.94304076572786,"rotation":0.0,"id":394,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.5670471191406],[261.0,108.05111187584177]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.0,"y":90.94304076572786,"rotation":0.0,"id":481,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto
","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.56704711914062],[261.0,108.05111187584174]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":104}],"shapeStyles":{},"lineStyles":{"global":{"fill":"#999999","stroke":"#38761d","strokeWidth":3,"dashStyle":"1.0,1.0","orthoMode":2}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"14px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117295143,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/experimental/images/vlans-deeper-look.png b/experimental/images/vlans-deeper-look.png deleted file mode 100644 index 32d95f600e1d0f028e5a354584d7b3eac1639e35..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38837 zcmV(?K-a&CP)`~Uy{|NsBV<^9ae%>V!XU&~tip+HNj^66}A$mRQ)nVITrYtHNc(eD3Esqa*> z`kbAd%gf76to6~-(lK|cWX@pB=>GHa@o3Xzg_^4Ws!jc*N5S0jld8K^uJzT`)$ek1 zjEs!W&(7fC;Fgw^`k+76^8b5_oPvUbLqS5dw6u?pkKFqIl9H1A{QJh>@4UUdh>3~o zZf_bI8wCUeRju;(_xAs-RR8+*@o{lbulell>t$t)3OtT%C;^muqGbEMhb*uuoa zthc;2H8U@1m?bADOrYYNL_*Bw_Ge{V*N8X=7YhCG-9?x^NsPS9$HfQ|DjO?231S5M zlL5N}0aL3>H+?Qtsp(QsPhQMaGY|=~Zwi2)wv~;4X@!Z;naVZ8!1Fgf;UAc;zXuVure|>tOWE~Z8pARn$o3h1ObAbNj!%u^* z_5J^CTT#Z>-^Nl<{-!W<+HSAO-u~F8IZIryPE6BPOvND_pf4@y_4@7P*??SStj416 zXk>tnp?7p^5>${_Fg7T04AS7|-0SlB;o#|%K{jY`-gbA9fN4Oe^_8!W{kmp&I5V+7}GvKNhH!eJfrZ;)8=# zpwqakn3Tfhgs;~cM<7#nn}SJ8E;MGj>e#%;Zg8JfSz4 zvp_CT_#;99000DZQchF9>dx@GGiCe$0F;4AL_t(|USeQi8bn|eFfcF!*t2>a$5|-4 zqi+-{DqATiEeu5j0zpGVL+6shegeP$D|}pn2ahA~{O#-}nH(wBHO4^h(YZg#^A+KN z|9(9^qXjyfdTZ6k`BKNgy1XfuI#}PWcKuvVT6W&stlrk;s=3{#zPlN!uhga2huH31 zr`8S1DKDn%eDlluV%M`h{dz*^`D*Y05VF|C*xY@XF0?m}R`!QiLc3?IW1l)P+Mkbp zJBq5g|83EGj)%%QDk1j+RInXOmzz<@tvi4Bp-w>P4brX&x2%xY?$pi^;{7l%Hg?BD zJ55Fi5fg+k(=;FYt{WK}yeAHAS9uWfbzKxoYd3T=I6sC4b7@Z&KojH&aBl%8i9-}- z?#F>Rt(WBXR7!D<-vZ}KCSI)MO^Bvvo}kYOP2n|+wT0-Tfup(p2GARf>`j9s%Aww&IJlPEu;7F=CZlaivP(gV@h;xvwx6ru^ zBi^&_U_fL%zq?{Zo@g+g&zw9$)^C)i6B$@iaQqn!=86J%h^FnBcY0rpJJ~^##7rU? 
z7Lv=t9CnGVe@zLhxET65jbUYDjyRLi6sJMZk+@VyPOp6Q@%}}Vp)&zXOA9BNYAhHk zPJdsifqsfY+%N|TiD4C?WD`w0`Wlr`!dH43t^GJfZmHyz6Tx(urgNIZIt%KOUYC`PTk=3+Npo2+fSsJMp3%#W~E`ov*Wx}10c*&ublvv zD(EoR5fj}jA^9-=5MTTQ=@$bRPJd^w>3lD^m1R$=xB^95O1JfLE$*m=hd7#awnFfI zD#V+HQuQK@#*E(m9aYYO(j{^!UfN2$gQhf?l2RVc(U;ow-jcc=gjz}}MQNDWSs4iZ z4QaYAlx(tTHvtlGHaP&VI~l3qsOa7lf(Ee)RZ5I*lP$iT6L-Wka{_sTCNl%-ybxkS z{t@#Gc&aLK(%N8{yDNnZLN0Vz8e5P4MVCgHiRyGxA)0AEbR!!|!|8N_D?jwJu5Tw3 zv9+~D;>yZq8tz6Ry2T}RC@Mi5{|t`x9rOAf6MZ*DzETR8mK$;(g!pI|I=C40hySKR ze=5<`MRq%#)fm!_O{IO!P!_lD_6gA)x8?gNGkhtFBlKPW4et}=M9$5q~;`woA^t;vy%i+#cf9f8d^Z zV~m=iGPKnzl@IPxpFdq)BZwWkg2s*;a^6+|hOh!Q3PzYLXneSv>ksi2fn>g0Y58;cav6&vtV5QuTJq=C zm1`|!Ie<(`5OwqQ=`#o6@>%<_hB!pK!*EQz0`XHN1fd_*ht{k7BmmmSpz(ib^o7go zvDk1~Mz#4W4|l7#>`DgLB;>vop=0fFoTu5Qd+vVz2o=M3%K1b(Ft6j^Pl02_e zl{{XD%7=@n;Xj!N;e0IiK{D(!jjy)Rf4VBDuw4Ei7W+k#G9GTKLCEPlP#(xND$UOIFwfcCXm6$eA6lTk6? zgGW%3QCeQ%P;l6+V8SQoKg7*AoY$P$a9HtgvB@6pmUVF)iW8rrDph5@wFx{db^Oj>2+;FE)9P6>}*CMg!%4=uCb!zbdotnVpiV-AAKL4T%m(quY4lYl}fqj z2MGl5-=OqBRY*~y>1&w@%HY6K^{g9{oQ;E)2eSb|VAO+d9(K znF`IM3iN7;?&#}(NjHAfHg&sFm`Ne$)6im&zB&c~;R)vU7ZJP{zPm)O(CJ(%G|@`_ zYbg-lD8(BKw=a@_!JTqz%I8m>5Tr`O>har@O6%@vI;zdK2%d;FL}KuoSH)^6Q{Cwz zL-j6;_Y`3)_Q3L3mo>wUjwvgG(9OdT(niiOZK0W{lV05nlJh~)DN5H%*s^Si;B15< zb9*}hfEJ^Zx!~<505ZJ{Z|IME+o*|tSZNK!C6T^fkj{e&tM03~0G+r(C@$o(xb?gN z9;Wc97`{T7&#BxS%6_vYVn(y4fATDF0B?+~=n+AAl-Crqe~{ z%h1errt?WDcztsr+A$OTr_5A5ajcMwusFR&?*O2!=%Ym@`UoH#U-HaG6TPjIaZHGa z6;kMYSdgCRjpDMY0pcm{q!q$N1cx$CSu|xRGV#0YBu`L>?AU16ZaLOnhAI#o7)=aj z*ny!Q*IH?{Ckx#8?cCfif43i(22Da1Q|Z9$bb3Bcw|wt+R8#?q{fS=7nJA%xwpdbZ}wzdF4J{#GU!eEC=lwfh8mJTOLdHt~K- zHWP0m{A=mbCxP~KAzShbll6&=hXSgXxtf`{H@Xc->AY(IzQ1oT#&b)((IJ3&iN;%? zd_HdHHy8Hd(uFeyA;%AoI%c{3ETEH72v_g-x*g{95SNuZ(jpk72qal`BHVKY+3UFC zr=8n{(Y2Av$e)UC@rkYcOL?J z*((j{Z@c{L^uaI)LAwnP0#sFp($DS>gN=~hjOmRhtX|cKIJgraL`@Jl%=Ew8A0gSJ zJI``ViTrjG(m&)|9KBg^^eu5Nd%Q8%*3er_`9Dg}J4VL69b2yeW96zs$Wj6Te3%db zR1_cn;f{QF5&$w9AqcqfTZ0GT&S1q<2m*p6Nw%u3d0sbLhVFSDfFmjMRE=Ip+6>1R zz3XMs2vEqFFoR8q#X}P6=_4#hK$;2JLx`_uhawioSFZnOgZI~uPEM^6v(71TRqp91 z-rE$frUB#MUv4hET3Mnv76RfL#iJ>@LjC^NCHhK$gH1yrl+|t@|I4+5aA_AI^O7Wq zkH7@#9Y{bGIx+WKVj4^YyiX4JTHfI>2zlqr5eSz-hubDJ7%MA|1fNoMidEZm1SRJ{ zPF$Pa`J=?;#``XQbuY2iPdKk6#Wn2vi(hCB^qlRbrC)8vCn4bMcr%@yprvU+qASE~ z8*GYZTlwzIJv$I{XH7!rN1g>7OV|ZZ8c;#-P`QDGryUAHM;aA8E==z?&jD6~X(}R2 zCLrJvCfS$FI5#Eio!x*5IU$8mR*P3xhg;C+P90&vj#+gY9?r{q?#g&vS;z?W)iMR!cGlMMK%z#b1VvDG{q_PqdDP0Ka#v|GIS{o}hO^0wJtA*LL?QxW_ssJ;I+wf z`Air1{-P!F#py`Xm=^(ZM(w!#Z^5U_#LgA==@nyFINs3sqCuwj7k+88Xw)l+k89MS zyhb6ahh8S9z_G}kt6x3f=>ve+ck>4!AX!ox2KU7d9;mP=vT~9uJS1sI@{4NgG6;Uz z>S{vAoM+B#B0voGfpBC14_dhrCSH6Od)fk0SHimvtF0&bZKx`|K`eq%SA1l* zm(nS;S@qBn;CQ4T;X%K{3h}DF(zGD+4gyA)A5}-JEG?d~>li9VJ`{5Nt!d;KdirW4FckZTRs>=Hm0@mtB#lrUBEul8{DW9ntGP zDs{q|a@M0l4vtBy0HC@D{ zf$-UEBUHqIbs(hnf{5Civf7${gufuC%paVxAtKV+2qejwkeYh=$D~fo2nZ}%tRBZD z559WEmwai?b<<&b;c)bE5xcb-fS|+S9(!R;s6)+xq}$L!yO49rMH~)!+U_{LHEMUK zFZLuIo=r{Tmcu@9Dd-4Q#I1Ix-ti;enAp{=X}9`$KI8J>s|OI22!-ir{3swvqW@C_ zI!My^H9-hN2=R|#@k<8Z69gl?k_6_+#dxrUv|f?+k;Ky3**J^e6tYYoCtP=)=hi$x&zZ z?mcCw=lh#6tNmuuI-)rX(4F3s8vU#~wVJ=kKl$oYc<9>hqYo3g#R{#3B!yA%_yK{H zqWP#%$W-9C1me}azw|ig^tcu2Pj)wSn_%|>06TpkQFW>>q2tm$lTl~%MCA7XCjJ4) zL=^DnQYP38;7vh)>tZB<~al}qRiJy>Jv z>X@LX&d+81F;87JQ4v4~Gj(-r$`cJIHDvshvoYzBd1O6XI$GoBJ$16wG>zT_O@=9t z#}W_oHD0)PyK}!mE9J)%ov0vEnS$1+B#spjwkC^*js(Xl@YjNqRa)Fel0sWaAXd7P z40=W3BM=ZwZV6$$_&gp)0*V3zA#|n0V<od@6h(0fX;{e?9Ujm;lYfGUv#SqC z01wv$6H?sx+}zqClDWC@agKtAz9kq{rA3Hv=OEd)PT|O076j&84h02{U-(-^%n1ZR zm2#xatavm4#trFC3{TMK2fW=6;`QE`Cuh00E)<5-q|ZZe+~E&#S8>6$rMq|kbzxy} 
zup=jfgM$kT|9W>~ZJZSEry=et6L*1f;lN)z>f|rA&c8`PJ21clz&0 zi~7!h(V&_3(s3h>l?P3&1f?;K1vMtQuGLFxHW&cqFjn zRAro|D=9|4aalIn;mCn^MG=fpf*JRCg+C^E~a7 z`LTNlOK*_Hg^rGerSVd{^7;R@2@J~JHS%!XmsQ3rO|Cdaejp$!MU5D)f3L(^2uV_? z@`mqpQt)lNH+i=@SqHvv^+c7%vw)Vzr@nqs8}}p{`Ay2wGP%)a3Re{%MMuJhQGBZ$%XcR;LO^~XMUw}0$QDtU2-j%mR-pT=zEm)+{O+UA`wuVhM zO@q0Gj=`ll@w^1WKAoB2_~KoJ+}hlnFqfjqcXiby_A2)*9|(v>FB6Dlc$8dUl2DmZ zN1fsO^E3ba@83N8=Gq_U=kE{e+A7Tq-0#k$k!h!;(5+_^{8Qg+x6E+~X-JnZ>e`Gn zgj)Jnsl-Uat+*M$mIEcFZCj(!4eMFJjmm1noG+UBxqr$y+UL7D>gKI=qkX%}aLDPq z;FykI`K{z=OLQh2pIiLfpdi>Th;qK;?p*mYYw$q zml=a*B8H;hgf6@X{ioSj2syquaXxz~?@p{Qu1_q8(43z;!a^{XY`gy1|D7M6RhpSxR|-OH(F~>)#l>-M0$sj^yG!FVO_SmhM3U$^wYUThFQO{tCXcV*9hA&ZiBKk~ zOyPY2fuK!*Rnc}pANArFvop5thcmP9V1(J(`}Swu4G-s4d2_`|gsV^Zy?2sU4Jii= z7Vib|8S$~;^5hQX{*z{6+%b#AJsRv-Sf8UmtT|o5rd1Sf4FNs4c))6tWjqvS0N+ct>0r z*A_a?uaAQqZb*)&M~7(ZA9Zv*BKKcCv{Fgsdjdik1wfD?g2knklzDd8D66jveKr%S z)5?sd+2QNm*JsNF$#NH3b^lrKGmmHHvl&ml%s4!YqXs;N~z zM-}1GA&_bpvbdf^8b|`Lj_=^I?T_IHXy@W!$NIh}7%@_!(2?&Ah^z<@{K(wDZtL!T z=oucKHJe$6q8QeU)Mfbo%rH2L5DbAEf2#0Y&%^Fs>mO$U?7#ErRk2aVEhMkR=lw_j z@to3RysV8{?C4m61?M7e_w8(f#X;F#b*k5r zKk|(3&__>C9qAUC0D_u;}crW0v1pNWW21LB?uw@ zLOah~h}}@za6L}Yhf5uJ^}ndK=>}^TF2J?fiU5J!V8_zFcTlC6K&NGnno26UYGQ(s zTrx~Ez1{O>2+G!FmZ#r~=TfsX-QD-eoUg#~JV!$)+G4~%!hQT9c+iI-Hs-3JVPd`e zE7So+9KMJ*8ZeKn-@11R5OOx6zR`-3usmvqdFoQ4iy1oMQxEvGWMmnDCW0$1wDWA5l*{xgACOWJO=7HbH((6Jx|5lVA@u}yxzR8lfqd9t_Y4L|Jt=h*`z z|L{O({h*<+z}S=XKOsa)=HtsNn)@Ok@j1y+-h-CpQSSED-(fk4`qZEvLEN78UhlZe z6&S?rh&LFDR%*m>O}GQ-WNJS^?gIFZ6-No+k(Vr(uVdLo56+`onEC)$r>x9AlMpEkES*F*`bpeM zpv>xptk5mNUhLZkz7&wHflFN6XWZF{w3sVW?o-Cb*yyHH-Ln^ z(9B3PivD2sO-Mjw2T>xJ1kOLpJ}riKxV?K8>?f$<#;3SphG1Aw3ly=@11AYD^fY^L z5)C;i+KbZg?fBZ=Vs3<7S)JY<(2tJ!mSa=KfiY(b^&iXI4WFc&Rin?hF5bq|QQ_y- z4Qqoxb1ndOFUr#f-fDds3fXL3LG_OyQ3wbG;|s{B)1XwKNDazxF#M<*0tuDHSf7nQ z%EYu`^RogxAYfLkcI#~b(a98gkUoXni>MLK?@sT2`9&ub3}O@?On&)iZn9xD0I)&q zD~^ZFch3}qBvG^dPY^2DFzm_ItVjn?8#8)8b)@Ik7XSeyeDJuszi0m+d72-Y&0=OFGggn7F=%-7upZOcV~tO>N%;`n}m zCN7WEYr8DaFQe)wox)VUSRt3ZuOg9HW>O#<|Z>64|W9s#{lRZS{S6jD}`npzG zfU5`qm^iZqbgpLTJ`T#*`t}!R5bbJ59#X6MtanDd-q?wfQ+RF#{Tld z8(SCvP7{jan7QTYkN^8?=4mH@!ML>qkXZqca+x4Jf}>yLXhB}6M76ii763v= z`asGNjnza3#!~)KgLBmHtZ-;OOi~fr)K%>W7yI=p2ILEY0FOig5_ti0^c{BvMvYz z-ni@LM!yw6$b2{U_zy3>3ny7Xx}vlt47e|!zxmd5qc3Oyysa1z^EIS~Sjq$PYaSd{ z#s>xG>R_%aovRORQ@x{SOl(>hoWc*~g}RGBv_RMjX&-VZKq#fuTvJh_QL<$2g!w*V zH;}z8T(ewm=VQalF#|HOAD6);>Z;kAx~c$|LAos@dHcDi(V>vO2gE!h z0bZ*J13KergegxoP%7$rBCCOzUSHWZmaIo)O3ifGOLNTt%I=QEr2X%+$8Cu77!9tsdbSD&jt*Xmd@ zf47F{5{v3cOchLwc)@!KsnT8VF1Kz$8VNisAdBabf|N*5W?XeDKw6jR1Q9>rCQpNb z#*}}^-yb<)+ZejpW&lX3-vY+j2{NB2T0vSfO~z#r0|KOnPn&OyIH%CAa;kYF3}9jt zZ1sNa-7Ns*Z$+#xqZL!CP8^DUR~hv=I5bTDnc4R4d8Rn^62gL<&!QL)6gMUL@w>tY z@o(=)p`(f&?Diuu$K?UxN;M?W#0)uW!HzDmOu77ko^0f-t(iu{*p}H@#CCLK=e7yxhF5qCH}(2M{H`)Y?aFd!iA&^t0d#Q>YefHY69)JCjz z7?8-=PSoQJUD|R~7=8v|%U3&zEXejUcD1CHvS7(#D+K$jw;Wif4kS5|D*=%e*a--T zYx`}TX_W3ksN?}an-jI@G^bR7U3W%nW-&qm$e2}lkP5)q7$8}>D(Jfnn6_}Rm1Kn+ zky8w8@ilflaA!c z{^0|d&;A#-xEo)?#M$3yX+FY3FwvODDytz(O}&UmWl4?;HG_78C4#HW>e82jMx`6< zR#4#02uRkNTarfacJ+$E81e5QHQrhUWZdB8&S{+9U85Tr7$em z=LNxOFHp*XYHwp0WAle+$!sy~(sBOoA}KjKsw^Ig((o;wAZbW`!ccv&G93s=e?w1V z3e|{v%uhRgH@#rj+WJ_JmL%k}e7@6c8Ddv((BNsOJ>+^G+H70#Zfheg+usF8U4P!H z3uKnJ2ikT*BUE}Lg;+=WadUbq)W=)L3RcSN@6wQd^wS|vIB;m8UUm#3E{n_S1{vB& zwBV9BmKEt#3Xm!m;HI1`fu7=}d&l4f>ClTtJbdR2N?vWa4PCV$^OjI~Ndi*d^ag`N z39J_oDw2+Lf~;T$enPrKD7h)yg8W@=`b9vb5~&)A@+w-gO0Nr@(i%;sFs}2CvkeudabsMCH#XP%yghe~sF-GH!RmL}1)Vf#G6SPpV9x|ghDo??fLMwC{ zbLyv)e$Eug%WIiA*r;Qnd7kGWIJ7`&QOv>Jv$CAk?X;=?51COnRRh3B9OuT;YPAvB 
zJ-gK%NUJf25M2E&AG-bjdN3vGaaMukja6@?icFh4Q_D#imHBzs=0Ibu>e`svQOo?t zfH#jDcu518`!Ug6M=QONIYiN?`QXJ3;#>9FexQ=D@JU`i7TkMI7Vc0WnKwUgsB~g) zqTotEaEyZUa8be(l)97Y(wn?G=Bvusevj;3%PCf)jCdkHZm$OGTW?H9ngakj8snpZ zKVd*Jj@W7-ZrXX9HWJZG+nmpM=0&gcBl6qPDjoTY3{xL2ep8O4HwpkUa(!HVMa1u%gcl%hzO;S%?Ao*L>%-K$eM%gPI%*9O zSj$T}0j{XCnzQ;{ElGO-Y#EqU!(UE^)SgRk{=n5%kNPv1`r)2hFVTPB)J>BnLJuhzaL$2kZdo0~7VZYSzk`sCe$Z}k*@f352DqIK% zu~WICG$&i)F`mt`WPYtHGacK6(!xXHlcewx!tB$S<22wR5pFqx`Hz90x1 zwSEL^Yfx5iG}WD^jrI6n=;{PX6hXvaF@ED_fOcIOo16T7f7C3~vTf*}z}xkT-}1(q zk-(OABe30YTH2(xKQkXQ{Vq8_dG0TS`SE{JvWL=9QD9+2Qo02t>nA>uMlE#+?t@en zVm@5269`CC83n;I``N>vdV7bZ&Qb%KhMP|}l2((o2f!E2&W)rY<48AoE*c!4<7&;{ z0+2Cy)5~`dAb{REJJd)e9LurEPhM+XBcW+$53lCbH+}!;w?)%=q_B{UUE(Rp{<;@#kGqUiI!4(Mk(`bjDILi<5 zbzwdf?*j1^hs+mcy9$C-^JV)k-+G@V3qT<^eee8sA$7YR4p4Os6_LL zRMQFB>HdrAV3bWNeSb*z^t*Y3Kj}!0(D4k=NA1<7W-A5uevd~8`=*)}Hdes@kqu$2yNf}N3=qr!0(N7@u!>8}&k zP$vQ;xw-4s4}@__pt=X_S`DEMM-Ko#?fugr;ZAi1AJ?w*C7M^%oX_BUz7?f1T34;T zg;O`?*^qWQ_=p09dbaaRi*xjzaUkLNOH9#!_^;d0YpBY7iI&15S(_j`q)``H6MY*5 zoCOR6f*QQ?(``D6Y{OJG$x;*xVw$iR`J(`3K5@dV5?Up)6rj-%Kwwv; z?Gy`DZ6~}O3keES-B3QZx2>G-zJ`h3k0~gF5pjPVFCR@zEUmAtElo^Jpw|=d9Un`X zW5InT#z+-ksFd^(E;G#Um3l$|qz3naFAqqrL`o|M@V}Y69?wRyI1b@~MW@@A0eX^z zlE_G%%#De)Bgx)e2wgX+UO9J{BhF^AdLfn^t~R%VJDlFpVJW2=iUu-V=(!vrAt_qe zN}zRE!2JXJUS=kn?R#&2bjI3GTZHK}olm~s&wJnZ{b8q8a;r8}IbRDiNq5rsJpjK7 z;Gq+GaM2$8cyP2g*T-bBV$W4TwZ59KsUS|@bjfVNTrPm0$|^K zW>%5oAWi}JW)0P>d1;;m%%Azo-XE%?l~82BUetd>qV5eqIMiGHpvr%NAH#ahiCJlm z6Oub7LhSC3U=x(S-UX#U>IkT=F3OpGQP=~c1=OHi4 zi+LVGZAl-E-|;+OeHhA#OAsRZMwq=?_&1$S{{e9|nsf-Q@If?Y*0BLQLf%z%E;}Lw z@M@)AKkKO}z^*+b9!XEE$q8uFn8#vLqnpT<**u42UzeNZRvHbP^{{9=( zriMr~FjE_%4hR7OAjK9jd$IzjL@KZx@JVm0OGbN5a40jkSB^k|A%O!`e_ZL%y!yK`Y3vk=>T2=$?flmB-c~?nPq5%tTM%yjfVwV5>g_FYY6>-s^Dm+jyJxNA5#cf z`0sJ`x#S=$A@^$a;c$3StJP4s`_rbvdwtU03Bl`1QL#K)jJ-LbT8JnXi(>mHT1MJN z%-HHVA|z}a7lvKF|1gb^a$|ux*#zW}!(8jm-bKBFh8V}1O;uB5mUG1h8UP{yUkN$t z_xl4tZZn5z2u4UIcS?Etd?b$TL#Tv+0B>Z|MFYSuLdwj8ott|Fjw4wK>S)Gl%_rYL zyHu}`6X-j)kToF;g^<~l$++xfrw+`*=SVz79eL!Qkn?J{{YC55 z?s~JSs!$`8DMA|R(Ddr_%31Bx;de+c=I3`3HNtZm5FK`HV)11!LmuHePYB+m1Fd?^ zSQZG_k35?BtYCt0{hbLZX39@d7n2%>Ivl!j)>6Zsl-VTYLu$WsZw`0i9ZzZFqwB`#1K`F&wqRV1As{&;1RxOC%s#-lxBvSP z3vf5Dp=OYfrCNw$oXCFSVt$AmF*2NfsY>eKEw*#qZjZW%x1I2 z>@8f<2w|n5;eK2YVmhsOJnW|MObJWXa{{)PL;@kQs;V0A-vbml!DkD(LN3etp7OGC zZ{<8oK$${F!aPxTH9v$)=*4WYh#YW(#7TxBKqOg+Rw|YLl6`xA-=bzGg8!m!d)Y7T zrDG!6q-Cn9&k4@m$Rl1dvWFre-bFO&=EZ|;63WE}PRPMQ2}=_G=$5_qMz`^Mb%ltB zI3cqHheg)y$fML^a5hmD38u5eq{7QbJH)}*m%4?#W$*e~BS*4$B3zM@tEK~mLQ(~d zgoKpT8I%;efjBTIkn$TEe4Mkva~o~6-{6>;J8*Cad<@KGPR<7t3|wFsv*Qo2c(M)e z7?=-m@2RR(wB_ryT4n6r^)GO>)uo2+pI*H`Rqwr<64E^YA-0k2-*IEk+-IlTu} z2{NKHqNl_e+uwcto3BTJYyZAMNON&^cHvnj#Cqt&s%PWX%6wgr>4YF7Otn;C^x^NiTkA^)(eL$ddv)Gr--;B{frJf;(Z9*jO1+?W3m zg0f(>`mWtdNVy1a$0EA_m4n=aA7YaaS6RF}_A$(S5hSiD%9Nyxxr{}R+n=z%A7Y6Q z)t@53Dgjd^#H7LrnR{iB((UrL87|Ou(8+j`2SMMEn;T|?P7LgYPKc9fLbms2F;S7N zC~L)sC`b2CX$rl-QnS}H7iRM4zPU4;qvm-l6*uo(0t787VR~Q zymqlKq9i1@M2LMq1W%njPVl`XQ8}KTjrQH7um7#i2-_p%k@_L_QPn|ea&G4nqAo5A zt4mgRbaeK%v`NUHEE9s;^qDgoWt^zcV3B*yTtdcM!Is(=5-!g%5GyeZ+*ALpz>;}T9YJ@u?y3tb^GV6Ep^Cd7%-RC}6sK%hqQ z#}miN-X0wvVe0z{jF@q9etdj{ajxj>`1p^N<9HWG$Ilu$@mNhfKIxsA=HAXRp28L` z7GKygaVl%-B+zsuZRt4H2_dbDjm6v=AKf7ma&dTwqw^L<%sn`P@fi>jq9qoAUOa{i zKmN;jvP&Z$uZdF~;eZ!$gl|}v5Jbfi zV^7ZqiB8U5M7I!f-f2R*?g$BacGz{HB&0(^`iTU)Wpqf$rXNDyy?>3>0HCc6Xl3~S zJ&f;xkWZkcKL7#OFk+4AcV7Xn`zi~4t+z~1yn^d6k{yCBS?Skq5DabXX=`-3eL|c@ zn{dKX?AGf`bTlXD{Yg0-8v+Etk;X7X6(RTC`8m8%!nX}!tkew+jCuDh=#))D$feT8 z2kIOTc_V*c=Mu(OJs|)fK3onVZ2vW^P 
zkk*Yi>&xAR5GPcX(9%Lg#EN-o2m8MVsD5}?C>dCz;mws&4Odb%z<8;22t1^i)AsoRH# zjeL7sjB^e=G+~>^f`VM&W|2#^;z(#7ndZLMC1f1gPn7|Y=B4w%$WER;J31d--B?o0 zh!~H~?c)dyV|5(od0f|mGRD$02?AWCOrIOvTotiz(%I}w$QXQ`228D6Zd=+Z9Z>Nd z34vl--=6eKmh7cNGICG)Y6)r~NkC^KQUC+E8VuM$y|IoA8}~0ECZ3#VN*I>upAA1) z>_~`mO|()+J%HKa*1;D+>fQvU6sZ6%ii8OugJ%{M7PNSNsu%!hCCFwWQn@(Nugyr zxIk%Auy|Q ziB{l(2E)S2ih_yUFFj2Pxs!j(%Vua(vflkWnF3~)m^*9F=q0iX7gN4hm60t4Ip$${ zbK>z;-o#7>Zj?MHDXm+-%(j+Y8{RAZyk@2!Uy$>;$ABEGNuNYEMN%++-7SM^0#s}JwMYxa3B|<=g+|1!Ux7gX-y1ovmbpM53PTM8BQc-uhg`gc2)uom zMPPPl!oGxTy-Il0*26DrO!6S1b-ebQCW@-6EIrRF%c_c^rdhVJ0Kp1|TUeUPu@Fr* zNkWVtasVLKBm_r%*qAQT9*l!`_9X-{M?wsEwX9u~4dxDzW+B=meOQ4C(F+jTD4X6b zkV$Ev<1u-;iyvaTBjEA2nSO{;Zeg7cEd!HShlw7)gcwr7TqH%$3UxK`6$7NkwZ&^@ zfh&@_jWWJY^mb<|qjF@DglzgD#>yhQC64l@6{%8=6ok31wD3HB3E7ZwuL@&|@f?tJ z=57T`gCbxQjphDU-z|oCjHG0O4zc2gXf`}QmvMBeqywvh+k<__aQlOh@v>RxwQvKZ zB~g;0kphw=!S`sL51xw*S^|rTsx*$WDOKQ9531RINaj_3jK(4xmeaJx!0peD5W|W7 zW@Y5><#A0}Dv@Y%sEG4+ZyA-wqZ*|@up+3#lIvf5!eMKIe#q7S9f>3Z6P%Y38XF0S zEi+a;U01p)(ZlEv%bOc=`>KM$ga5U2I|lbfp`CxwMIv8D-c)|TC}UaMtALM9^|ES~ zYmi_;85$yqOh=+xG%$`{ah&-JB4oTikULPwLlKx&gQWoyB;xh0j;67)2nxoweMQc_ z5?NTZ$bvM=b?S?DYt_|M#LH4Ex1S#&)`f~MYwgk)hbF*KS>1*h)r)#Rlk*I~&km0c zPh_{~Kkvf@4=N#_%2hIoqQ>`^;U1Du3peefhMJ--*N8Lppe+IMnn|}#Gzr23^)fc9Gp9Gi;3Uik`2F^>0RguzK z#(PFwJP{%}Y1-Hg05?1uhLE;1APHV{U3ZoS|M|m@KYsHZ{{IV@y!DWpQ1ja zNNAle{HWwxmb@oIluP61XgHfRN9t~ZvYrs?eun*!zkd(!zeGY9-PmLW3~msTQ1s>$ z6#|%OP&W-!kS9WZ+NG(QMIH+bl_8`*V;=pp;}g0eI;plsbNiYn>7!4Sba_ zD(X7U+jZzim6uH@xAjEGW;i3`=xMffEdcn`vmQaQ(twJAs}<_(=g*%f3@3;lbrW|%b z_T^Yav{?cuN07Zj;f!9HpQ}%%oNu zp*r?Iuy?X$$1?3Ux``z7p&j3V=Gu|BCkU~>XSi)JrNr-GK zTsg{wE-L^;T#_kfw?nZmX5m{?Ve6Ypp7tvTd2K7VpX2h3D0!wz+0^_uXx2vQmoKPy z=G!joqqpAr4RkKXj{!Ns)T!EgDcikP*?tLtT_0yWr%pL3lnvt2!0_C7~IVu^iTmYbJjK9*;jK#N91VXKn_87t<<)>29b2Tb0 zCUh~Dk@KTEHBHR{Kt1U4^|{OTaEWn5t6V%5cPI@hIY`BpZj4Zl%#?opP7Y94xGZ#h z*Ura}AAioZ04ZDx2A!qgTwQX4u*x_A0YTJ~oOA^MtztkBaMF)KknZ%b(grPVl&-mo z!TWWFk+J|FSbB1RnjW=eDxKi-+X2QEsLTNbq9!7KS4tRaLCDXf7#9Gynk0q`@x)q~ z@Qg;b*{H&eEG`LU_$qt`qe;ghjG^LNCRwhdp5od0Nn{l z`y&Soh_LWAOKq&TEJRq9Tzjon4GV!@Z7q2ShzTF7Kvhd2bvF2*&e%g331~#IP+0vN z(EK_TlQwrMuyf}sfL2jqAt=eR7oVi&JrskFY$_6R7GzRXXYVYpK7z)?giR7DISc^J zZI$~G2wQ2X-d<6FP+}}M=yLL;G>{Y;tliq=VvLwL~!>IRg-BFuI`t2)(^I49KRC zE-yX0opCnbg`xw?8!M_V%DYfLi%)75D6imS=^}hQdHQDKiqeRYtESYw{Iupd6Nn`M zIJrvjJdqBk#Lt9GHa`Ky#X#u$^imi#^O4WE`NVos=^`HT2@dMVzdA8o)=tzOahZwVFpbbC^j>GaYhps6 z=?F43;VV&EGXBWp7w8&5j$iO?W5ZhdIY1ETGP#&*Wyen*b1#`HfDk#W?3F*!s^A}O z70AwZ`;SFge6wR5RDAa!8wq3jh-11Ws`?@sjPSb6$`-^|kZm|? 
zbd)i%b>c4;c=Wl;_WEO5GAY8_hHodn9@v8nOYEc15 zMNLqC?ek0m0R5!@;`y)SH_p6Qg<7?c;YFo*uHaE;KZq>`m`kKs1=wL0$A_vyyb5En zg|jezt2n9rxuUI|BmEl@FEl340KG)@Psb9oOQ&?{(d=E1hb&hDq6_jjmEv||C{dW4 zo>vgm7ZMp?d>2jsXh)>Z!n07qn*DZ{1uq{U9SaA*doHV;p0|)+-YPkEVv1hLMji;V3qV+i$ySTBvw496q093R9c)8 zBK}Mj@*>Ux?1)KOoXj;m%W)^?A6=K@F?Re3lwzuuysA@aYI!xL0+&Dc<>hq;l)OSb z`$u_B_NF(~ZUcli_QQ7{S-`B5hY&1FumrG#K`jVF%K1p3dDpMF#XiV5RaIn0p22wBvtw5@=Zp(Zx%5sRIL5rA@7=XMper@B(;`mRU0<^S{ z;lY7{F!U*nt4(5Lb})biD1n@XjO2q|FAFD3S*3tkf)xXp17jn==7@EGAaECK5(9@3 zM`CNqfdLBw2!XK)2HKy3N*lKbrNK1+K;Nu3bridm?6BC7uYvE(?3>lFAC2D33aydx zP{aab>{#dQPXidlVgRb-D3Qan8>9v@k{{B|x$j2~gd~Q-xKbE;FCe1efI3wOLLmT9 z72wbT|JWHoM+Y36d|1)c*na)x%YXgiFb=JI1f+?+{74MoAgP-lHiV%7xF{qB-UG-$ z;+kOL2%WJ+xvU}bG0Mzi)SxNnh_N!sK{S<{6p$c~Au<)HF>*f;%t>1Gep5!qUedDn zOAi@h^Tv&|2Tn7cZhDBmx5%HR)9DuVY@}bL8yvCq;zhdYAbLTdcNo}gwpe)Lpt-Su z(*qg0q=f>6GYZo{1M=y12lP8QfED^* zJkznafH3g3iylDh2C0SMDQ_S++vH6Gk|r-Y2u<>P9zsKT3)(ZQ0o0v*8&Qog93d-Q zGd@1`k3NS!jX?-QLm(jL*YsSBG9s&x*BOCOPXTZc66BL60hy$NMnI;?%K)rGu7K33 zLxocZ0;mBaAV0N%kUX+OAon_fpaPQa075zh5(FZu5r!lfyrd1EOMnAoiju^p=>!A7LZRhaUvk3kY@3teIu)8Xr$b%e`E;bxQ70{&KzPAI5exPy>u(f@!1z`cXpz)LDkZ^%OjNa<%N+V#fXhKzJ z*E_tU0Xq&b60$##LweWwTh2QI-gSJ|{@~&v0U2}77Z6c?NJ=1kZyh_dqpRAWaI}8B zqf=C=-Gk78nCA)zqDmHDJIERDys(`7*akn62Q|Oc5pWbwu;)B}Q}}Kn=o~odO=@8u z1Q1d?2jIylG7u1yg>hKKw_;d_Kv2|a#KB7!)%N5=0fI^=>mf`I+Le937eM0Lr48Y? z$F9WScN}I%AEi%bc;9ck?Y!;Tk3>@6Xo3Jej*Ia~H30ASDeyXeo`%3UAH%f(ey-MgKO zKah{q6cM}fwLxtl5S*=p2>9WMWRY6tTdEtXDL4%S6$w2K5RiQ^IYcWb%CVk!VBWIl z$6;N{+)92l5%1+FIDCL<7+a$ zx+$N`0{4N`q;zfhvoVv&_2*?WYwOo*8L8iTAal!m(#~4)ATlG9>zOC)e|pKyls3A( zFb{6MgaU-0$7Qd7PpStX;N8|o@veMLpHj;rFLO6*JMv~;#v>g-ejcfAS*z8ZgMZK6 zS{kXzzj^vUP}Fi$Bb)B*aCN6t)k{l}@1qy=JCTuBnTvvT1#<3|y&nhVp$lOE>2Zc| zu_vV!)#akIm79&^Woq^DT{*h;4iJ68sIDg?ltZd|&ay0-tm#w2*vmaPa4&1wnS-j` zXCR2pjBQ!Ae17IYv{UWW2h@XIhQk2D>eJJcW%8~-Hl=jkYzOAw~H+d>*nH^IYdvV zJrR(h*kIhXEGue52uYorj@ipup_bA2am8IAAbsKukWHD~pOGm*)&a!ZI(R%+S{kn2 z?GJ$b`1l8XW5gpMdTKTDO3&+eMus<|3(&(p#PdAP!VniL6S^L?-1wk5&@+(Xl?kh~ z*iUj90XeIkA>vbMyHbvNTT=Gpp)^)!SsE+St$7|UI3VS4A*InGmyHR)9sAVq# z)Us2CmyJf_EAb6nAs;t9GFn8R^*&_z;_TI{mp}jP`mN|tZyUoEefIKafBEQ>Pd@s4 ze;7baP1DTYMlU!Afhafw68b2GO6U0I$|0aKMvfG!!c|q(sk_630BUDkfPXe2df?%- z8jY~Nq56$hGgd(u2Ey=S&ZSU?<3PuQ3?fA^&|Pp)+#IYvfC$dco%LxP+kbn3&NFX#hFE6nW!+5$#H}w;u2V{R6iwZvw>;Qm5UwaXJ*{71_t4d>T zGzTpJcw1F#7q*8_&|uyn*bU)8qDYLd`%#lX4#&kH5+(kGAWCWvv;HDo5Id>_1Q%__ zOvD;uixESR*OIBWgv`L6N!u!rt#j7>0~0t%hD`1Lg35@%h2@)>dYnogU71P#5=cBxRg(!xp0t%cYui8=$2Ujfh!%@O zAasK+hYv!G!M()@GHCKvgh5?Q10hokp_G_XC>Sb@#Z#f1HbV!00%xeb^%HpM6#7AW zzB|3)sG*JSC%-)YqX)$E^CJl%$G0pP_p(?ObkOfG6h z2#k{b^jpF7D~JDXgXunrAZZqI((q7!UI>iGv)hH?yckzfO`OHzVIEbCz&J~;!= zU2Pu2iV;H2*UK$nK>_9jt-!!`x$Z;K{)P~e-Sa6Ldf=9B=XNxtNYWp|x)3Tb zo1sr6MF*`_)qrv#{fk`4Yb=KYgQuL~dmnu*~2Hg&+31JvdCOD8YR*0gQJ}Z2vC18wq1tMd-oyizukzo!{ zLI%AXV(Sp%3jU(TagGxw?{Gw~+z?U8pcK-p2|?)Uz^YjZq7>D(hsU^#{lg3Hsv`hEs+^Z_M zzdHBabE@cs;w2VMQy3M`G)Ici7R|P)B+%ew6Cem#7lCN8WU*LG4pa>jJ{!7)4r16e zMc>Drs@Bq4$pMVu(n=OvI1y<@Xk!Dw5ReZaViaJv?f3n6Q0i<$R_3z65Z$GL#VLc>9_UBNyeRtQh(Rvj;^EA z3mEGF0LXM0XC-~H1B0nt2L!XMzT~(X=S3A;)n!dM3~qbc5JQDU?FFZ}_XQ127-F@Q zQRYX6==2$@x0bP~Dnlz2jh=%Bh;9YsU*8r;$lNfra(P*D3nLvjyQe2S&m8Hv%>l=Q_ata|h4b@&t6RTTq&{B^&KttO8_~^T5gW5$(YwQ z+jVo+MkQytmX*y~?7+5c@))ShN@p2kc7|CNbMsl2se~2*`McYNQpvK7N*A8;10K?D zZ#o^cXQp#JVuiNJJ7;Zn=%Fcb7!URR^#q2RY^fdaHVC1Q^-IhFkh6s2o+*oW6kXqB z1sy;XpTlsYhZKu7R_{5OQC@t>>cz5i1a*k{J#Fw-)fuYQl!l>YULO`%vB8{S*=gO5_ zSP9oEb7@2jDW9`W(pD25kj2W>gqNJy0J3dmY5wzINkE#F49ori5L(qcrfzN?mjHyf z5g_)CkDY?&w0Aw{Y|%N^6BI<0byhg;v%L}T?0WEm5nqacSTz3$eVy^m36QCW(8ZpL 
zpgxWvoChs6Ssga`=}TTT&RJua;zw0}YE(IH_)|lrjny)+4Ffi$_yI5YzURPwra+df z0m4b}RzM&>5r`YQ-4j{LHoI0%?lvoPuG{UpZXPfyYhlG*L$JU|1PKd6LaA%zD>V5T zb`gkt6OdnBi6JJcT79Qb-~z}#D?koS=S;`p2_Sa_5R2@H9{}P2*3w8US%^P~_WOM4 zcvlCE5kkpzF(f{YA%lTx^cr-v(=w+2TwHBfUD}C&6qmsxMnINe9|I9k20oT7UQi7~ z>kSN}z>)y+3oThPq-!-dLIEW2h8r6jSqVVoSs?DlMl*W>h;3v3U8ph~NFo~zk(IO@J(thXB-WyN?yEa0Vy>NQbpId0*czDQ(UU+nb|4J8pAF zPu%DsLj(0$_5a!F_dNnKFusFoG>TOx0s=&_R^%Z^MzwKTMo=$DKw3U8QYb%9#eF^; zIP4T2lwu7v{&VfJ-n$-1ZVuBYed&g@WDyW7TN0M6xm-5_5=wcoWPO>l?6ljIb52yj33LMzv2D}sS(sQtRPCyXUJ6?j=G0u70 zb6A14z33S2!@!P}fSG+poyR6;Fz9z?fXoDSQq=EwP;+QKDjMH9yy0^IX#q&0%jD{8w6zMoGwv}JI+oG8fzHm_~gYO+x9}b=%%tlu=;4LJZG$o2Z++ z%(XJI1rLFjbXhnK>bAuY<*`OFd)qoeD|9P^AqO&mV3d?00|sBeWF;0$ZIK`<`$14D zZF+qV$z3T3*7fIoPw{Y^`|bTcxd^@mL0ebWgT7Zo%2Df&bo^=)mcF;%C-Hd9Jz)+{v{1hB_zeySH#jb1Iaqn(o_ zR%`62&{8`pHA7WQEsHjesGtBH*?Tu@O8izuyVHhGUjH}R94Qiae8q^czA4Mr0!^sc5CKbz&C=SR`@0(Pl~~*8_Q%G6gbQTpSmwv`Kk4(&Lv3y=2nJ zFc}%?kqS*48=(J;K;HX>ii6IyO=!O?+~O$@wk= zdH557pdNlL5a~@bF-p?ZE=?>F$<^v3+20-r6=|bJC_>ehv3=T$ghVHq)*{DiR6-bk z6bL@1-VmczhnBCXdKE2G%hX`0!Y4kWv6@)lq6W4pkOi?!LPJ@LC|! zc($h#D?uiog2=^p8fJJ%<8|%rf$WzMw^2Am+8)iTVjg-5?D&LvpWt|TOgE&=y(o}%1CGI4L%hK6{;O7i9%ne+KF?BG``U9`^d>7 zr%~29ci7(UK4*-#m76@|S|H1)BpPw!J3egGMWE1@eWV$2RCUpzQW}qljd5j%JLu?f z#`zIU)APhG1Hnu_{N$U3`P=WZ9^8BVm|m8pSC=jVk>=bSO}VvgTVo*G%)&&c8M86n zmH^`BWZ4aE8+9*Cz8EH@yG-OCZjObr^tK+d=Yw|-sKjDYVn(86TA;0 zm1X9>y2QIahoFTW~dAmZiY|K6Hkoxkmp1-bkF%18f!ckdoPeLAybA(z1P zgT~*0AtKatDJQWo`4w9L0So|XR;U=6$>s+-87CkV8v31)vxfqEp<|ut7lFL-q`t^sJa3nZC!E7~IwL|k1@ije?mrY4`}fCJ zJOqMloaP;Acmza4+j8m#JfwjJnS5$#K=xQ`km7ZaD;oaN>2mD)CF}mDzj=zcBkylL zT75GA0`q&jS{8`>?$yfIAARt_C!aKDUlBoqC%bNC>>;x4<~K0YSr^IqED+Li;f5Ol z3A3&MVpG}oGLSExZGHNIkUx1dE52g#XmOS(*jX{`lF}2cOWjKKxU1nY=>CR1uYwb6zAMumQx;_b5*pJs&wds_n&CG79P# z#2NrHTm}?6)9k4PkdL2z@%Yn^zFt{*wYvJ`$@>p(?I9rY+ZT@>eFbSsGqcxqrN3F( z36L<9;0J`lW_Fr@%#DGNhDxwBi6LVk-K#+UvbBXvk$gd~Wlbfa6gqm)(E&uc10egT z?K!A#^$RFA9s(9y4ItufqLy5ZiTuU)YUPN z(Tu@3R;0KLC$gZF{G3_wIwccQ3stC)>FH|V zm{^%Seh455eV>hENC`l=!&%|%j^9y4^&X>{_mSl2v?gYl|oB*=6h6DY%X1=nGj`7cORAkJ# zYgnl z7zo2trAW0DkBO3|8Bqo{Z;`q%gr!qf!~>Ltr7K-Jbm)SF#D>HR@Bl2(g-75WcnZ$G z94F$afs$kBFU7XbiK^yHe2(>d%wom}BpktyAqprC0>V;p8wh`tTR}}9;Z2QQaF8vl zxf-uYf)yNHK-?1$5)f8Kt0(N8gDRsQtTO7PkQF(Hx`5dKi9pCv%?3udK4SfDHK>Fv z;wHnTX&T0q_&S){^%+=WF=V#>Rw9sT>!u!-)O{en{BR50o~RGRK0^q^^Q^lPMG=fO zCr~$s=ww^;0%7-idwq}*F)gmR$wprg#d?5rs(;8*aV%`M_JPQVa|i-a2&7wVw&sY} zAb=F}#6Z+QZ^+RAVf|#t`jz_~i~k(5HgCfFLxN>WK=i5f{*cM8_yUq%*c`BTqeojv z0fkMLf2R3(8VU$Y=Z~^LlErMEB9M_jTf-bljt9($FRKcHz4jrv3cF5^FIRd zc&KoCzDOcL9>KE62}m42JUh3Jgxu_F2hS|#5H-@peQjS63Ax3tyIEchnA9;9DSh(- zeuP9uQM9_+rdM3UL}GU2;g|Nm4Vm>1~y>j+F5KREP)hbv4j0_ z=wV@Dt9Bxs2uWdc+<_q6D-6DYwJ%_6ZR-n|!($h(n!xU_U;f4BgCq?R*uPy?Y4!S& zj)6H&4&?Tx=SWD$W~e1gb+1+N{q}ksmv^U3gy6cjN_X`3^l>qY`>*E)31MNjqfM9V z&*|!H43DetO^m>AV*~t<&b~qBMu?YnBgR~zC4^9z{<_A-7+aY*pUq<{6uE*;%&Iw%mqS_!0M{XjQO3Iab6mO`UW9Gin(82%viNy zAO^#e=<{w4u28|_BPyRw#_D5x81+L#^ME{l*fhU=XhS_k=*uUSA<6ncp{cP=xwOUS{XID0*xVWbv{e#=4MrLehV~Ljs9U zUV*4wf$+Yc7KrfhM}e08m_eR>WXr<%Kp;sHX0$-?r#d6?MO+0!17^#5yE%~Y{AVC? 
zUm*MpY3(-5kKr>!0)&m9AwS9kurg(5h{(kZ0XsvkBX+-O%MuV;LdWgsvm0s?`lsVPh?G3rC6t(_!5^`R3Ufk_8|vwE}x64Ip}nv(EgWzYsG z+5tJMLKA*Tcy5gE4>9G&v6=)%AukWYu>=JNFb(Oq=i#S%r7oaVO6xgw&kooOg@9iz zd}W&kMF7_#2aS!uDLiOW>xeZqg`E;d>3l7=J%*;-*R?>F;VQByUzbJEOs)jZ1$AX5 zEPdwNMUb8Dd?QsWSJ$ zJJZMocMfvHHhsc3w!_zDdDH_fqs_`ii3R{IXUy6*5XJGasWRQcA%U^zqo2X>C) zsEb_+#fx~*42BHm3LRV!QK59umeFIuWUw|vhd72V!6anPs2+baB~CKo_nKhyj1KOR}u+uP`0v`Hba8K7vA+E+5KFrEc)eQlhrTxZ6 zR#N}zyd3~yZ85|^gr8pce*;qS_GZf0Pf!JwW>~S3uNszq{ib2je#35_J@by z9GDG9m<-M;fjk$%Cl@>3CP12p>Ao!x1_3XZ3rz-GElQq?0Qr90d=yA=Wa%sKj}F7q zJihYFqAF&;mc-Ec^5*&K9;xYmf3hJEZzq+I@h}a@?Y6UGBXe{T#6W;h69ym*1Kohy z3;-s#kWbPX(^E^N3Z!&o`Fexc)vKuMGStCLR%G&o03Jqk1Jx!#ylF!T(c3#qqynUT zWbNJexSfTO>k^uhBN(ylVOFhVYX9SGpOn7wK_q??`#P?4w&03XisE*H*qa{a4pcJtn z8Uy1#0VKhTx z2w7K=>YXv&Q^zw466DoTzpB@Iez(0t8rSiDQPn03bg8z1_l*E3q$^f zuJtl72p9oTAs_@)nUER=2`CC2kkILpnpKWvGIBL+&kH8_BzL{)-vm#Mn8SuH)BgOsx9J8FSKnpb*HVkvZ>aS$m`Zx zUs}Bqy36cst+nCNiulf@`-iT-_zrIPFe3ymVf*MdJREx^=0jTDSlh&{wrpJl8kiZt zS`lkwDqXD17+YbA`t~26gOQmFcJ2wGam#oWh}^dQ(CThsQ5QzL z-zHBQ{{R!e|I0US^t-Pi7hkY5FNC;#x^=?!ihGFHRz%ZU$C}l_b#mPeMw{3+FO2G> z?yoH`txUL@h_`DXM7ELs^s}GLq%4S#gAkNj9#Vd7S#4CyY;tX6Aln9PGu^DUEXFpM z8rOwPkhHfiX(-lF`wX(y84-dK0}@T0Kkt=D9WqGvXb#v3Mbfejz@lacXV|`Z$z!IqJ zCkt_ZV+Ftx%PR{o)epg-P1h)$!GpEUbW)GpvMRS$RMTA{0F%Ff>#QHIhQI-PBm^SF z(c>bE+bOk7n`jm4u{LckJEnDCP04%twSgTITV4mMxK$IqD_%z5J3;_fC4Yt~`o{tno_9=|xPZQH?g-IACfLXBf|KJKa^}0eKR8(kv@M=U zZHyo0@IxhGAYz@FRm6g93^Orkor%n=>_(YY&vlQygAfh-D0G)MoRqTmsmJIIuZ>}G z&$DLGxyhqOWeLj0!I-?tSZKhB=}9^vd{;;&c=QA&h_ey0r%@Ie-EisZL`G#_MLG1W zk$F#&K8%bJ!3bR%qE(KR!T6pK#?x=ylzrD8cVg&->;nWEB0eIR_>@qL2{h!!?+DgW zSi;lrWIplG=dl&!6(J~qvDWEA9E>82e2fV3C?OR2jzAoYhOi+NBV}MMs_cloSA<9y zYyE7ls2^YyReKo4*ufluAO|DFcU&Z`#{p(~fZZ3uKK_rp?c#BSJX#IeZ@})@z`Ngf z4VL?d+l5FxirqZKdsT@1?vu~nIVEM?haG6gFgFjum?aqB7s5FE_6dBw!n}|J48IPx z-;glbfFI}|enklJKmO`{xG3x7QP%l)ayS-p+z8?bxuXg14lzXT@5W;xFP=Y#%jD*Z z`s-i>VB~+n2rTXkLE@L+K7-r*&DRj`xM75LFuH$)@6agrj)dHN{q>h{f4~b;*5yKo zD$BCn2&NJQkK)#DkayG$Mcs#=nOQ>}d4Ld-v_MO$M)lNX{ML@-bucGHAfW;FTgY0o z5pvY%;9z~&G_G3EN|!wtKWYadsH*ck`Uqx-kC?LNh1539b6>6Mj1yFq(T%})Mu<|P z{`EUYHRLE|9W}JFd7d*l)hk43sBL6@G9yF%37Gi3@<4s%0On`_s~iji3={7}A&|38q}?Vd-wb!0}!%sgZV z+r0|jGtnNlTVP6n=AI!lLS||Rz6T?BVPp?;Yj#V7PZjd0H3W0nV^Rf~o04&s`54v_ zHButd0Amluc|zcMrgBDb7>wkS2;p(ykxFF(UIkT|fcPVF2MAyTh{yv>!laRTLotyF zeuI$NQ5F(`AVlU29MCXe7+~Apgxd|+F^Ov8ydfCL9^)AyXO6O#c(svBx7`GX>7u6|jY|hei>38Q=$I%hfeGuXtIo&g*N_WO z13DH$kELQxwNY?h2*F{rKTopQqi*YCyhCe()F#LUqs~3}6 zjwl3}6QXVPo*4?W2H)4q^UGeRE!tP=+(ymUfF5W)#pAOs_bBTtn&tSIemaA;j$wgb|m^7D4p=tz$bO{@O+4zae4Wbc{Z{sAHbK z6oPmKLR`<}2M~S1LCD&0SsGDx>#m`1H9}rn)IrE_W$JEZ8=is?Hj=6^aEt~wRBDP? 
zvp?=qmSAWAAr(`aJ@1D1v?@j>9aG%o!&S+KvRp2vHwZbJ7|suS9~dDXDlO*qqF7Z$ z&$H!*uR0Heph_=_BrY14>H`~>izRf=9c}pBl=ablAzT#;TeXI(#mH6XYN|nQ1BR}T zfH`#RgKeP?BTMlmLQ+@Mp+@?xMQqvgxJ%)P>fHvIs$8YZA|LFi0k$H%?)+0z*5ghL zNz5<_rSw86h-f^*8XPtPlS(zTl9;ImF;+E=l(&>HLC&oqKe{6Xvj*cZmOfCaXnX)d zlMFzOA2!vN*@Q8`1i%=%WMYT`fJv+bj~n7Q0{(|WH3WLAU;_j&-5Im2X&g+91kVV0 z+*3&p6GOV2BjMdNcKeu;m+Poo5WJs0kq6AwH;0KK-odD&Y=Ou%x`!Q@TNpimV)(~{ zkS)NYcVN5TY+Y>+S(YS@f2L{P0tT+*V)Ga9HQ_LfqN!?qusrX(X*r5dtS$ zD|9goskbZyzBVK$mu@zSBX!Cs3ob`^io;7`45Be(^FrWq1VJu^RZ;qug}|km2_nS9 z4~-5=(GcY-n~xBev;H0s5>fw;k1R0td%A}*5!9TEjHxNzhG#gHk4F7+NdB$EZ13&ja^DP#;8x|Kqq z(Em`s%Tbk(_}V+{XCnkgJX=rC_wIC`E@px&4?zNMP@#nCjVNTpJkiL!<~T*A+aXZo zTDQ4383P$7Aae->nKA}R zGRJ5y&`bMQJD<)1$w-qVYjp@bvU@vJ#1q2>NE;;^vC>XK;F0s~vmi3)0rejf62LN+#(>R&cVij^XA*hZv8HL|%HM%_YMOKwh~8fk!^_ybUBvsMb!0K$ZU1B!sM$ zQDVZc>I7MB6)s?kqZ^bEQ$Hk$xxK~7KL~;j`)NL4-eeSHG02vtEL-rDrg!Cl29Y6Y z3$>=f#cmZtp3^u4?1T%1U#B4Gj=cX$2O)H{HjTTaOB!|iO0V`yHi$z|m>8K`K~`Jz z*?DAlUa7k9MXfX!Cj8Qef^2rNw^QU=2a#3xWDRo(g3I~)9akp-WWwk{8s-)x{GNJ; z7jO4I+;V?ZNQgt3P`5*{EaAboo5KLI`=rerg4hRPq|?tKWLhXihAoSz-!ve1?0w83 zSfgn4S`c3-gc9?uizOYyPLW?R%vnhGE%bam8jqg29fD^y={AZpZ?Ys83q78iY;EfS?M|BRfGh@G_UC1g zb??mX*$>Uef=cyUd9hOwi>+zg%0lFk>Mu#GP<7-%*>V85yq+h!)3?l=)M zUnEu4drySu%X^c*jvXBTIP!X7I)kV8>bcx*y|Ul$=j9or{J6B^l3}BoBh?N1 zt9^qm(qrJcEB#KFEHc=9ulg(xWDiAsoK|E$eo}pH_IQTB>@C?yk ze9&ac!W?gALf5vD^?Dgt@O%Ew=Kj>_ zO>74y+j#Hz!c+H`KF#cV5Td?V%ZixE7NgmS^KdhwG~BWUQijH6nvbY8W>vycdX1ph zqq9^x{>eEVpNXKHjnHqIUz#Dnqi0Ber3HcRSysQ>agi-SfShq(23J;p)s+TM7~J6K zr7IfkU=0Bwm%GV(+OyX*40&evc*y6wFxzpybGR_oI80V%uicPZg@hE()JY=_&xzFR z)QY1K-IDa=JU)qrxbDeVW(b19u_=Q_JUUOU^9DZ|BB?e*nlETh(lNBZ;3b&lm#nr6 zyj%rVNB7+BAn0EN?u95#fxFV&7lQ!|RtE;J?TWMs^uq~x5>iSn>tk*0Pi=lVv3gS{ z$-;Fq-__>trqg)7LFgNA?*mgcNlk%Wj~ zHj9R%go2QqE$Ei9Fkd7maWsSX7bB$G?_$EO1(r8JJsJ$IK7fe25Bp;fvYgt}HQj|LYoFf|;(X-udAOS-bH4{g;eWM_`x8=CbO^mhZDNg> zUW1TE68^w8`37Bn=U-00Pr8H#cB%uRhc6>QOVRGm)%qH}mElQSux#K+$qDs~Q8X{_z zHI7qgcNm)NhL3a3&s#K3>6ma3LboDC2!`?4ibph$sdX&~X`~3Updn!%^6SPIY6wsc zA)05k2ikX1wN*j{uG^NPz7i!1e*J^nx2)CSgcx;BIj3P}gcQ;}-`!Aj4BngeEZ-3> ze`kRZKDXBvME$^n2j`JaDZQ1L%ML?ZdZNY)G5TW^X zqjd(6Z;j(TV&aSl8dB0s9Z*Xi6+i8j%~3nhG_BojN_gRdrG7`F!ljv6*KP=*?s&k!OrWbXkZ`r@c2-+M7| z0HTow*;ft~rx-T3Z%cWWr5}|EP5YD~PygoF1y@o6Ewua29N5a8**Mogqg8ITpQ_4ax5aWzDqeeq4nORplYL5Ny2IGsriREA`R@A?7bn)*U!R^HfBQT2&7NOQ zga(uVsXNZA+Cu z*T<*LW3?V8zFG6qcP6EzNZoOkul{h_#5Gd?j)c|QzkT~vIrtxKcWA>f5JLeJnVsZP zOP20v3$s9FDd3lQh3*iAhX&sQ9U;SYfmTtXD1mMK;E_+hb?zm0wD|oPu>RF>-_(tOV={cR5ofFR&WGK(>9ql*aQ&^`|7krmK=~9 zzE}Jwn4&B+(lrB(`22bx$yqI=jt&87kcSmd$Y@N~mjiHg$=)uz!j18PPf7EDL4FL7 z?;xH-^6|!i#LM30#2`6+Bk}KmKteEw_@^w|CFKYJI6)D9UYZlwJYy%K&XXIijOzhz zuDcdc;6^~GvPZ7S(JpCprWz~1X=$!4u+*?BR&Lk@+3pI8i2Ceg9#Je(dpyIo6x7dyNFsWU2^M(!qFa4!F$RI8R4F81K5n#3W(33H$Gc0l@`T)6@VFgfUA5jFD05 zBa8^65s)0M^&&c_MTlb*!Hl=+Qw|Y8T~bxO8{)A%C}L+;h>ek?#p3(tA)v#H>5k<^ zn2fC8W>uk>AfPxDx0rtMb%c1-PFr0B6UPghW%U`8nPU9AUq?SFsel4+o$*Kvki>As zT$dwkJ|A#wJ^>F}cPvX)hjulh7{{KNF?Nk~F{VroHOt>IB3=Z77u64xO^Yf6jFPnw zYhH*8)ldWiKH_YEApi+9XvoN6?AYjr#X;^_ZYv}=3kY3&x<4FH4kOmKv@W#; zgVBQP%0xUg!xx*`2pg^~QvwYxJnizt6$rk2Isn>`T^ZSmfMr<`=u#)bjL68Y&11us zPr8WbIeOo7+vP4bfJea({^G6Qbg`_+*kF1X+3uVbB98u4orgP*i~5$E)BTKVa~Z|O zxb4npVcjlwF~mymyED2Ej%08I5Nx>wH>{&csFatY;%teBubvcMj z42_vIO7LE$RBu-RtNZ?Y+}r$-g7d_0!-y6dW@Mqf&xM{>GqUQ)t|^Qw1138Cr7nBt<`c&eqU zs^vqzn3rGdZ)JBw)Y2YP;JGzR>xCcJ^dWOIpd6j+-Rog`l+{k%lBBzwsaHRa(;nOpJM5>qO^kx&{z@%+d3;;QbF;HfWwmQjb5f!b4r^WwdO^Z|G9D zotCuhcN7=0roL;Gy5c=+&5AH`(%=}N>p@cod7hx=Ys)$~$__6obGP5`-#j3aPn**1 zZL@yg402|$>2>b;8c?4ZQZZ+ga?{ZYxvn8B04_rMdS%zYZ_75CN}Fc_cQM 
zgSO=VuE{f2^ilUIYUzWb#!NrphOrgy||^s6Hazo4yjHXFQO zQ`J*{Q>B!H3<=~$Hu`7=KQ4I}nq;M({4fv^B^)j7Lz2_EB_YH#Ow+1sJtocQJbOplT{AlSKTSMw!3Ob=ev4#e|xi0NF` zvmoeJ^y8hkJ^Z5l7YGPGJB?_wb5VYCygYmxiOrgIKnQX`*8*LDa9r(ULQ6Y*PSYP7 zr4{k}A0F!u3tC$0aKH7$bk@_Al;Pi-5Lrtiy%JUL*}y4 z*;?N@^zPrgkXG71bd8KmNYfjLn0}=P;$fG5Om_z2$yUJ%f8TZPblYXWwFe8>YV7j@ z;qkf^wF-N|-@49W^}jx}(zE%&F9+vUx40~vd=S(9brrAcAtH`CcyVF|6v1A`8|7CI zcJN(2XfN0D{DPiISC(#5LKv@^e%0tXOTDHSyeM_WhDz}lTHjswf-^n3tG#*4gG!*TFn)sLmbB==mQtw^)rZX{Ni%R{ij)U-IOdc~AEnor^t> zO0Q7*w0?XnJ$AIy5TQlWf1(AAZfkkyK^XCG8+g5^?K-8$PgbJu%MnKHmu*P4I77aB zf8#v_FY1ycJw^+fBn`th5IrN^Z%g}SNm>bODu+06S&GDl6tzl{WCh!LN2)a6Nmq?7 zMFrKTJX+Kun$z~Rt+X$e2awYR?O3Z6)$pzwZd&{1Rp}Y#(TwDIQWbsbE$Q7EwcLA@ z4nL}LIbY5<(|#*mY3oC+#}N+Cck;T<9=>&L1)h_mJZ+Y|@?|@n*HkC1YtBrXONm4EyG^)X(dz2moFPz*g471s}aJt z)R($f=ta$PK7Gj%yzoP1QPTAupOYlB`$?DYz{2FIW`0`@6$d;j<#QVlhH1J-nu`$zj4#AERtcrQE(WyQ*|C=Cshxz z;$qu%QYitu3SssX_7~!el5(k0Tva%bmhFg;jLqJ(9h)P>o>hn~7>Zz|d}Lqw*s)PY z;XvGW(g6mkdcCgjG_$&@$i_uQ$k@bCcxN`n!wvLs`;p?Zw|3*=%56gryv(i&)Rqy- ztX2dH{K&BR_G$<|6CT*HUBTrMLlqIjju1y&q4c;|&y@QY_4qhLzygt7>^!RQvW#%% zQANfa8S7#Ofh|NGT(C)FdsQKhLNX}@R-(NcQ5Zl?Kpznfrd+QG*eF(P*-6QWL+#l* zm|V5pjLQQ&!bU*CuFJCQI_}L+>X=dtxZ%cL*;tF=gX12;$$K>cF#>mrp~J*6MIaTA zvKAT`W>j(@VFba9&CELigpFxa3Wg|+1yI6AFvj#^n>cpIw3+b{CNzcx4URR)gqMgS zjwcEH*Z^+nfk@kCB;r^yu@LK{v9XC89Mj1x(8MSuD1nY)hz(;H7^~rtV+2U}M4dqc z5jG8;Ppp9L__}#S6BFg*zaF}|+DW|f2~IK$>KHr#8H2~d8j$>@>2sinm8hz60QGx_7jT8T@< zWKd#xN+ul+k+>uUIycOa>13x@GBQFt;>bK1OT`b|2$Ud@u>#4+EEvLcehKG~hU2sX zaUiGGSQppbM0wi$p+ChofsPOnfW8(rcG8p)y{2q1pi`15OlhV;t_3)8FakJ<0K(VZ zu;~&c$RxwZ=>-l5EC(VP0Z2oD4=fG^-pvC-SPmv%9V~}&l9d=}Rv?IT)tL41ND5)Q zf?^ep#}@%*=?vo-+kFm;h2s$6Nor5NY?$F#4hC`aWrTUu?(YKV)BM>2|AXElX-UBI%fzz(P&?7DWH+{zAk0^+=^q=csanP&hp zwqk25VJ4N55wcg~mj{7`#R1cXttgNQFl1u}PO!nIOE?60!dB_6{Pnj*7h!oUN*`FV z(=dGshA_^g>h!@bwqugQrtFf|a<^y`%qC5pi#oq3^%;{8&aegK3gDDDDdMon8O6FS z6JnGsp*&!27?YsF4?VHHcG;3%5eUy+DSL@qrRLgmYFZQ^spgy z%)yt|8@*D57t@ZE$9F&`LU>Vyk$gB`L4zdY_(1x^z&^N@a6l^#GBk7KrKj@`^SndMF2GD|415J;%g(X9f+ca~j=ESFPuWv`SV8N9C5Z0b!(@ z9Wc#~x&f5q2tNBE#SV~*PoL2n2LvzZQFB#E^y5zb-3LU7aj#xX56)m6^7EH19fG=Uh0S!ZG^EQ9U%U=#p3<<|dO$cjrxMiK z>b@R`KW*H%`1WvJg|*X$c6qmGMF=vaFE*b4zCzAA6%F2zoqX`gN<9M36Fm^iCF|>F;I~fLlr}g*nkU_M_PN-w%3f(45Iuqn z+MqZ6cj`ROSyX&c>x(bub_U|T1M%qD*`LYGkj`A5?L=Sg_&Dd)f0-4}m+gedN-{rcU8mvc7XRC+xSh?HccMd8{Jpo% zYFiMHLN{D|jZZ$DOJ247^>-Wuqgd8n-yQI1Vv5lLqH(g-uMVT(ft>%S96*g6yKnqzZG-kLnxIz2b*^wAi%5C z%)D2P5c+aYl@4%I`i@d@ea$Lod$iOmtYs$|5{U)#8NKNq!s^LzSUr3b7S#O%dJjqv z&(2n-=$*MYm05X~>_b;-^mT>3}DA@$=$t9FT9%J->Os^yg4a4`$40 z5i+DtYbD)pl`5FN-o?2cMe{L_&(;Rb@}Te;fBbATo(^zajcIDZ(ux7KG02cSS}3)3 z5T$dLklD1Ny@z56G9+BA&JJ#MWUWsh+=Hw_Tx%HuvhjP5oOdce2_H?RI>-%L+ACHh z`_R&sw~V8=7kTdI@5IgJ^24SUXGn*Z15vBgRJ`bgh|osXWOdoMwqlpeAO*H&-h2ZUgz$7-dKKcP z4z6_RQWOh&TMZR`DX9>Ug|$@iDGU#8^b_tn1SEciwr!sMVni|m64=KOpv$O?Eepx0 zz++hvsI15WZ@2;@P<#%F6|e)^29tLMf?URfO+AnWjAU1kc|j%^cpR2NMkcb$6Ov(W z44GtH0RRBOtYPLLwvLV1499FDLynL7g6)pLH5)^YjWFmS3GfIw%P20#MA;!h23H6% zn8;ujfGacW&|`B>U}wfZM`1w7n4oH~IRGQZ25_ARDQ6D?0yMZmBR8rBoO!;>>&k9p~*R9^rlY`hu8=1$p;28iH8sYDPUmM!e{(rC5pL;cFYDn z*1#TNMU)XE6R}7l8vEnO*iB-MFcHGV;$&imv;dDI<-lNT@UeyG1X$%)cCYFo!pe9``VxW|5_GAWXZHCAqhIxjC$9KOUX4J zOgdv?8k7+^k{byTOh$uap^4ei-*{;1o12J_`BOc(rXmPW&?L&&VNVzAI?Sm-)=p?{ z!u&tAvx!PvmAGytZl;!!YvOa-Pe_8OBIW*9EbMrTEGUr5^Hzux(A1|L7zLUFaV};$ zmMr*l2n?v@nKjw3*P-+n5Cc@D^DJU}z~t zJ1`0?4FT-iAr1f_2td)o#{F+>V$PyTF}xS4e~d(w-H&0(j3&$tSqIkk0iiCo-SqvCT7M#14aQe6999SUnert*{%Qp N002ovPDHLkV1f$s#LfT! 
diff --git a/experimental/images/vlans-deeper-look.svg b/experimental/images/vlans-deeper-look.svg deleted file mode 100644 index 96cd21d52..000000000 --- a/experimental/images/vlans-deeper-look.svg +++ /dev/null @@ -1 +0,0 @@
[SVG diagram text omitted: Docker host with Frontend (10.1.10.0/24, parent eth0.10, VLAN ID 10), Credit Cards Backend (10.1.20.0/24, parent eth0.20, VLAN ID 20) and Backend (10.1.30.0/24, parent eth0.30, VLAN 30) tiers isolated but still able to communicate with other Docker hosts using the parent VLAN ID over an 802.1Q trunk, which can be a single Ethernet link or multiple bonded Ethernet links; each subnet has its own gateway (10.1.10.1, 10.1.20.1, 10.1.30.1) shared with other containers on the same VLAN/subnet]
diff --git a/experimental/plugins_graphdriver.md b/experimental/plugins_graphdriver.md deleted file mode 100644 index 1a291e0d5..000000000 --- a/experimental/plugins_graphdriver.md +++ /dev/null @@ -1,334 +0,0 @@

# Experimental: Docker graph driver plugins

Docker graph driver plugins enable admins to use an external/out-of-process graph driver with the Docker Engine. This is an alternative to using the built-in storage drivers, such as aufs/overlay/devicemapper/btrfs.

A graph driver plugin is used for image and container fs storage; as such, the plugin must be started and available for connections before the Docker Engine is started.

# Write a graph driver plugin

See the [plugin documentation](/docs/extend/plugins.md) for detailed information on the underlying plugin protocol.

## Graph Driver plugin protocol

If a plugin registers itself as a `GraphDriver` when activated, then it is expected to provide the rootfs for containers as well as image layer storage.

### /GraphDriver.Init

**Request**:
```
{
  "Home": "/graph/home/path",
  "Opts": []
}
```

Initialize the graph driver plugin with a home directory and an array of options. Plugins are not required to accept these options, as the Docker Engine does not require that the plugin use this path or options; they are only passed through from the user.

**Response**:
```
{
  "Err": ""
}
```

Respond with a non-empty string error if an error occurred.

### /GraphDriver.Create

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142",
  "MountLabel": ""
}
```

Create a new, empty, read-only filesystem layer with the specified `ID`, `Parent` and `MountLabel`. `Parent` may be an empty string, which would indicate that there is no parent layer.

**Response**:
```
{
  "Err": ""
}
```

Respond with a non-empty string error if an error occurred.

### /GraphDriver.CreateReadWrite

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142",
  "MountLabel": ""
}
```

Similar to `/GraphDriver.Create` but creates a read-write filesystem layer. The response is the same as for `/GraphDriver.Create`.

### /GraphDriver.Remove

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
}
```

Remove the filesystem layer with the given `ID`.

**Response**:
```
{
  "Err": ""
}
```

Respond with a non-empty string error if an error occurred.
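Since each endpoint is plain JSON over HTTP, a plugin can be exercised by hand during development. The following is a minimal sketch using `curl`, assuming a hypothetical plugin socket at `/run/docker/plugins/mygraph.sock` (the actual path depends on how the plugin registers itself):

```
# Hypothetical socket path; substitute the socket your plugin listens on.
curl --unix-socket /run/docker/plugins/mygraph.sock \
  -H "Content-Type: application/json" \
  -d '{"ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", "Parent": "", "MountLabel": ""}' \
  http://localhost/GraphDriver.Create
```

An empty `Err` in the returned JSON indicates success.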
### /GraphDriver.Get

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
  "MountLabel": ""
}
```

Get the mountpoint for the layered filesystem referred to by the given `ID`.

**Response**:
```
{
  "Dir": "/var/mygraph/46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
  "Err": ""
}
```

Respond with the absolute path to the mounted layered filesystem. Respond with a non-empty string error if an error occurred.

### /GraphDriver.Put

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
}
```

Release the system resources for the specified `ID`, such as unmounting the filesystem layer.

**Response**:
```
{
  "Err": ""
}
```

Respond with a non-empty string error if an error occurred.

### /GraphDriver.Exists

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
}
```

Determine if a filesystem layer with the specified `ID` exists.

**Response**:
```
{
  "Exists": true
}
```

Respond with a boolean for whether or not the filesystem layer with the specified `ID` exists.

### /GraphDriver.Status

**Request**:
```
{}
```

Get low-level diagnostic information about the graph driver.

**Response**:
```
{
  "Status": [[]]
}
```

Respond with a 2-D array of key/value pairs for the underlying status information.

### /GraphDriver.GetMetadata

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
}
```

Get low-level diagnostic information about the layered filesystem with the specified `ID`.

**Response**:
```
{
  "Metadata": {},
  "Err": ""
}
```

Respond with a set of key/value pairs containing the low-level diagnostic information about the layered filesystem. Respond with a non-empty string error if an error occurred.

### /GraphDriver.Cleanup

**Request**:
```
{}
```

Perform necessary tasks to release resources held by the plugin, for example unmounting all the layered file systems.

**Response**:
```
{
  "Err": ""
}
```

Respond with a non-empty string error if an error occurred.

### /GraphDriver.Diff

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142"
}
```

Get an archive of the changes between the filesystem layers specified by the `ID` and `Parent`. `Parent` may be an empty string, in which case there is no parent.

**Response**:
```
{{ TAR STREAM }}
```

### /GraphDriver.Changes

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142"
}
```

Get a list of changes between the filesystem layers specified by the `ID` and `Parent`. `Parent` may be an empty string, in which case there is no parent.

**Response**:
```
{
  "Changes": [{}],
  "Err": ""
}
```

Responds with a list of changes. The structure of a change is:
```
  "Path": "/some/path",
  "Kind": 0,
```

where `Path` is the filesystem path within the layered filesystem that changed and `Kind` is an integer specifying the type of change that occurred:

- 0 - Modified
- 1 - Added
- 2 - Deleted

Respond with a non-empty string error if an error occurred.
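Putting those pieces together, a hypothetical `/GraphDriver.Changes` response reporting one modified file and one deleted path might look like this (the paths are illustrative only):

```
{
  "Changes": [
    {"Path": "/etc/hosts", "Kind": 0},
    {"Path": "/tmp/scratch", "Kind": 2}
  ],
  "Err": ""
}
```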
### /GraphDriver.ApplyDiff

**Request**:
```
{{ TAR STREAM }}
```

Extract the changeset from the given diff into the layer with the specified `ID` and `Parent`.

**Query Parameters**:

- id (required) - the `ID` of the new filesystem layer to extract the diff to
- parent (required) - the `Parent` of the given `ID`

**Response**:
```
{
  "Size": 512366,
  "Err": ""
}
```

Respond with the size of the new layer in bytes. Respond with a non-empty string error if an error occurred.

### /GraphDriver.DiffSize

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142"
}
```

Calculate the size of the changes between the specified `ID` and `Parent`.

**Response**:
```
{
  "Size": 512366,
  "Err": ""
}
```

Respond with the size of the changes between the specified `ID` and `Parent`. Respond with a non-empty string error if an error occurred.

diff --git a/experimental/vlan-networks.md b/experimental/vlan-networks.md deleted file mode 100644 index 7704c5758..000000000 --- a/experimental/vlan-networks.md +++ /dev/null @@ -1,721 +0,0 @@

# Macvlan and Ipvlan Network Drivers

### Getting Started

The Macvlan and Ipvlan drivers are currently in experimental mode in order to incubate Docker users' use cases and vet the implementation, to ensure a hardened, production ready driver in a future release. Libnetwork now gives users total control over both IPv4 and IPv6 addressing. The VLAN drivers build on top of that, giving operators complete control of layer 2 VLAN tagging and even Ipvlan L3 routing for users interested in underlay network integration. For overlay deployments that abstract away physical constraints see the [multi-host overlay](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) driver.

Macvlan and Ipvlan are a new twist on the tried and true network virtualization technique. The Linux implementations are extremely lightweight because, rather than using the traditional Linux bridge for isolation, they are simply associated to a Linux Ethernet interface or sub-interface to enforce separation between networks and connectivity to the physical network.

Macvlan and Ipvlan offer a number of unique features and plenty of room for further innovations with the various modes. Two high level advantages of these approaches are the positive performance implications of bypassing the Linux bridge and the simplicity of having fewer moving parts. Removing the bridge that traditionally resides between the Docker host NIC and the container interface leaves a very simple setup consisting of container interfaces attached directly to the Docker host interface. The result is easy access for external facing services, as there are no port mappings in these scenarios.

### Pre-Requisites

- The examples on this page are all single host and setup using Docker experimental builds that can be installed with the following instructions: [Install Docker experimental](https://github.com/docker/docker/tree/master/experimental)

- All of the examples can be performed on a single host running Docker. Any examples using a sub-interface like `eth0.10` can be replaced with `eth0` or any other valid parent interface on the Docker host. Sub-interfaces with a `.` are created on the fly.
`-o parent` interfaces can also be left out of the `docker network create` altogether and the driver will create a `dummy` interface that will enable local host connectivity to perform the examples.

- Kernel requirements:

  - To check your current kernel version, use `uname -r`
  - Macvlan Linux kernel v3.9–3.19 and 4.0+
  - Ipvlan Linux kernel v4.2+ (support for earlier kernels exists but is buggy)

### MacVlan Bridge Mode Example Usage

Macvlan Bridge mode has a unique MAC address per container, used by the Docker host to track MAC-to-port mappings. This is the largest difference from Ipvlan L2 mode, which uses the same MAC address as the parent interface for each container `eth0` interface.

- Macvlan and Ipvlan driver networks are attached to a parent Docker host interface. Examples are a physical interface such as `eth0`, a sub-interface for 802.1q VLAN tagging like `eth0.10` (`.10` representing VLAN `10`) or even bonded host adaptors which bundle two Ethernet interfaces into a single logical interface.

- The specified gateway is external to the host, provided by the network infrastructure.

- Each Macvlan Bridge mode Docker network is isolated from one another and there can be only one network attached to a parent interface at a time. There is a theoretical limit of 4,094 sub-interfaces per host adaptor that a Docker network could be attached to.

- It is not recommended to mix ipvlan and macvlan networks on the same `-o parent=` interface. Older kernel versions will throw uninformative netlink errors such as `device is busy`.

- Any container inside the same subnet can talk to any other container in the same network without a gateway in both `macvlan bridge` and `ipvlan L2` modes.

- The same `docker network` commands apply to the vlan drivers. Some are irrelevant, such as `-icc` or `--set-macaddress` for the Ipvlan driver.

- In Macvlan and Ipvlan L2 mode, containers on separate networks cannot reach one another without an external process routing between the two networks/subnets. This also applies to multiple subnets within the same `docker network`. See Ipvlan L3 mode for inter-subnet communications without a router.

In the following example, `eth0` on the docker host has an IP on the `172.16.86.0/24` network and a default gateway of `172.16.86.1`. The gateway is an external router with an address of `172.16.86.1`. An IP address is not required on the Docker host interface `eth0` in `bridge` mode; it merely needs to be on the proper upstream network to get forwarded by a network switch or network router.

![Simple Macvlan Bridge Mode Example](images/macvlan_bridge_simple.png)

**Note** For Macvlan bridge mode and Ipvlan L2 mode the subnet values need to match the NIC's interface of the Docker host. For example, use the same subnet and gateway of the Docker host ethernet interface that is specified by the `-o parent=` option.

- The parent interface used in this example is `eth0` and it is on the subnet `172.16.86.0/24`. The containers in the `docker network` will also need to be on this same subnet as the parent `-o parent=`. The gateway is an external router on the network, not any IP masquerading or any other local proxy.

- The driver is specified with the `-d driver_name` option.
In this case, `-d macvlan`.

- The parent interface `-o parent=eth0` is configured as follows:

```
ip addr show eth0
3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    inet 172.16.86.250/24 brd 172.16.86.255 scope global eth0
```

Create the macvlan network and run a couple of containers attached to it:

```
# Macvlan (-o macvlan_mode= Defaults to Bridge mode if not specified)
docker network create -d macvlan \
    --subnet=172.16.86.0/24 \
    --gateway=172.16.86.1 \
    -o parent=eth0 pub_net

# Run a container on the new network specifying the --ip address.
docker run --net=pub_net --ip=172.16.86.10 -itd alpine /bin/sh

# Start a second container and ping the first
docker run --net=pub_net -it --rm alpine /bin/sh
ping -c 4 172.16.86.10
```

Take a look at the container's IP and routing table:

```
ip a show eth0
    eth0@if3: mtu 1500 qdisc noqueue state UNKNOWN
    link/ether 46:b2:6b:26:2f:69 brd ff:ff:ff:ff:ff:ff
    inet 172.16.86.2/24 scope global eth0

ip route
    default via 172.16.86.1 dev eth0
    172.16.86.0/24 dev eth0 src 172.16.86.2

# NOTE: the containers can NOT ping the underlying host interfaces as
# they are intentionally filtered by Linux for additional isolation.
# In this case the containers cannot ping the -o parent=172.16.86.250
```

You can explicitly specify the `bridge` mode option `-o macvlan_mode=bridge`. It is the default, so the network will be in `bridge` mode either way.

While the `eth0` interface does not need to have an IP address in Macvlan Bridge mode or Ipvlan L2 mode, it is not uncommon to have an IP address on the interface. Addresses can be excluded from the default built-in IPAM by using the `--aux-address=x.x.x.x` flag. This will blacklist the specified address from being handed out to containers. The following is the same network example as above, blocking the `-o parent=eth0` address from being handed out to a container.

```
docker network create -d macvlan \
    --subnet=172.16.86.0/24 \
    --gateway=172.16.86.1 \
    --aux-address="exclude_host=172.16.86.250" \
    -o parent=eth0 pub_net
```

Another option for subpool IP address selection in a network provided by the default Docker IPAM driver is to use `--ip-range=`. This tells the driver to allocate container addresses from this pool rather than from the broader range given by the `--subnet=` argument, as seen in the following example that will allocate addresses beginning at `192.168.32.128` and increment upwards from there.

```
docker network create -d macvlan \
    --subnet=192.168.32.0/24 \
    --ip-range=192.168.32.128/25 \
    --gateway=192.168.32.254 \
    -o parent=eth0 macnet32

# Start a container and verify the address is 192.168.32.128
docker run --net=macnet32 -it --rm alpine /bin/sh
```

The network can then be deleted with:

```
docker network rm
```

- **Note:** In both Macvlan and Ipvlan you are not able to ping or communicate with the default namespace IP address. For example, if you create a container and try to ping the Docker host's `eth0` it will **not** work. That traffic is explicitly filtered by the kernel modules themselves to offer additional provider isolation and security.

For more on Docker networking commands see [Working with Docker network commands](https://docs.docker.com/engine/userguide/networking/work-with-networks/)

### Ipvlan L2 Mode Example Usage

The ipvlan `L2` mode example is virtually identical to the macvlan `bridge` mode example.
The driver is specified with the `-d driver_name` option. In this case, `-d ipvlan`.

![Simple Ipvlan L2 Mode Example](images/ipvlan_l2_simple.png)

The parent interface in the next example `-o parent=eth0` is configured as follows:

```
ip addr show eth0
3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0
```

Use the network from the host's interface as the `--subnet` in the `docker network create`. The container will be attached to the same network as the host interface, as set via the `-o parent=` option.

Create the ipvlan network and run a container attached to it:

```
# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified)
docker network create -d ipvlan \
    --subnet=192.168.1.0/24 \
    --gateway=192.168.1.1 \
    -o ipvlan_mode=l2 \
    -o parent=eth0 db_net

# Start a container on the db_net network
docker run --net=db_net -it --rm alpine /bin/sh

# NOTE: the containers can NOT ping the underlying host interfaces as
# they are intentionally filtered by Linux for additional isolation.
```

The default mode for Ipvlan is `l2`. The default mode for Macvlan is `bridge`. If `-o ipvlan_mode=` or `-o macvlan_mode=` are left unspecified, the default modes will be used. Similarly, if the `--gateway` is left empty, the first usable address on the network will be set as the gateway. For example, if the subnet provided in the network create is `--subnet=192.168.1.0/24` then the gateway the container receives is `192.168.1.1`.

To help understand how this mode interacts with other hosts, the following figure shows the same layer 2 segment between two Docker hosts, which applies to both Macvlan Bridge mode and Ipvlan L2 mode.

![Multiple Ipvlan and Macvlan Hosts](images/macvlan-bridge-ipvlan-l2.png)

The following will create the exact same network as the network `db_net` created prior, with the driver defaults for `--gateway=192.168.1.1` and `-o ipvlan_mode=l2`.

```
# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified)
docker network create -d ipvlan \
    --subnet=192.168.1.0/24 \
    -o parent=eth0 db_net_ipv

# Start a container with an explicit name in daemon mode
docker run --net=db_net_ipv --name=ipv1 -itd alpine /bin/sh

# Start a second container and ping using the container name
# to see the docker included name resolution functionality
docker run --net=db_net_ipv --name=ipv2 -it --rm alpine /bin/sh
ping -c 4 ipv1

# NOTE: the containers can NOT ping the underlying host interfaces as
# they are intentionally filtered by Linux for additional isolation.
```

The drivers also support the `--internal` flag, which will completely isolate containers on a network from any communications external to that network. Since network isolation is tightly coupled to the network's parent interface, the result of leaving the `-o parent=` option off of a network create is exactly the same as the `--internal` option. If the parent interface is not specified or the `--internal` flag is used, a netlink type `dummy` parent interface is created for the user and used as the parent interface, effectively isolating the network completely.
The following two `docker network create` examples result in identical networks that you can attach containers to:

```
# Empty '-o parent=' creates an isolated network
docker network create -d ipvlan \
    --subnet=192.168.10.0/24 isolated1

# Explicit '--internal' flag is the same:
docker network create -d ipvlan \
    --subnet=192.168.11.0/24 --internal isolated2

# Even the '--subnet=' can be left empty and the default
# IPAM subnet of 172.18.0.0/16 will be assigned
docker network create -d ipvlan isolated3

docker run --net=isolated1 --name=cid1 -it --rm alpine /bin/sh
docker run --net=isolated2 --name=cid2 -it --rm alpine /bin/sh
docker run --net=isolated3 --name=cid3 -it --rm alpine /bin/sh

# To attach to any use `docker exec` and start a shell
docker exec -it cid1 /bin/sh
docker exec -it cid2 /bin/sh
docker exec -it cid3 /bin/sh
```

### Macvlan 802.1q Trunk Bridge Mode Example Usage

VLANs (Virtual Local Area Networks) have long been a primary means of virtualizing data center networks and are still in virtually all existing networks today. VLANs work by tagging a Layer-2 isolation domain with a 12-bit identifier ranging from 1-4094 that is inserted into a packet header, enabling a logical grouping of a single or multiple subnets of both IPv4 and IPv6. It is very common for network operators to separate traffic using VLANs based on a subnet's function or security profile, such as `web`, `db` or any other isolation need.

It is very common to have a compute host requirement of running multiple virtual networks concurrently on a host. Linux networking has long supported VLAN tagging, also known by its standard 802.1q, for maintaining datapath isolation between networks. The Ethernet link connected to a Docker host can be configured to support the 802.1q VLAN IDs by creating Linux sub-interfaces, each one dedicated to a unique VLAN ID.

![Simple Ipvlan L2 Mode Example](images/multi_tenant_8021q_vlans.png)

Trunking 802.1q to a Linux host is notoriously painful for many in operations. It requires configuration file changes in order to be persistent through a reboot. If a bridge is involved, a physical NIC needs to be moved into the bridge and the bridge then gets the IP address. This has led to many a stranded server, since the risk of cutting off access during that convoluted process is high.

Like all of the Docker network drivers, the overarching goal is to alleviate the operational pains of managing network resources. To that end, when a network is created with a parent sub-interface that does not exist, the drivers create the VLAN tagged interfaces while creating the network.

In the case of a host reboot, instead of needing to modify often complex network configuration files, the driver will recreate all network links when the Docker daemon restarts. The driver tracks whether it created the VLAN tagged sub-interface originally with the network create, and will **only** recreate the sub-interface after a restart, or delete the link on `docker network rm`, if it created the link in the first place with `docker network create`.

If the user doesn't want Docker to modify the `-o parent` sub-interface, the user simply needs to pass an existing link as the parent interface. Parent interfaces such as `eth0` are not deleted, only sub-interfaces that are not master links.

For the driver to add/delete the vlan sub-interfaces the format needs to be `interface_name.vlan_tag`.
For example: `eth0.50` denotes a parent interface of `eth0` with a slave of `eth0.50` tagged with vlan id `50`. The equivalent `ip link` command would be `ip link add link eth0 name eth0.50 type vlan id 50`.

Replace `macvlan` with `ipvlan` in the `-d` driver argument to create ipvlan 802.1q trunks instead.

**Vlan ID 50**

In the first network tagged and isolated by the Docker host, `eth0.50` is the parent interface tagged with vlan id `50` specified with `-o parent=eth0.50`. Other naming formats can be used, but the links need to be added and deleted manually using `ip link` or Linux configuration files. As long as the `-o parent` exists, anything can be used as long as it is compliant with Linux netlink.

```
# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged
docker network create -d macvlan \
    --subnet=192.168.50.0/24 \
    --gateway=192.168.50.1 \
    -o parent=eth0.50 macvlan50

# In two separate terminals, start a Docker container and the containers can now ping one another.
docker run --net=macvlan50 -it --name macvlan_test5 --rm alpine /bin/sh
docker run --net=macvlan50 -it --name macvlan_test6 --rm alpine /bin/sh
```

**Vlan ID 60**

In the second network, tagged and isolated by the Docker host, `eth0.60` is the parent interface tagged with vlan id `60` specified with `-o parent=eth0.60`. The `macvlan_mode=` defaults to `macvlan_mode=bridge`. It can also be explicitly set with the same result, as shown in the next example.

```
# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged.
docker network create -d macvlan \
    --subnet=192.168.60.0/24 \
    --gateway=192.168.60.1 \
    -o parent=eth0.60 \
    -o macvlan_mode=bridge macvlan60

# In two separate terminals, start a Docker container and the containers can now ping one another.
docker run --net=macvlan60 -it --name macvlan_test7 --rm alpine /bin/sh
docker run --net=macvlan60 -it --name macvlan_test8 --rm alpine /bin/sh
```

**Example:** Multi-Subnet Macvlan 802.1q Trunking

The same as the example before, except there is an additional subnet bound to the network that the user can choose to provision containers on. In MacVlan/Bridge mode, containers can only ping one another if they are on the same subnet/broadcast domain, unless there is an external router that routes the traffic (answers ARP etc) between the two subnets.

```
### Create multiple L2 subnets
docker network create -d macvlan \
    --subnet=192.168.210.0/24 \
    --subnet=192.168.212.0/24 \
    --gateway=192.168.210.254 \
    --gateway=192.168.212.254 \
    -o parent=eth0.210 \
    -o macvlan_mode=bridge macvlan210

# Test 192.168.210.0/24 connectivity between containers
docker run --net=macvlan210 --ip=192.168.210.10 -itd alpine /bin/sh
docker run --net=macvlan210 --ip=192.168.210.9 -it --rm alpine ping -c 2 192.168.210.10

# Test 192.168.212.0/24 connectivity between containers
docker run --net=macvlan210 --ip=192.168.212.10 -itd alpine /bin/sh
docker run --net=macvlan210 --ip=192.168.212.9 -it --rm alpine ping -c 2 192.168.212.10
```

### Ipvlan 802.1q Trunk L2 Mode Example Usage

Architecturally, Ipvlan L2 mode trunking is the same as Macvlan with regard to gateways and L2 path isolation. There are nuances where Ipvlan can be advantageous: CAM table pressure in ToR switches, switches that allow only one MAC per port, and MAC address exhaustion on a host's parent NIC, to name a few. The 802.1q trunk scenario looks the same.
-
-Hosts on the same VLAN are typically on the same subnet and almost always are grouped together based on their security policy. In most scenarios, a multi-tier application is tiered into different subnets because the security profile of each process requires some form of isolation. For example, hosting your credit card processing on the same virtual network as the frontend webserver would be a regulatory compliance issue, and would also circumvent the long-standing best practice of layered defense-in-depth architectures. VLANs, or the equivalent VNI (Virtual Network Identifier) when using the Overlay driver, are the first step in isolating tenant traffic.
-
-![Docker VLANs in Depth](images/vlans-deeper-look.png)
-
-The Linux sub-interface tagged with a vlan can either already exist or will be created when you call `docker network create`. `docker network rm` will delete the sub-interface. Parent interfaces such as `eth0` are not deleted, only sub-interfaces with a netlink parent index > 0.
-
-For the driver to add/delete the vlan sub-interfaces, the format needs to be `interface_name.vlan_tag`. Other sub-interface naming can be used as the specified parent, but the link will not be deleted automatically when `docker network rm` is invoked.
-
-The option to use either existing parent vlan sub-interfaces or to let Docker manage them enables the user to either completely manage the Linux interfaces and networking, or to let Docker create and delete the vlan parent sub-interfaces (netlink `ip link`) with no effort from the user.
-
-For example: use `eth0.10` to denote a sub-interface of `eth0` tagged with vlan id `10`. The equivalent `ip link` command would be `ip link add link eth0 name eth0.10 type vlan id 10`.
-
-The example creates the vlan tagged networks and then starts two containers to test connectivity between them. Containers on different VLANs cannot ping one another without a router routing between the two networks. The default namespace is not reachable, per ipvlan design, in order to isolate container namespaces from the underlying host.
-
-**Vlan ID 20**
-
-In the first network, tagged and isolated by the Docker host, `eth0.20` is the parent interface tagged with vlan id `20`, specified with `-o parent=eth0.20`. Other naming formats can be used, but the links then need to be added and deleted manually using `ip link` or Linux configuration files. As long as the `-o parent` exists, anything can be used if it is compliant with Linux netlink.
-
-```
-# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged
-docker network create -d ipvlan \
-    --subnet=192.168.20.0/24 \
-    --gateway=192.168.20.1 \
-    -o parent=eth0.20 ipvlan20
-
-# in two separate terminals, start a Docker container; the containers can now ping one another.
-docker run --net=ipvlan20 -it --name ivlan_test1 --rm alpine /bin/sh
-docker run --net=ipvlan20 -it --name ivlan_test2 --rm alpine /bin/sh
-```
-
-**Vlan ID 30**
-
-In the second network, tagged and isolated by the Docker host, `eth0.30` is the parent interface tagged with vlan id `30`, specified with `-o parent=eth0.30`. The `ipvlan_mode=` defaults to l2 mode, `ipvlan_mode=l2`. It can also be explicitly set, with the same result, as shown in the next example.
-
-```
-# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged.
-docker network create -d ipvlan \
-    --subnet=192.168.30.0/24 \
-    --gateway=192.168.30.1 \
-    -o parent=eth0.30 \
-    -o ipvlan_mode=l2 ipvlan30
-
-# in two separate terminals, start a Docker container; the containers can now ping one another.
-docker run --net=ipvlan30 -it --name ivlan_test3 --rm alpine /bin/sh
-docker run --net=ipvlan30 -it --name ivlan_test4 --rm alpine /bin/sh
-```
-
-The gateway is set inside of the container as the default gateway. That gateway would typically be an external router on the network.
-
-```
-$ ip route
-  default via 192.168.30.1 dev eth0
-  192.168.30.0/24 dev eth0  src 192.168.30.2
-```
-
-**Example:** Multi-Subnet Ipvlan L2 Mode, starting two containers on the same subnet and pinging one another. In order for `192.168.114.0/24` to reach `192.168.116.0/24`, an external router is required in L2 mode. L3 mode can route between subnets that share a common `-o parent=`. This same multi-subnet example is also valid for Macvlan `bridge` mode.
-
-Secondary addresses on network routers are common: as an address space becomes exhausted, another secondary is added to an L3 vlan interface, commonly referred to as a "switched virtual interface" (SVI).
-
-```
-docker network create -d ipvlan \
-    --subnet=192.168.114.0/24 --subnet=192.168.116.0/24 \
-    --gateway=192.168.114.254 --gateway=192.168.116.254 \
-    -o parent=eth0.114 \
-    -o ipvlan_mode=l2 ipvlan114
-
-docker run --net=ipvlan114 --ip=192.168.114.10 -it --rm alpine /bin/sh
-docker run --net=ipvlan114 --ip=192.168.114.11 -it --rm alpine /bin/sh
-```
-
-A key takeaway is that operators have the ability to map their physical network into their virtual network for integrating containers into their environment with no operational overhauls required. NetOps simply drops an 802.1q trunk into the Docker host. That virtual link would be the `-o parent=` passed in the network creation. For untagged (non-VLAN) links it is as simple as `-o parent=eth0`; for 802.1q trunks with VLAN IDs, each network gets mapped to the corresponding VLAN/subnet from the network.
-
-For example, NetOps provides the VLAN IDs and the associated subnets for the VLANs being passed on the Ethernet link to the Docker host server. Those values are simply plugged into the `docker network create` commands when provisioning the Docker networks. These are persistent configurations that are applied every time the Docker engine starts, which alleviates having to manage often complex configuration files. The network interfaces can also be managed manually by being pre-created, in which case Docker networking will never modify them and will simply use them as parent interfaces. Example mappings from NetOps to Docker network commands are as follows:
-
-- VLAN: 10, Subnet: 172.16.80.0/24, Gateway: 172.16.80.1
-
-    - `--subnet=172.16.80.0/24 --gateway=172.16.80.1 -o parent=eth0.10`
-
-- VLAN: 20, Subnet: 172.16.50.0/22, Gateway: 172.16.50.1
-
-    - `--subnet=172.16.50.0/22 --gateway=172.16.50.1 -o parent=eth0.20`
-
-- VLAN: 30, Subnet: 10.1.0.0/16, Gateway: 10.1.100.1
-
-    - `--subnet=10.1.0.0/16 --gateway=10.1.100.1 -o parent=eth0.30`
-
-### IPVlan L3 Mode Example
-
-IPVlan will require routes to be distributed to each endpoint. The driver only builds the Ipvlan L3 mode port and attaches the container to the interface. Route distribution throughout a cluster is beyond the initial implementation of this single host scoped driver. In L3 mode, the Docker host is very similar to a router starting new networks in the container: they are on networks that the upstream network will not know about without route distribution.
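-Until such route distribution exists, the simplest workaround is a static route on the upstream gear. As a sketch, assuming the Docker host's `eth0` sits at `192.168.1.250` (as in the example below) and hosts the L3 container subnet `192.168.214.0/24`, a remote Linux router or host could reach those containers with:
-
-```
-# On an upstream router or remote host (not on the Docker host itself):
-# send traffic for the ipvlan L3 container subnet via the Docker host
-ip route add 192.168.214.0/24 via 192.168.1.250
-```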
-For those curious how Ipvlan L3 will fit into container networking, see the following examples.
-
-![Docker Ipvlan L3 Mode](images/ipvlan-l3.png)
-
-Ipvlan L3 mode drops all broadcast and multicast traffic. This reason alone makes Ipvlan L3 mode a prime candidate for those looking for massive scale and predictable network integrations. It is predictable and, in turn, will lead to greater uptime, because there is no bridging involved. Bridging loops have been responsible for high-profile outages that can be hard to pinpoint, depending on the size of the failure domain. This is due to the cascading nature of BPDUs (Bridge Protocol Data Units) that are flooded throughout a broadcast domain (VLAN) to find and block topology loops. Eliminating bridging domains, or at the least keeping them isolated to a pair of ToRs (top of rack switches), will reduce hard-to-troubleshoot bridging instabilities. Macvlan Bridge and Ipvlan L2 modes are well suited for isolated VLANs only trunked into a pair of ToRs that can provide a loop-free, non-blocking fabric. The next step further is to route at the edge via Ipvlan L3 mode, which reduces the failure domain to the local host only.
-
-- L3 mode needs to be on a separate subnet from the default namespace, since it requires a netlink route in the default namespace pointing to the Ipvlan parent interface.
-
-- The parent interface used in this example is `eth0`, and it is on the subnet `192.168.1.0/24`. Notice the `docker network` is **not** on the same subnet as `eth0`.
-
-- Unlike macvlan bridge mode and ipvlan l2 mode, different subnets/networks can ping one another as long as they share the same parent interface `-o parent=`.
-
-```
-ip a show eth0
-3: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
-    link/ether 00:50:56:39:45:2e brd ff:ff:ff:ff:ff:ff
-    inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0
-```
-
-A traditional gateway doesn't mean much to an L3 mode Ipvlan interface, since there is no broadcast traffic allowed. Because of that, the container default gateway simply points to the container's `eth0` device. See the CLI output of `ip route` or `ip -6 route` from inside an L3 container below for details.
-
-The mode `-o ipvlan_mode=l3` must be explicitly specified, since the default ipvlan mode is `l2`.
-
-The following example does not specify a parent interface. The network drivers will create a dummy type link for the user rather than rejecting the network creation, which isolates containers to communicating only with one another.
-
-```
-# Create the Ipvlan L3 network
-docker network create -d ipvlan \
-    --subnet=192.168.214.0/24 \
-    --subnet=10.1.214.0/24 \
-    -o ipvlan_mode=l3 ipnet210
-
-# Start a container on each subnet
-docker run --net=ipnet210 --ip=192.168.214.10 -itd alpine /bin/sh
-docker run --net=ipnet210 --ip=10.1.214.10 -itd alpine /bin/sh
-
-# Test L3 connectivity from 192.168.214.0/24 to 10.1.214.0/24
-docker run --net=ipnet210 --ip=192.168.214.9 -it --rm alpine ping -c 2 10.1.214.10
-
-# Test L3 connectivity from 10.1.214.0/24 to 192.168.214.0/24
-docker run --net=ipnet210 --ip=10.1.214.9 -it --rm alpine ping -c 2 192.168.214.10
-
-```
-
-Notice there is no `--gateway=` option in the network create. The field is ignored if one is specified in `l3` mode.
-
-Take a look at the container routing table from inside of the container:
-
-```
-# Inside an L3 mode container
-$ ip route
-  default dev eth0
-  192.168.120.0/24 dev eth0  src 192.168.120.2
-```
-
-In order to ping the containers from a remote Docker host, or for a container to be able to ping a remote host, the remote host or the physical network in between needs to have a route pointing to the host IP address of the container's Docker host eth interface. More on this as we evolve the Ipvlan `L3` story.
-
-### Dual Stack IPv4 IPv6 Macvlan Bridge Mode
-
-**Example:** Macvlan Bridge mode, 802.1q trunk, VLAN ID: 218, Multi-Subnet, Dual Stack
-
-```
-# Create multiple bridge subnets with a gateway of x.x.x.1:
-docker network create -d macvlan \
-    --subnet=192.168.216.0/24 --subnet=192.168.218.0/24 \
-    --gateway=192.168.216.1 --gateway=192.168.218.1 \
-    --subnet=2001:db8:abc8::/64 --gateway=2001:db8:abc8::10 \
-    -o parent=eth0.218 \
-    -o macvlan_mode=bridge macvlan216
-
-# Start a container on the first subnet 192.168.216.0/24
-docker run --net=macvlan216 --name=macnet216_test --ip=192.168.216.10 -itd alpine /bin/sh
-
-# Start a container on the second subnet 192.168.218.0/24
-docker run --net=macvlan216 --name=macnet218_test --ip=192.168.218.10 -itd alpine /bin/sh
-
-# Ping the first container started on the 192.168.216.0/24 subnet
-docker run --net=macvlan216 --ip=192.168.216.11 -it --rm alpine /bin/sh
-ping 192.168.216.10
-
-# Ping the first container started on the 192.168.218.0/24 subnet
-docker run --net=macvlan216 --ip=192.168.218.11 -it --rm alpine /bin/sh
-ping 192.168.218.10
-```
-
-View the details of one of the containers:
-
-```
-docker run --net=macvlan216 --ip=192.168.216.11 -it --rm alpine /bin/sh
-
-root@526f3060d759:/# ip a show eth0
-eth0@if92: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default
-    link/ether 8e:9a:99:25:b6:16 brd ff:ff:ff:ff:ff:ff
-    inet 192.168.216.11/24 scope global eth0
-       valid_lft forever preferred_lft forever
-    inet6 2001:db8:abc4::8c9a:99ff:fe25:b616/64 scope link tentative
-       valid_lft forever preferred_lft forever
-    inet6 2001:db8:abc8::2/64 scope link nodad
-       valid_lft forever preferred_lft forever
-
-# Specified v4 gateway of 192.168.216.1
-root@526f3060d759:/# ip route
-  default via 192.168.216.1 dev eth0
-  192.168.216.0/24 dev eth0  proto kernel  scope link  src 192.168.216.11
-
-# Specified v6 gateway of 2001:db8:abc8::10
-root@526f3060d759:/# ip -6 route
-  2001:db8:abc4::/64 dev eth0  proto kernel  metric 256
-  2001:db8:abc8::/64 dev eth0  proto kernel  metric 256
-  default via 2001:db8:abc8::10 dev eth0  metric 1024
-```
-
-### Dual Stack IPv4 IPv6 Ipvlan L2 Mode
-
-- Not only does Libnetwork give you complete control over IPv4 addressing, it also gives you total control over IPv6 addressing, as well as feature parity between the two address families.
-
-- The next example will start with IPv6 only. Start two containers on the same VLAN `139` and ping one another. Since the IPv4 subnet is not specified, the default IPAM will provision a default IPv4 subnet. That subnet is isolated unless the upstream network is explicitly routing it on VLAN `139`.
-
-```
-# Create a v6 network
-docker network create -d ipvlan \
-    --subnet=2001:db8:abc2::/64 --gateway=2001:db8:abc2::22 \
-    -o parent=eth0.139 v6ipvlan139
-
-# Start a container on the network
-docker run --net=v6ipvlan139 -it --rm alpine /bin/sh
-
-```
-
-View the container eth0 interface and v6 routing table:
-
-```
-eth0@if55: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default
-    link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff
-    inet 172.18.0.2/16 scope global eth0
-       valid_lft forever preferred_lft forever
-    inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link
-       valid_lft forever preferred_lft forever
-    inet6 2001:db8:abc2::1/64 scope link nodad
-       valid_lft forever preferred_lft forever
-
-root@5c1dc74b1daa:/# ip -6 route
-2001:db8:abc4::/64 dev eth0  proto kernel  metric 256
-2001:db8:abc2::/64 dev eth0  proto kernel  metric 256
-default via 2001:db8:abc2::22 dev eth0  metric 1024
-```
-
-Start a second container and ping the first container's v6 address.
-
-```
-$ docker run --net=v6ipvlan139 -it --rm alpine /bin/sh
-
-root@b817e42fcc54:/# ip a show eth0
-75: eth0@if55: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default
-    link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff
-    inet 172.18.0.3/16 scope global eth0
-       valid_lft forever preferred_lft forever
-    inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link tentative dadfailed
-       valid_lft forever preferred_lft forever
-    inet6 2001:db8:abc2::2/64 scope link nodad
-       valid_lft forever preferred_lft forever
-
-root@b817e42fcc54:/# ping6 2001:db8:abc2::1
-PING 2001:db8:abc2::1 (2001:db8:abc2::1): 56 data bytes
-64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=0 ttl=64 time=0.044 ms
-64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=1 ttl=64 time=0.058 ms
-
-2 packets transmitted, 2 packets received, 0% packet loss
-round-trip min/avg/max/stddev = 0.044/0.051/0.058/0.000 ms
-```
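-As noted above, because no IPv4 subnet was specified, the default IPAM also attached an isolated v4 subnet to this network; that is where the `172.18.0.x` addresses in the output above come from. One quick way to confirm what was provisioned, sketched here assuming the `-f` format flag of `docker network inspect` is available in your client version:
-
-```
-# Show the IPAM configuration (both v4 and v6) for the network
-docker network inspect -f '{{json .IPAM.Config}}' v6ipvlan139
-```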
-
-The next example will set up a dual stack IPv4/IPv6 network with an example VLAN ID of `140`.
-
-Next, create a network with two IPv4 subnets and one IPv6 subnet, all of which have explicit gateways:
-
-```
-docker network create -d ipvlan \
-    --subnet=192.168.140.0/24 --subnet=192.168.142.0/24 \
-    --gateway=192.168.140.1 --gateway=192.168.142.1 \
-    --subnet=2001:db8:abc9::/64 --gateway=2001:db8:abc9::22 \
-    -o parent=eth0.140 \
-    -o ipvlan_mode=l2 ipvlan140
-```
-
-Start a container and view eth0 along with both the v4 & v6 routing tables:
-
-```
-docker run --net=ipvlan140 -it --rm alpine /bin/sh
-
-root@3cce0d3575f3:/# ip a show eth0
-78: eth0@if77: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default
-    link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff
-    inet 192.168.140.2/24 scope global eth0
-       valid_lft forever preferred_lft forever
-    inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link
-       valid_lft forever preferred_lft forever
-    inet6 2001:db8:abc9::1/64 scope link nodad
-       valid_lft forever preferred_lft forever
-
-root@3cce0d3575f3:/# ip route
-default via 192.168.140.1 dev eth0
-192.168.140.0/24 dev eth0  proto kernel  scope link  src 192.168.140.2
-
-root@3cce0d3575f3:/# ip -6 route
-2001:db8:abc4::/64 dev eth0  proto kernel  metric 256
-2001:db8:abc9::/64 dev eth0  proto kernel  metric 256
-default via 2001:db8:abc9::22 dev eth0  metric 1024
-```
-
-Start a second container with a specific `--ip` address and ping the first container using IPv4 packets:
-
-```
-docker run --net=ipvlan140 --ip=192.168.140.10 -it --rm alpine /bin/sh
-```
-
-**Note**: Different subnets on the same parent interface in both Ipvlan `L2` mode and Macvlan `bridge` mode cannot ping one another. That requires a router to proxy-arp the requests with a secondary subnet. However, Ipvlan `L3` will route the unicast traffic between disparate subnets, as long as they share the same `-o parent` parent link.
-
-### Dual Stack IPv4 IPv6 Ipvlan L3 Mode
-
-**Example:** IpVlan L3 Mode Dual Stack IPv4/IPv6, Multi-Subnet w/ 802.1q Vlan Tag: 118
-
-As in all of the examples, a tagged VLAN interface does not have to be used. The sub-interfaces can be swapped with `eth0`, `eth1`, `bond0`, or any other valid interface on the host other than the `lo` loopback.
-
-The primary difference you will see is that L3 mode does not create a default route with a next-hop, but rather sets a default route pointing to `dev eth0` only, since ARP/broadcasts/multicast are all filtered by Linux as per the design. Since the parent interface is essentially acting as a router, the parent interface IP and subnet need to be different from the container networks. That is the opposite of bridge and L2 modes, which need to be on the same subnet (broadcast domain) in order to forward broadcast and multicast packets.
-
-```
-# Create an IPv6+IPv4 Dual Stack Ipvlan L3 network
-# Gateways for both v4 and v6 are set to a dev, e.g. 'default dev eth0'
-docker network create -d ipvlan \
-    --subnet=192.168.110.0/24 \
-    --subnet=192.168.112.0/24 \
-    --subnet=2001:db8:abc6::/64 \
-    -o parent=eth0 \
-    -o ipvlan_mode=l3 ipnet110
-
-
-# Start a few containers on the network (ipnet110)
-# in separate terminals and check connectivity
-docker run --net=ipnet110 -it --rm alpine /bin/sh
-# Start a second container specifying the v6 address
-docker run --net=ipnet110 --ip6=2001:db8:abc6::10 -it --rm alpine /bin/sh
-# Start a third specifying the IPv4 address
-docker run --net=ipnet110 --ip=192.168.112.50 -it --rm alpine /bin/sh
-# Start a 4th specifying both the IPv4 and IPv6 addresses
-docker run --net=ipnet110 --ip6=2001:db8:abc6::50 --ip=192.168.112.51 -it --rm alpine /bin/sh
-```
-
-Interface and routing table outputs are as follows:
-
-```
-root@3a368b2a982e:/# ip a show eth0
-63: eth0@if59: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default
-    link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff
-    inet 192.168.112.2/24 scope global eth0
-       valid_lft forever preferred_lft forever
-    inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link
-       valid_lft forever preferred_lft forever
-    inet6 2001:db8:abc6::10/64 scope link nodad
-       valid_lft forever preferred_lft forever
-
-# Note the default route is simply the eth device because ARPs are filtered.
-root@3a368b2a982e:/# ip route
-  default dev eth0  scope link
-  192.168.112.0/24 dev eth0  proto kernel  scope link  src 192.168.112.2
-
-root@3a368b2a982e:/# ip -6 route
-2001:db8:abc4::/64 dev eth0  proto kernel  metric 256
-2001:db8:abc6::/64 dev eth0  proto kernel  metric 256
-default dev eth0  metric 1024
-```
-
-*Note:* There may be a bug when specifying `--ip6=` addresses: when you delete a container with a specified v6 address and then start a new container with the same v6 address, it throws the following error, as if the address isn't properly being released to the v6 pool. The container will then fail to unmount and be left dead.
-
-```
-docker: Error response from daemon: Address already in use.
-```
-
-### Manually Creating 802.1q Links
-
-**Vlan ID 40**
-
-If a user does not want the driver to create the vlan sub-interface, it simply needs to exist prior to the `docker network create`. If you have sub-interface naming that is not `interface.vlan_id`, it is honored in the `-o parent=` option, again as long as the interface exists and is up.
-
-Manually created links can be named anything you want. As long as they exist when the network is created, that is all that matters. Manually created links do not get deleted, regardless of the name, when the network is deleted with `docker network rm`.
-
-```
-# create a new sub-interface tied to dot1q vlan 40
-ip link add link eth0 name eth0.40 type vlan id 40
-
-# enable the new sub-interface
-ip link set eth0.40 up
-
-# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged
-docker network create -d ipvlan \
-    --subnet=192.168.40.0/24 \
-    --gateway=192.168.40.1 \
-    -o parent=eth0.40 ipvlan40
-
-# in two separate terminals, start a Docker container; the containers can now ping one another.
-docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh -docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh -``` - -**Example:** Vlan sub-interface manually created with any name: - -``` -# create a new sub interface tied to dot1q vlan 40 -ip link add link eth0 name foo type vlan id 40 - -# enable the new sub-interface -ip link set foo up - -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged -docker network create -d ipvlan \ - --subnet=192.168.40.0/24 --gateway=192.168.40.1 \ - -o parent=foo ipvlan40 - -# in two separate terminals, start a Docker container and the containers can now ping one another. -docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh -docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh -``` - -Manually created links can be cleaned up with: - -``` -ip link del foo -``` - -As with all of the Libnetwork drivers, they can be mixed and matched, even as far as running 3rd party ecosystem drivers in parallel for maximum flexibility to the Docker user. diff --git a/hack/make/binary b/hack/make/binary index e7813580b..445b48bda 100644 --- a/hack/make/binary +++ b/hack/make/binary @@ -3,16 +3,6 @@ set -e rm -rf "$DEST" # This script exists as backwards compatibility for CI -( - DEST="${DEST}-client" - ABS_DEST="${ABS_DEST}-client" - . hack/make/binary-client -) -( - DEST="${DEST}-daemon" - ABS_DEST="${ABS_DEST}-daemon" - . hack/make/binary-daemon -) ( DEST="${DEST}-cowman" ABS_DEST="${ABS_DEST}-cowman" diff --git a/hack/make/binary-client b/hack/make/binary-client deleted file mode 100644 index ac3d6d57b..000000000 --- a/hack/make/binary-client +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -[ -z "$KEEPDEST" ] && \ - rm -rf "$DEST" - -( - source "${MAKEDIR}/.binary-setup" - export BINARY_SHORT_NAME="$DOCKER_CLIENT_BINARY_NAME" - export SOURCE_PATH='./cmd/docker' - source "${MAKEDIR}/.binary" -) diff --git a/hack/make/binary-daemon b/hack/make/binary-daemon deleted file mode 100644 index e75c44c58..000000000 --- a/hack/make/binary-daemon +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e - -[ -z "$KEEPDEST" ] && \ - rm -rf "$DEST" - -( - source "${MAKEDIR}/.binary-setup" - export BINARY_SHORT_NAME="$DOCKER_DAEMON_BINARY_NAME" - export SOURCE_PATH='./cmd/dockerd' - source "${MAKEDIR}/.binary" - export BINARY_SHORT_NAME="$DOCKER_PROXY_BINARY_NAME" - export SOURCE_PATH='./vendor/src/github.com/docker/libnetwork/cmd/proxy' - source "${MAKEDIR}/.binary" - copy_containerd "$DEST" 'hash' -) diff --git a/hack/make/dynbinary b/hack/make/dynbinary index cc7b31548..d6c88fc40 100644 --- a/hack/make/dynbinary +++ b/hack/make/dynbinary @@ -2,16 +2,6 @@ set -e # This script exists as backwards compatibility for CI -( - DEST="${DEST}-client" - ABS_DEST="${ABS_DEST}-client" - . hack/make/dynbinary-client -) -( - DEST="${DEST}-daemon" - ABS_DEST="${ABS_DEST}-daemon" - . 
hack/make/dynbinary-daemon -) ( DEST="${DEST}-cowman" ABS_DEST="${ABS_DEST}-cowman" diff --git a/hack/make/dynbinary-client b/hack/make/dynbinary-client deleted file mode 100644 index 83ae3c453..000000000 --- a/hack/make/dynbinary-client +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -( - export BINARY_SHORT_NAME='docker' - export SOURCE_PATH='./cmd/docker' - export IAMSTATIC='false' - export LDFLAGS_STATIC_DOCKER='' - export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary - export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here - source "${MAKEDIR}/.binary" -) diff --git a/hack/make/dynbinary-daemon b/hack/make/dynbinary-daemon deleted file mode 100644 index 2d1ed2583..000000000 --- a/hack/make/dynbinary-daemon +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e - -( - export BINARY_SHORT_NAME='dockerd' - export SOURCE_PATH='./cmd/dockerd' - export IAMSTATIC='false' - export LDFLAGS_STATIC_DOCKER='' - export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary - export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here - source "${MAKEDIR}/.binary" - export BINARY_SHORT_NAME='docker-proxy' - export SOURCE_PATH='./vendor/src/github.com/docker/libnetwork/cmd/proxy' - export LDFLAGS_STATIC_DOCKER='-linkmode=external' - source "${MAKEDIR}/.binary" -) diff --git a/hack/make/install-binary-client b/hack/make/install-binary-client deleted file mode 100644 index 6c8045265..000000000 --- a/hack/make/install-binary-client +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -e -rm -rf "$DEST" - -( - DEST="$(dirname $DEST)/binary-client" - source "${MAKEDIR}/.binary-setup" - install_binary "${DEST}/${DOCKER_CLIENT_BINARY_NAME}" -) diff --git a/hack/make/install-binary-daemon b/hack/make/install-binary-daemon deleted file mode 100644 index e80d8431f..000000000 --- a/hack/make/install-binary-daemon +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -set -e -rm -rf "$DEST" - -( - DEST="$(dirname $DEST)/binary-daemon" - source "${MAKEDIR}/.binary-setup" - install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}" - install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}" -) diff --git a/image/fs.go b/image/fs.go deleted file mode 100644 index 955e1b853..000000000 --- a/image/fs.go +++ /dev/null @@ -1,175 +0,0 @@ -package image - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/ioutils" -) - -// IDWalkFunc is function called by StoreBackend.Walk -type IDWalkFunc func(id ID) error - -// StoreBackend provides interface for image.Store persistence -type StoreBackend interface { - Walk(f IDWalkFunc) error - Get(id ID) ([]byte, error) - Set(data []byte) (ID, error) - Delete(id ID) error - SetMetadata(id ID, key string, data []byte) error - GetMetadata(id ID, key string) ([]byte, error) - DeleteMetadata(id ID, key string) error -} - -// fs implements StoreBackend using the filesystem. 
-type fs struct { - sync.RWMutex - root string -} - -const ( - contentDirName = "content" - metadataDirName = "metadata" -) - -// NewFSStoreBackend returns new filesystem based backend for image.Store -func NewFSStoreBackend(root string) (StoreBackend, error) { - return newFSStore(root) -} - -func newFSStore(root string) (*fs, error) { - s := &fs{ - root: root, - } - if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { - return nil, err - } - if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { - return nil, err - } - return s, nil -} - -func (s *fs) contentFile(id ID) string { - dgst := digest.Digest(id) - return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) -} - -func (s *fs) metadataDir(id ID) string { - dgst := digest.Digest(id) - return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) -} - -// Walk calls the supplied callback for each image ID in the storage backend. -func (s *fs) Walk(f IDWalkFunc) error { - // Only Canonical digest (sha256) is currently supported - s.RLock() - dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) - s.RUnlock() - if err != nil { - return err - } - for _, v := range dir { - dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) - if err := dgst.Validate(); err != nil { - logrus.Debugf("Skipping invalid digest %s: %s", dgst, err) - continue - } - if err := f(ID(dgst)); err != nil { - return err - } - } - return nil -} - -// Get returns the content stored under a given ID. -func (s *fs) Get(id ID) ([]byte, error) { - s.RLock() - defer s.RUnlock() - - return s.get(id) -} - -func (s *fs) get(id ID) ([]byte, error) { - content, err := ioutil.ReadFile(s.contentFile(id)) - if err != nil { - return nil, err - } - - // todo: maybe optional - if ID(digest.FromBytes(content)) != id { - return nil, fmt.Errorf("failed to verify image: %v", id) - } - - return content, nil -} - -// Set stores content under a given ID. -func (s *fs) Set(data []byte) (ID, error) { - s.Lock() - defer s.Unlock() - - if len(data) == 0 { - return "", fmt.Errorf("Invalid empty data") - } - - id := ID(digest.FromBytes(data)) - if err := ioutils.AtomicWriteFile(s.contentFile(id), data, 0600); err != nil { - return "", err - } - - return id, nil -} - -// Delete removes content and metadata files associated with the ID. -func (s *fs) Delete(id ID) error { - s.Lock() - defer s.Unlock() - - if err := os.RemoveAll(s.metadataDir(id)); err != nil { - return err - } - if err := os.Remove(s.contentFile(id)); err != nil { - return err - } - return nil -} - -// SetMetadata sets metadata for a given ID. It fails if there's no base file. -func (s *fs) SetMetadata(id ID, key string, data []byte) error { - s.Lock() - defer s.Unlock() - if _, err := s.get(id); err != nil { - return err - } - - baseDir := filepath.Join(s.metadataDir(id)) - if err := os.MkdirAll(baseDir, 0700); err != nil { - return err - } - return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(id), key), data, 0600) -} - -// GetMetadata returns metadata for a given ID. -func (s *fs) GetMetadata(id ID, key string) ([]byte, error) { - s.RLock() - defer s.RUnlock() - - if _, err := s.get(id); err != nil { - return nil, err - } - return ioutil.ReadFile(filepath.Join(s.metadataDir(id), key)) -} - -// DeleteMetadata removes the metadata associated with an ID. 
-func (s *fs) DeleteMetadata(id ID, key string) error { - s.Lock() - defer s.Unlock() - - return os.RemoveAll(filepath.Join(s.metadataDir(id), key)) -} diff --git a/image/fs_test.go b/image/fs_test.go deleted file mode 100644 index 1a6f849f6..000000000 --- a/image/fs_test.go +++ /dev/null @@ -1,384 +0,0 @@ -package image - -import ( - "bytes" - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "errors" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/docker/distribution/digest" -) - -func TestFSGetSet(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - testGetSet(t, fs) -} - -func TestFSGetInvalidData(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - id, err := fs.Set([]byte("foobar")) - if err != nil { - t.Fatal(err) - } - - dgst := digest.Digest(id) - - if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600); err != nil { - t.Fatal(err) - } - - _, err = fs.Get(id) - if err == nil { - t.Fatal("Expected get to fail after data modification.") - } -} - -func TestFSInvalidSet(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - id := digest.FromBytes([]byte("foobar")) - err = os.Mkdir(filepath.Join(tmpdir, contentDirName, string(id.Algorithm()), id.Hex()), 0700) - if err != nil { - t.Fatal(err) - } - - _, err = fs.Set([]byte("foobar")) - if err == nil { - t.Fatal("Expecting error from invalid filesystem data.") - } -} - -func TestFSInvalidRoot(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - tcases := []struct { - root, invalidFile string - }{ - {"root", "root"}, - {"root", "root/content"}, - {"root", "root/metadata"}, - } - - for _, tc := range tcases { - root := filepath.Join(tmpdir, tc.root) - filePath := filepath.Join(tmpdir, tc.invalidFile) - err := os.MkdirAll(filepath.Dir(filePath), 0700) - if err != nil { - t.Fatal(err) - } - f, err := os.Create(filePath) - if err != nil { - t.Fatal(err) - } - f.Close() - - _, err = NewFSStoreBackend(root) - if err == nil { - t.Fatalf("Expected error from root %q and invlid file %q", tc.root, tc.invalidFile) - } - - os.RemoveAll(root) - } - -} - -func testMetadataGetSet(t *testing.T, store StoreBackend) { - id, err := store.Set([]byte("foo")) - if err != nil { - t.Fatal(err) - } - id2, err := store.Set([]byte("bar")) - if err != nil { - t.Fatal(err) - } - - tcases := []struct { - id ID - key string - value []byte - }{ - {id, "tkey", []byte("tval1")}, - {id, "tkey2", []byte("tval2")}, - {id2, "tkey", []byte("tval3")}, - } - - for _, tc := range tcases { - err = store.SetMetadata(tc.id, tc.key, tc.value) - if err != nil { - t.Fatal(err) - } - - actual, err := store.GetMetadata(tc.id, tc.key) - if err != nil { - t.Fatal(err) - } - if bytes.Compare(actual, tc.value) != 0 { - t.Fatalf("Metadata expected %q, got %q", tc.value, actual) - } - } - - _, err = store.GetMetadata(id2, "tkey2") - if err == nil { - t.Fatal("Expected error for getting metadata for unknown key") - } - - id3 := 
digest.FromBytes([]byte("baz")) - err = store.SetMetadata(ID(id3), "tkey", []byte("tval")) - if err == nil { - t.Fatal("Expected error for setting metadata for unknown ID.") - } - - _, err = store.GetMetadata(ID(id3), "tkey") - if err == nil { - t.Fatal("Expected error for getting metadata for unknown ID.") - } -} - -func TestFSMetadataGetSet(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - testMetadataGetSet(t, fs) -} - -func TestFSDelete(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - testDelete(t, fs) -} - -func TestFSWalker(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - testWalker(t, fs) -} - -func TestFSInvalidWalker(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - fooID, err := fs.Set([]byte("foo")) - if err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, "sha256/foobar"), []byte("foobar"), 0600); err != nil { - t.Fatal(err) - } - - n := 0 - err = fs.Walk(func(id ID) error { - if id != fooID { - t.Fatalf("Invalid walker ID %q, expected %q", id, fooID) - } - n++ - return nil - }) - if err != nil { - t.Fatalf("Invalid data should not have caused walker error, got %v", err) - } - if n != 1 { - t.Fatalf("Expected 1 walk initialization, got %d", n) - } -} - -func testGetSet(t *testing.T, store StoreBackend) { - type tcase struct { - input []byte - expected ID - } - tcases := []tcase{ - {[]byte("foobar"), ID("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")}, - } - - randomInput := make([]byte, 8*1024) - _, err := rand.Read(randomInput) - if err != nil { - t.Fatal(err) - } - // skipping use of digest pkg because its used by the implementation - h := sha256.New() - _, err = h.Write(randomInput) - if err != nil { - t.Fatal(err) - } - tcases = append(tcases, tcase{ - input: randomInput, - expected: ID("sha256:" + hex.EncodeToString(h.Sum(nil))), - }) - - for _, tc := range tcases { - id, err := store.Set([]byte(tc.input)) - if err != nil { - t.Fatal(err) - } - if id != tc.expected { - t.Fatalf("Expected ID %q, got %q", tc.expected, id) - } - } - - for _, emptyData := range [][]byte{nil, {}} { - _, err := store.Set(emptyData) - if err == nil { - t.Fatal("Expected error for nil input.") - } - } - - for _, tc := range tcases { - data, err := store.Get(tc.expected) - if err != nil { - t.Fatal(err) - } - if bytes.Compare(data, tc.input) != 0 { - t.Fatalf("Expected data %q, got %q", tc.input, data) - } - } - - for _, key := range []ID{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} { - _, err := store.Get(key) - if err == nil { - t.Fatalf("Expected error for ID %q.", key) - } - } - -} - -func testDelete(t *testing.T, store StoreBackend) { - id, err := store.Set([]byte("foo")) - if err != nil { - t.Fatal(err) - } - id2, err := store.Set([]byte("bar")) - if err != nil { - t.Fatal(err) - } - - err = 
store.Delete(id) - if err != nil { - t.Fatal(err) - } - - _, err = store.Get(id) - if err == nil { - t.Fatalf("Expected getting deleted item %q to fail", id) - } - _, err = store.Get(id2) - if err != nil { - t.Fatal(err) - } - - err = store.Delete(id2) - if err != nil { - t.Fatal(err) - } - _, err = store.Get(id2) - if err == nil { - t.Fatalf("Expected getting deleted item %q to fail", id2) - } -} - -func testWalker(t *testing.T, store StoreBackend) { - id, err := store.Set([]byte("foo")) - if err != nil { - t.Fatal(err) - } - id2, err := store.Set([]byte("bar")) - if err != nil { - t.Fatal(err) - } - - tcases := make(map[ID]struct{}) - tcases[id] = struct{}{} - tcases[id2] = struct{}{} - n := 0 - err = store.Walk(func(id ID) error { - delete(tcases, id) - n++ - return nil - }) - if err != nil { - t.Fatal(err) - } - - if n != 2 { - t.Fatalf("Expected 2 walk initializations, got %d", n) - } - if len(tcases) != 0 { - t.Fatalf("Expected empty unwalked set, got %+v", tcases) - } - - // stop on error - tcases = make(map[ID]struct{}) - tcases[id] = struct{}{} - err = store.Walk(func(id ID) error { - return errors.New("") - }) - if err == nil { - t.Fatalf("Exected error from walker.") - } -} diff --git a/image/image.go b/image/image.go deleted file mode 100644 index 7a05e649f..000000000 --- a/image/image.go +++ /dev/null @@ -1,140 +0,0 @@ -package image - -import ( - "encoding/json" - "errors" - "io" - "time" - - "github.com/docker/distribution/digest" - "github.com/docker/engine-api/types/container" -) - -// ID is the content-addressable ID of an image. -type ID digest.Digest - -func (id ID) String() string { - return digest.Digest(id).String() -} - -// V1Image stores the V1 image configuration. -type V1Image struct { - // ID a unique 64 character identifier of the image - ID string `json:"id,omitempty"` - // Parent id of the image - Parent string `json:"parent,omitempty"` - // Comment user added comment - Comment string `json:"comment,omitempty"` - // Created timestamp when image was created - Created time.Time `json:"created"` - // Container is the id of the container used to commit - Container string `json:"container,omitempty"` - // ContainerConfig is the configuration of the container that is committed into the image - ContainerConfig container.Config `json:"container_config,omitempty"` - // DockerVersion specifies version on which image is built - DockerVersion string `json:"docker_version,omitempty"` - // Author of the image - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *container.Config `json:"config,omitempty"` - // Architecture is the hardware that the image is build and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` - // Size is the total size of the image including all layers it is composed of - Size int64 `json:",omitempty"` -} - -// Image stores the image configuration -type Image struct { - V1Image - Parent ID `json:"parent,omitempty"` - RootFS *RootFS `json:"rootfs,omitempty"` - History []History `json:"history,omitempty"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - - // rawJSON caches the immutable JSON associated with this image. - rawJSON []byte - - // computedID is the ID computed from the hash of the image config. - // Not to be confused with the legacy V1 ID in V1Image. 
- computedID ID -} - -// RawJSON returns the immutable JSON associated with the image. -func (img *Image) RawJSON() []byte { - return img.rawJSON -} - -// ID returns the image's content-addressable ID. -func (img *Image) ID() ID { - return img.computedID -} - -// ImageID stringizes ID. -func (img *Image) ImageID() string { - return string(img.ID()) -} - -// RunConfig returns the image's container config. -func (img *Image) RunConfig() *container.Config { - return img.Config -} - -// MarshalJSON serializes the image to JSON. It sorts the top-level keys so -// that JSON that's been manipulated by a push/pull cycle with a legacy -// registry won't end up with a different key order. -func (img *Image) MarshalJSON() ([]byte, error) { - type MarshalImage Image - - pass1, err := json.Marshal(MarshalImage(*img)) - if err != nil { - return nil, err - } - - var c map[string]*json.RawMessage - if err := json.Unmarshal(pass1, &c); err != nil { - return nil, err - } - return json.Marshal(c) -} - -// History stores build commands that were used to create an image -type History struct { - // Created timestamp for build point - Created time.Time `json:"created"` - // Author of the build point - Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building image. - CreatedBy string `json:"created_by,omitempty"` - // Comment is custom message set by the user when creating the image. - Comment string `json:"comment,omitempty"` - // EmptyLayer is set to true if this history item did not generate a - // layer. Otherwise, the history item is associated with the next - // layer in the RootFS section. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Exporter provides interface for exporting and importing images -type Exporter interface { - Load(io.ReadCloser, io.Writer, bool) error - // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error - Save([]string, io.Writer) error -} - -// NewFromJSON creates an Image configuration from json. 
-func NewFromJSON(src []byte) (*Image, error) { - img := &Image{} - - if err := json.Unmarshal(src, img); err != nil { - return nil, err - } - if img.RootFS == nil { - return nil, errors.New("Invalid image JSON, no RootFS key.") - } - - img.rawJSON = src - - return img, nil -} diff --git a/image/image_test.go b/image/image_test.go deleted file mode 100644 index 525023b81..000000000 --- a/image/image_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package image - -import ( - "encoding/json" - "sort" - "strings" - "testing" -) - -const sampleImageJSON = `{ - "architecture": "amd64", - "os": "linux", - "config": {}, - "rootfs": { - "type": "layers", - "diff_ids": [] - } -}` - -func TestJSON(t *testing.T) { - img, err := NewFromJSON([]byte(sampleImageJSON)) - if err != nil { - t.Fatal(err) - } - rawJSON := img.RawJSON() - if string(rawJSON) != sampleImageJSON { - t.Fatalf("Raw JSON of config didn't match: expected %+v, got %v", sampleImageJSON, rawJSON) - } -} - -func TestInvalidJSON(t *testing.T) { - _, err := NewFromJSON([]byte("{}")) - if err == nil { - t.Fatal("Expected JSON parse error") - } -} - -func TestMarshalKeyOrder(t *testing.T) { - b, err := json.Marshal(&Image{ - V1Image: V1Image{ - Comment: "a", - Author: "b", - Architecture: "c", - }, - }) - if err != nil { - t.Fatal(err) - } - - expectedOrder := []string{"architecture", "author", "comment"} - var indexes []int - for _, k := range expectedOrder { - indexes = append(indexes, strings.Index(string(b), k)) - } - - if !sort.IntsAreSorted(indexes) { - t.Fatal("invalid key order in JSON: ", string(b)) - } -} diff --git a/image/rootfs.go b/image/rootfs.go deleted file mode 100644 index 76eaae0c2..000000000 --- a/image/rootfs.go +++ /dev/null @@ -1,16 +0,0 @@ -package image - -import "github.com/docker/docker/layer" - -// TypeLayers is used for RootFS.Type for filesystems organized into layers. -const TypeLayers = "layers" - -// NewRootFS returns empty RootFS struct -func NewRootFS() *RootFS { - return &RootFS{Type: TypeLayers} -} - -// Append appends a new diffID to rootfs -func (r *RootFS) Append(id layer.DiffID) { - r.DiffIDs = append(r.DiffIDs, id) -} diff --git a/image/rootfs_unix.go b/image/rootfs_unix.go deleted file mode 100644 index 83498f6c3..000000000 --- a/image/rootfs_unix.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !windows - -package image - -import "github.com/docker/docker/layer" - -// RootFS describes images root filesystem -// This is currently a placeholder that only supports layers. In the future -// this can be made into an interface that supports different implementations. -type RootFS struct { - Type string `json:"type"` - DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` -} - -// ChainID returns the ChainID for the top layer in RootFS. -func (r *RootFS) ChainID() layer.ChainID { - return layer.CreateChainID(r.DiffIDs) -} diff --git a/image/rootfs_windows.go b/image/rootfs_windows.go deleted file mode 100644 index c5bd5828b..000000000 --- a/image/rootfs_windows.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build windows - -package image - -import ( - "crypto/sha512" - "fmt" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/layer" -) - -// TypeLayersWithBase is used for RootFS.Type for Windows filesystems that have layers and a centrally-stored base layer. -const TypeLayersWithBase = "layers+base" - -// RootFS describes images root filesystem -// This is currently a placeholder that only supports layers. In the future -// this can be made into an interface that supports different implementations. 
-type RootFS struct { - Type string `json:"type"` - DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` - BaseLayer string `json:"base_layer,omitempty"` -} - -// BaseLayerID returns the 64 byte hex ID for the baselayer name. -func (r *RootFS) BaseLayerID() string { - if r.Type != TypeLayersWithBase { - panic("tried to get base layer ID without a base layer") - } - baseID := sha512.Sum384([]byte(r.BaseLayer)) - return fmt.Sprintf("%x", baseID[:32]) -} - -// ChainID returns the ChainID for the top layer in RootFS. -func (r *RootFS) ChainID() layer.ChainID { - ids := r.DiffIDs - if r.Type == TypeLayersWithBase { - // Add an extra ID for the base. - baseDiffID := layer.DiffID(digest.FromBytes([]byte(r.BaseLayerID()))) - ids = append([]layer.DiffID{baseDiffID}, ids...) - } - return layer.CreateChainID(ids) -} - -// NewRootFSWithBaseLayer returns a RootFS struct with a base layer -func NewRootFSWithBaseLayer(baseLayer string) *RootFS { - return &RootFS{Type: TypeLayersWithBase, BaseLayer: baseLayer} -} diff --git a/image/spec/v1.1.md b/image/spec/v1.1.md deleted file mode 100644 index 3a32b6bd3..000000000 --- a/image/spec/v1.1.md +++ /dev/null @@ -1,640 +0,0 @@ -# Docker Image Specification v1.1.0 - -An *Image* is an ordered collection of root filesystem changes and the -corresponding execution parameters for use within a container runtime. This -specification outlines the format of these filesystem changes and corresponding -parameters and describes how to create and use them for use with a container -runtime and execution tool. - -This version of the image specification was adopted starting in Docker 1.10. - -## Terminology - -This specification uses the following terms: - - - -## Image JSON Description - -Here is an example image JSON file: - -``` -{ - "created": "2015-10-31T22:22:56.015925234Z", - "author": "Alyssa P. Hacker <alyspdev@example.com>", - "architecture": "amd64", - "os": "linux", - "config": { - "User": "alice", - "Memory": 2048, - "MemorySwap": 4096, - "CpuShares": 8, - "ExposedPorts": { - "8080/tcp": {} - }, - "Env": [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "FOO=docker_is_a_really", - "BAR=great_tool_you_know" - ], - "Entrypoint": [ - "/bin/my-app-binary" - ], - "Cmd": [ - "--foreground", - "--config", - "/etc/my-app.d/default.cfg" - ], - "Volumes": { - "/var/job-result-data": {}, - "/var/log/my-app-logs": {}, - }, - "WorkingDir": "/home/alice", - }, - "rootfs": { - "diff_ids": [ - "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - ], - "type": "layers" - }, - "history": [ - { - "created": "2015-10-31T22:22:54.690851953Z", - "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" - }, - { - "created": "2015-10-31T22:22:55.613815829Z", - "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", - "empty_layer": true - } - ] -} -``` - -Note that image JSON files produced by Docker don't contain formatting -whitespace. It has been added to this example for clarity. - -### Image JSON Field Descriptions - -
-
-**created** `string`
-
-ISO-8601 formatted combined date and time at which the image was created.
-
-**author** `string`
-
-Gives the name and/or email address of the person or entity which created and is responsible for maintaining the image.
-
-**architecture** `string`
-
-The CPU architecture which the binaries in this image are built to run on. Possible values include:
-
-- `386`
-- `amd64`
-- `arm`
-
-More values may be supported in the future and any of these may or may not be supported by a given container runtime implementation.
-
-**os** `string`
-
-The name of the operating system which the image is built to run on. Possible values include:
-
-- `darwin`
-- `freebsd`
-- `linux`
-
-More values may be supported in the future and any of these may or may not be supported by a given container runtime implementation.
-
-**config** `struct`
-
-The execution parameters which should be used as a base when running a container using the image. This field can be `null`, in which case any execution parameters should be specified at creation of the container.
-
-*Container RunConfig Field Descriptions:*
-
-**User** `string`
-
-The username or UID which the process in the container should run as. This acts as a default value to use when the value is not specified when creating a container.
-
-All of the following are valid:
-
-- `user`
-- `uid`
-- `user:group`
-- `uid:gid`
-- `uid:group`
-- `user:gid`
-
-If `group`/`gid` is not specified, the default group and supplementary groups of the given `user`/`uid` in `/etc/passwd` from the container are applied.
-
-**Memory** `integer`
-
-Memory limit (in bytes). This acts as a default value to use when the value is not specified when creating a container.
-
-**MemorySwap** `integer`
-
-Total memory usage (memory + swap); set to `-1` to disable swap. This acts as a default value to use when the value is not specified when creating a container.
-
-**CpuShares** `integer`
-
-CPU shares (relative weight vs. other containers). This acts as a default value to use when the value is not specified when creating a container.
-
-**ExposedPorts** `struct`
-
-A set of ports to expose from a container running this image. This JSON structure value is unusual because it is a direct JSON serialization of the Go type `map[string]struct{}` and is represented in JSON as an object mapping its keys to an empty object. Here is an example:
-
-```
-{
-    "8080": {},
-    "53/udp": {},
-    "2356/tcp": {}
-}
-```
-
-Its keys can be in the format of:
-
-- `"port/tcp"`
-- `"port/udp"`
-- `"port"`
-
-with the default protocol being `"tcp"` if not specified. These values act as defaults and are merged with any specified when creating a container.
-
-**Env** `array of strings`
-
-Entries are in the format of `VARNAME="var value"`. These values act as defaults and are merged with any specified when creating a container.
-
-**Entrypoint** `array of strings`
-
-A list of arguments to use as the command to execute when the container starts. This value acts as a default and is replaced by an entrypoint specified when creating a container.
-
-**Cmd** `array of strings`
-
-Default arguments to the entry point of the container. These values act as defaults and are replaced with any specified when creating a container. If an `Entrypoint` value is not specified, then the first entry of the `Cmd` array should be interpreted as the executable to run.
-
-**Volumes** `struct`
-
-A set of directories which should be created as data volumes in a container running this image. This JSON structure value is unusual because it is a direct JSON serialization of the Go type `map[string]struct{}` and is represented in JSON as an object mapping its keys to an empty object. Here is an example:
-
-```
-{
-    "/var/my-app-data/": {},
-    "/etc/some-config.d/": {}
-}
-```
-
-**WorkingDir** `string`
-
-Sets the current working directory of the entry point process in the container. This value acts as a default and is replaced by a working directory specified when creating a container.
-
-**rootfs** `struct`
-
-The rootfs key references the layer content addresses used by the image. This makes the image config hash depend on the filesystem hash. `rootfs` has two subkeys:
-
-- `type` is usually set to `layers`. There is also a Windows-specific value `layers+base` that allows a base layer to be specified in a field of `rootfs` called `base_layer`.
-- `diff_ids` is an array of layer content hashes (`DiffIDs`), in order from bottom-most to top-most.
-
-Here is an example rootfs section:
-
-```
-"rootfs": {
-  "diff_ids": [
-    "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
-    "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
-    "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
-  ],
-  "type": "layers"
-}
-```
-
-**history** `struct`
-
-`history` is an array of objects describing the history of each layer. The array is ordered from bottom-most layer to top-most layer. The object has the following fields:
-
-- `created`: Creation time, expressed as an ISO-8601 formatted combined date and time
-- `author`: The author of the build point
-- `created_by`: The command which created the layer
-- `comment`: A custom message set when creating the layer
-- `empty_layer`: This field is used to mark if the history item created a filesystem diff. It is set to true if this history item doesn't correspond to an actual layer in the rootfs section (for example, a command like ENV which results in no change to the filesystem).
-
-Here is an example history section:
-
-```
-"history": [
-  {
-    "created": "2015-10-31T22:22:54.690851953Z",
-    "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
-  },
-  {
-    "created": "2015-10-31T22:22:55.613815829Z",
-    "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
-    "empty_layer": true
-  }
-]
-```
-
-Any extra fields in the Image JSON struct are considered implementation specific and should be ignored by any implementations which are unable to interpret them.
-
-## Creating an Image Filesystem Changeset
-
-An example of creating an Image Filesystem Changeset follows.
-
-An image root filesystem is first created as an empty directory. Here is the initial empty directory structure for a changeset using the randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are generated based on the content](#id_desc)).
-
-```
-c3167915dc9d/
-```
-
-Files and directories are then created:
-
-```
-c3167915dc9d/
-    etc/
-        my-app-config
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-The `c3167915dc9d` directory is then committed as a plain Tar archive with entries for the following files:
-
-```
-etc/my-app-config
-bin/my-app-binary
-bin/my-app-tools
-```
-
-To make changes to the filesystem of this container image, create a new directory, such as `f60c56784b83`, and initialize it with a snapshot of the parent image's root filesystem, so that the directory is identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very efficient:
-
-```
-f60c56784b83/
-    etc/
-        my-app-config
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-This example change is going to add a configuration directory at `/etc/my-app.d` which contains a default config file. There's also a change to the `my-app-tools` binary to handle the config layout change. The `f60c56784b83` directory then looks like this:
-
-```
-f60c56784b83/
-    etc/
-        my-app.d/
-            default.cfg
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-This reflects the removal of `/etc/my-app-config` and creation of a file and directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been replaced with an updated version. Before committing this directory to a changeset, because it has a parent image, it is first compared with the directory tree of the parent snapshot, `c3167915dc9d`, looking for files and directories that have been added, modified, or removed. The following changeset is found:
-
-```
-Added:      /etc/my-app.d/default.cfg
-Modified:   /bin/my-app-tools
-Deleted:    /etc/my-app-config
-```
-
-A Tar Archive is then created which contains *only* this changeset: the added and modified files and directories in their entirety, and for each deleted item an entry for an empty file at the same location but with the basename of the deleted file or directory prefixed with `.wh.`. The filenames prefixed with `.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible to create an image root filesystem which contains a file or directory with a name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has the following entries:
-
-```
-/etc/my-app.d/default.cfg
-/bin/my-app-tools
-/etc/.wh.my-app-config
-```
-
-Any given image is likely to be composed of several of these Image Filesystem Changeset tar archives.
-
-## Combined Image JSON + Filesystem Changeset Format
-
-There is also a format for a single archive which contains complete information about an image, including:
-
- - repository names/tags
- - image configuration JSON file
- - all tar archives of each layer filesystem changesets
-
-For example, here's what the full archive of `library/busybox` is (displayed in `tree` format):
-
-```
-.
-├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json
-├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a
-│   ├── VERSION
-│   ├── json
-│   └── layer.tar
-├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198
-│   ├── VERSION
-│   ├── json
-│   └── layer.tar
-├── manifest.json
-└── repositories
-```
-
-There is a directory for each layer in the image. Each directory is named with
-a 64-character hex name that is deterministically generated from the layer
-information. These names are not necessarily layer DiffIDs or ChainIDs. Each of
-these directories contains 3 files:
-
- * `VERSION` - The schema version of the `json` file
- * `json` - The legacy JSON metadata for an image layer. In this version of
-   the image specification, layers don't have JSON metadata, but in
-   [version 1](v1.md), they did. A file is created for each layer in the
-   v1 format for backward compatibility.
- * `layer.tar` - The Tar archive of the filesystem changeset for an image
-   layer.
-
-Note that this directory layout is only important for backward compatibility.
-Current implementations use the paths specified in `manifest.json`.
-
-The content of the `VERSION` files is simply the semantic version of the JSON
-metadata schema:
-
-```
-1.0
-```
-
-The `repositories` file is another JSON file which describes names/tags:
-
-```
-{
-    "busybox":{
-        "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a"
-    }
-}
-```
-
-Every key in this object is the name of a repository, and maps to a collection
-of tag suffixes. Each tag maps to the ID of the image represented by that tag.
-This file is only used for backwards compatibility. Current implementations use
-the `manifest.json` file instead.
-
-The `manifest.json` file provides the image JSON for the top-level image, and
-optionally for parent images that this image was derived from. It consists of
-an array of metadata entries:
-
-```
-[
-  {
-    "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json",
-    "RepoTags": ["busybox:latest"],
-    "Layers": [
-      "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar",
-      "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar"
-    ]
-  }
-]
-```
-
-There is an entry in the array for each image.
-
-The `Config` field references another file in the tar which includes the image
-JSON for this image.
-
-The `RepoTags` field lists references pointing to this image.
-
-The `Layers` field points to the filesystem changeset tars.
-
-An optional `Parent` field references the image ID of the parent image. This
-parent must be part of the same `manifest.json` file.
-
-This file shouldn't be confused with the distribution manifest, used to push
-and pull images.
-
-Generally, implementations that support this version of the spec will use
-the `manifest.json` file if available, and older implementations will use the
-legacy `*/json` files and `repositories`.
diff --git a/image/spec/v1.md b/image/spec/v1.md
deleted file mode 100644
index 57a599b8f..000000000
--- a/image/spec/v1.md
+++ /dev/null
@@ -1,573 +0,0 @@
-# Docker Image Specification v1.0.0
-
-An *Image* is an ordered collection of root filesystem changes and the
-corresponding execution parameters for use within a container runtime. This
-specification outlines the format of these filesystem changes and corresponding
-parameters and describes how to create and use them with a container runtime
-and execution tool.
- -## Terminology - -This specification uses the following terms: - -
-
- Layer -
-
- Images are composed of layers. Image layer is a general - term which may be used to refer to one or both of the following: - -
    -
  1. The metadata for the layer, described in the JSON format.
  2. -
  3. The filesystem changes described by a layer.
  4. -
- - To refer to the former you may use the term Layer JSON or - Layer Metadata. To refer to the latter you may use the term - Image Filesystem Changeset or Image Diff. -
-
- Image JSON -
-
- Each layer has an associated JSON structure which describes some - basic information about the image such as date created, author, and the - ID of its parent image as well as execution/runtime configuration like - its entry point, default arguments, CPU/memory shares, networking, and - volumes. -
-
- Image Filesystem Changeset -
-
- Each layer has an archive of the files which have been added, changed, - or deleted relative to its parent layer. Using a layer-based or union - filesystem such as AUFS, or by computing the diff from filesystem - snapshots, the filesystem changeset can be used to present a series of - image layers as if they were one cohesive filesystem. -
-
- Image ID -
-
-        Each layer is given an ID upon its creation. It is
-        represented as a hexadecimal encoding of 256 bits, e.g.,
-        a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9.
-        Image IDs should be sufficiently random so as to be globally unique.
-        32 bytes read from /dev/urandom is sufficient for all
-        practical purposes (a generation sketch follows this terminology
-        list). Alternatively, an image ID may be derived as a
-        cryptographic hash of image contents as the result is considered
-        indistinguishable from random. The choice is left up to implementors.
-
-
- Image Parent -
-
- Most layer metadata structs contain a parent field which - refers to the Image from which another directly descends. An image - contains a separate JSON metadata file and set of changes relative to - the filesystem of its parent image. Image Ancestor and - Image Descendant are also common terms. -
-
- Image Checksum -
-
- Layer metadata structs contain a cryptographic hash of the contents of - the layer's filesystem changeset. Though the set of changes exists as a - simple Tar archive, two archives with identical filenames and content - will have different SHA digests if the last-access or last-modified - times of any entries differ. For this reason, image checksums are - generated using the TarSum algorithm which produces a cryptographic - hash of file contents and selected headers only. Details of this - algorithm are described in the separate TarSum specification. -
-
- Tag -
-
-        A tag serves to map a descriptive, user-given name to any single image
-        ID. An image name suffix (the name component after :) is
-        often referred to as a tag as well, though it strictly refers to the
-        full name of an image. Acceptable values for a tag suffix are
-        implementation specific, but they SHOULD be limited to the set of
-        alphanumeric characters [a-zA-Z0-9], punctuation
-        characters [._-], and MUST NOT contain a :
-        character.
-
-
- Repository -
-
-        A collection of tags grouped under a common prefix (the name component
-        before :). For example, in an image tagged with the name
-        my-app:3.1.4, my-app is the Repository
-        component of the name. Acceptable values for a repository name are
-        implementation specific, but they SHOULD be limited to the set of
-        alphanumeric characters [a-zA-Z0-9], and punctuation
-        characters [._-], however it MAY contain additional
-        / and : characters for organizational
-        purposes, with the last : character being interpreted as
-        dividing the repository component of the name from the tag suffix
-        component.
-
-
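-
-The Image ID definition above permits IDs generated from 32 random bytes,
-hex-encoded. A minimal, illustrative Go sketch of that scheme (not part of
-this specification):
-
-```
-package main
-
-import (
-	"crypto/rand"
-	"encoding/hex"
-	"fmt"
-)
-
-// newImageID returns a random 256-bit image ID as 64 hex characters,
-// e.g. "a9561eb1b190...".
-func newImageID() (string, error) {
-	b := make([]byte, 32) // 256 bits of randomness
-	if _, err := rand.Read(b); err != nil {
-		return "", err
-	}
-	return hex.EncodeToString(b), nil
-}
-
-func main() {
-	id, err := newImageID()
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println(id)
-}
-```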
-
-## Image JSON Description
-
-Here is an example image JSON file:
-
-```
-{
-    "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9",
-    "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024",
-    "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b",
-    "created": "2014-10-13T21:19:18.674353812Z",
-    "author": "Alyssa P. Hacker <alyspdev@example.com>",
-    "architecture": "amd64",
-    "os": "linux",
-    "Size": 271828,
-    "config": {
-        "User": "alice",
-        "Memory": 2048,
-        "MemorySwap": 4096,
-        "CpuShares": 8,
-        "ExposedPorts": {
-            "8080/tcp": {}
-        },
-        "Env": [
-            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-            "FOO=docker_is_a_really",
-            "BAR=great_tool_you_know"
-        ],
-        "Entrypoint": [
-            "/bin/my-app-binary"
-        ],
-        "Cmd": [
-            "--foreground",
-            "--config",
-            "/etc/my-app.d/default.cfg"
-        ],
-        "Volumes": {
-            "/var/job-result-data": {},
-            "/var/log/my-app-logs": {}
-        },
-        "WorkingDir": "/home/alice"
-    }
-}
-```
-
-### Image JSON Field Descriptions
-
-
- id string -
-
- Randomly generated, 256-bit, hexadecimal encoded. Uniquely identifies - the image. -
-
- parent string -
-
- ID of the parent image. If there is no parent image then this field - should be omitted. A collection of images may share many of the same - ancestor layers. This organizational structure is strictly a tree with - any one layer having either no parent or a single parent and zero or - more descendant layers. Cycles are not allowed and implementations - should be careful to avoid creating them or iterating through a cycle - indefinitely. -
-
- created string -
-
- ISO-8601 formatted combined date and time at which the image was - created. -
-
- author string -
-
- Gives the name and/or email address of the person or entity which - created and is responsible for maintaining the image. -
-
- architecture string -
-
- The CPU architecture which the binaries in this image are built to run - on. Possible values include: -
    -
  • 386
  • -
  • amd64
  • -
  • arm
  • -
- More values may be supported in the future and any of these may or may - not be supported by a given container runtime implementation. -
-
- os string -
-
- The name of the operating system which the image is built to run on. - Possible values include: -
    -
  • darwin
  • -
  • freebsd
  • -
  • linux
  • -
- More values may be supported in the future and any of these may or may - not be supported by a given container runtime implementation. -
-
- checksum string -
-
- Image Checksum of the filesystem changeset associated with the image - layer. -
-
- Size integer -
-
- The size in bytes of the filesystem changeset associated with the image - layer. -
-
- config struct -
-
- The execution parameters which should be used as a base when running a - container using the image. This field can be null, in - which case any execution parameters should be specified at creation of - the container. - -

Container RunConfig Field Descriptions

- -
-
- User string -
-
-

The username or UID which the process in the container should - run as. This acts as a default value to use when the value is - not specified when creating a container.

- -

All of the following are valid:

- -
    -
  • user
  • -
  • uid
  • -
  • user:group
  • -
  • uid:gid
  • -
  • uid:group
  • -
  • user:gid
  • -
- -

If group/gid is not specified, the - default group and supplementary groups of the given - user/uid in /etc/passwd - from the container are applied.

-
-
- Memory integer -
-
- Memory limit (in bytes). This acts as a default value to use - when the value is not specified when creating a container. -
-
- MemorySwap integer -
-
- Total memory usage (memory + swap); set to -1 to - disable swap. This acts as a default value to use when the - value is not specified when creating a container. -
-
- CpuShares integer -
-
- CPU shares (relative weight vs. other containers). This acts as - a default value to use when the value is not specified when - creating a container. -
-
- ExposedPorts struct -
-
-            A set of ports to expose from a container running this image.
-            This JSON structure value is unusual because it is a direct
-            JSON serialization of the Go type
-            map[string]struct{} and is represented in JSON as
-            an object mapping its keys to an empty object (a round-trip
-            sketch follows this field list). Here is an
-            example:
-
{
-    "8080": {},
-    "53/udp": {},
-    "2356/tcp": {}
-}
- - Its keys can be in the format of: -
    -
  • - "port/tcp" -
  • -
  • - "port/udp" -
  • -
  • - "port" -
  • -
- with the default protocol being "tcp" if not - specified. - - These values act as defaults and are merged with any specified - when creating a container. -
-
- Env array of strings -
-
- Entries are in the format of VARNAME="var value". - These values act as defaults and are merged with any specified - when creating a container. -
-
- Entrypoint array of strings -
-
- A list of arguments to use as the command to execute when the - container starts. This value acts as a default and is replaced - by an entrypoint specified when creating a container. -
-
- Cmd array of strings -
-
- Default arguments to the entry point of the container. These - values act as defaults and are replaced with any specified when - creating a container. If an Entrypoint value is - not specified, then the first entry of the Cmd - array should be interpreted as the executable to run. -
-
- Volumes struct -
-
- A set of directories which should be created as data volumes in - a container running this image. This JSON structure value is - unusual because it is a direct JSON serialization of the Go - type map[string]struct{} and is represented in - JSON as an object mapping its keys to an empty object. Here is - an example: -
{
-    "/var/my-app-data/": {},
-    "/etc/some-config.d/": {},
-}
-
-
- WorkingDir string -
-
- Sets the current working directory of the entry point process - in the container. This value acts as a default and is replaced - by a working directory specified when creating a container. -
-
-
-
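-
-As noted in the ExposedPorts entry above, ExposedPorts and Volumes are the
-direct JSON serialization of Go's map[string]struct{}. A short, illustrative
-round-trip in Go (not part of this specification):
-
-```
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-)
-
-func main() {
-	// A set of ports keyed as "port/proto"; the values carry no data.
-	ports := map[string]struct{}{
-		"8080/tcp": {},
-		"53/udp":   {},
-	}
-
-	// Marshals to an object mapping each key to an empty object:
-	// {"53/udp":{},"8080/tcp":{}}
-	out, err := json.Marshal(ports)
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println(string(out))
-
-	// Unmarshaling restores the set; only the keys are meaningful.
-	var back map[string]struct{}
-	if err := json.Unmarshal(out, &back); err != nil {
-		panic(err)
-	}
-	_, exposed := back["8080/tcp"]
-	fmt.Println(exposed) // true
-}
-```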
-
-Any extra fields in the Image JSON struct are considered implementation
-specific and should be ignored by any implementations which are unable to
-interpret them.
-
-## Creating an Image Filesystem Changeset
-
-An example of creating an Image Filesystem Changeset follows.
-
-An image root filesystem is first created as an empty directory named with the
-ID of the image being created. Here is the initial empty directory structure
-for the changeset for an image with ID `c3167915dc9d` ([real IDs are much
-longer](#id_desc), but this example uses a truncated one for brevity.
-Implementations need not name the rootfs directory in this way but it may be
-convenient for keeping a record of a large number of image layers.):
-
-```
-c3167915dc9d/
-```
-
-Files and directories are then created:
-
-```
-c3167915dc9d/
-    etc/
-        my-app-config
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-The `c3167915dc9d` directory is then committed as a plain Tar archive with
-entries for the following files:
-
-```
-etc/my-app-config
-bin/my-app-binary
-bin/my-app-tools
-```
-
-The TarSum checksum for the archive file is then computed and placed in the
-JSON metadata along with the execution parameters.
-
-To make changes to the filesystem of this container image, create a new
-directory named with a new ID, such as `f60c56784b83`, and initialize it with
-a snapshot of the parent image's root filesystem, so that the directory is
-identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem
-can make this very efficient:
-
-```
-f60c56784b83/
-    etc/
-        my-app-config
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-This example change is going to add a configuration directory at `/etc/my-app.d`
-which contains a default config file. There's also a change to the
-`my-app-tools` binary to handle the config layout change. The `f60c56784b83`
-directory then looks like this:
-
-```
-f60c56784b83/
-    etc/
-        my-app.d/
-            default.cfg
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-This reflects the removal of `/etc/my-app-config` and creation of a file and
-directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
-replaced with an updated version. Before committing this directory to a
-changeset, because it has a parent image, it is first compared with the
-directory tree of the parent snapshot, `c3167915dc9d`, looking for files and
-directories that have been added, modified, or removed. The following changeset
-is found:
-
-```
-Added:      /etc/my-app.d/default.cfg
-Modified:   /bin/my-app-tools
-Deleted:    /etc/my-app-config
-```
-
-A Tar Archive is then created which contains *only* this changeset: The added
-and modified files and directories in their entirety, and for each deleted item
-an entry for an empty file at the same location but with the basename of the
-deleted file or directory prefixed with `.wh.`. The filenames prefixed with
-`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible
-to create an image root filesystem which contains a file or directory with a
-name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has
-the following entries:
-
-```
-/etc/my-app.d/default.cfg
-/bin/my-app-tools
-/etc/.wh.my-app-config
-```
-
-Any given image is likely to be composed of several of these Image Filesystem
-Changeset tar archives.
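-
-The whiteout convention above maps naturally onto a tar writer: a deletion
-becomes an empty entry whose basename carries the `.wh.` prefix. A minimal,
-illustrative Go sketch (not part of this specification):
-
-```
-package main
-
-import (
-	"archive/tar"
-	"os"
-	"path"
-)
-
-// writeWhiteout records the deletion of name by adding an empty
-// ".wh."-prefixed entry to the changeset archive.
-func writeWhiteout(tw *tar.Writer, name string) error {
-	dir, base := path.Split(name)
-	hdr := &tar.Header{
-		Name: path.Join(dir, ".wh."+base),
-		Mode: 0600,
-		Size: 0, // whiteouts are empty files
-	}
-	return tw.WriteHeader(hdr)
-}
-
-func main() {
-	tw := tar.NewWriter(os.Stdout)
-	defer tw.Close()
-	// Records that /etc/my-app-config was deleted in this changeset.
-	if err := writeWhiteout(tw, "etc/my-app-config"); err != nil {
-		panic(err)
-	}
-}
-```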
-
-## Combined Image JSON + Filesystem Changeset Format
-
-There is also a format for a single archive which contains complete information
-about an image, including:
-
- - repository names/tags
- - all image layer JSON files
- - all tar archives of each layer's filesystem changeset
-
-For example, here's what the full archive of `library/busybox` is (displayed in
-`tree` format):
-
-```
-.
-├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e
-│   ├── VERSION
-│   ├── json
-│   └── layer.tar
-├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a
-│   ├── VERSION
-│   ├── json
-│   └── layer.tar
-├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb
-│   ├── VERSION
-│   ├── json
-│   └── layer.tar
-├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c
-│   ├── VERSION
-│   ├── json
-│   └── layer.tar
-└── repositories
-```
-
-There are one or more directories named with the ID for each layer in a full
-image. Each of these directories contains 3 files:
-
- * `VERSION` - The schema version of the `json` file
- * `json` - The JSON metadata for an image layer
- * `layer.tar` - The Tar archive of the filesystem changeset for an image
-   layer.
-
-The content of the `VERSION` files is simply the semantic version of the JSON
-metadata schema:
-
-```
-1.0
-```
-
-And the `repositories` file is another JSON file which describes names/tags:
-
-```
-{
-    "busybox":{
-        "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e"
-    }
-}
-```
-
-Every key in this object is the name of a repository, and maps to a collection
-of tag suffixes. Each tag maps to the ID of the image represented by that tag.
-
-## Loading an Image Filesystem Changeset
-
-Unpacking a bundle of image layer JSON files and their corresponding filesystem
-changesets can be done using a series of steps:
-
-1. Follow the parent IDs of image layers to find the root ancestor (an image
-with no parent ID specified).
-2. For every image layer, in order from root ancestor and descending down,
-extract the contents of that layer's filesystem changeset archive into a
-directory which will be used as the root of a container filesystem.
-
-    - Extract all contents of each archive.
-    - Walk the directory tree once more, removing any files with the prefix
-      `.wh.` and the corresponding file or directory named without this prefix.
-
-
-## Implementations
-
-This specification is an admittedly imperfect description of an
-imperfectly-understood problem. The Docker project is, in turn, an attempt to
-implement this specification. Our goal and our execution toward it will evolve
-over time, but our primary concern in this specification and in our
-implementation is compatibility and interoperability.
diff --git a/image/store.go b/image/store.go
deleted file mode 100644
index 92ac438db..000000000
--- a/image/store.go
+++ /dev/null
@@ -1,295 +0,0 @@
-package image
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"sync"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/layer"
-)
-
-// Store is an interface for creating and accessing images
-type Store interface {
-	Create(config []byte) (ID, error)
-	Get(id ID) (*Image, error)
-	Delete(id ID) ([]layer.Metadata, error)
-	Search(partialID string) (ID, error)
-	SetParent(id ID, parent ID) error
-	GetParent(id ID) (ID, error)
-	Children(id ID) []ID
-	Map() map[ID]*Image
-	Heads() map[ID]*Image
-}
-
-// LayerGetReleaser is a minimal interface for getting and releasing layers.
-type LayerGetReleaser interface { - Get(layer.ChainID) (layer.Layer, error) - Release(layer.Layer) ([]layer.Metadata, error) -} - -type imageMeta struct { - layer layer.Layer - children map[ID]struct{} -} - -type store struct { - sync.Mutex - ls LayerGetReleaser - images map[ID]*imageMeta - fs StoreBackend - digestSet *digest.Set -} - -// NewImageStore returns new store object for given layer store -func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) { - is := &store{ - ls: ls, - images: make(map[ID]*imageMeta), - fs: fs, - digestSet: digest.NewSet(), - } - - // load all current images and retain layers - if err := is.restore(); err != nil { - return nil, err - } - - return is, nil -} - -func (is *store) restore() error { - err := is.fs.Walk(func(id ID) error { - img, err := is.Get(id) - if err != nil { - logrus.Errorf("invalid image %v, %v", id, err) - return nil - } - var l layer.Layer - if chainID := img.RootFS.ChainID(); chainID != "" { - l, err = is.ls.Get(chainID) - if err != nil { - return err - } - } - if err := is.digestSet.Add(digest.Digest(id)); err != nil { - return err - } - - imageMeta := &imageMeta{ - layer: l, - children: make(map[ID]struct{}), - } - - is.images[ID(id)] = imageMeta - - return nil - }) - if err != nil { - return err - } - - // Second pass to fill in children maps - for id := range is.images { - if parent, err := is.GetParent(id); err == nil { - if parentMeta := is.images[parent]; parentMeta != nil { - parentMeta.children[id] = struct{}{} - } - } - } - - return nil -} - -func (is *store) Create(config []byte) (ID, error) { - var img Image - err := json.Unmarshal(config, &img) - if err != nil { - return "", err - } - - // Must reject any config that references diffIDs from the history - // which aren't among the rootfs layers. 
- rootFSLayers := make(map[layer.DiffID]struct{}) - for _, diffID := range img.RootFS.DiffIDs { - rootFSLayers[diffID] = struct{}{} - } - - layerCounter := 0 - for _, h := range img.History { - if !h.EmptyLayer { - layerCounter++ - } - } - if layerCounter > len(img.RootFS.DiffIDs) { - return "", errors.New("too many non-empty layers in History section") - } - - dgst, err := is.fs.Set(config) - if err != nil { - return "", err - } - imageID := ID(dgst) - - is.Lock() - defer is.Unlock() - - if _, exists := is.images[imageID]; exists { - return imageID, nil - } - - layerID := img.RootFS.ChainID() - - var l layer.Layer - if layerID != "" { - l, err = is.ls.Get(layerID) - if err != nil { - return "", err - } - } - - imageMeta := &imageMeta{ - layer: l, - children: make(map[ID]struct{}), - } - - is.images[imageID] = imageMeta - if err := is.digestSet.Add(digest.Digest(imageID)); err != nil { - delete(is.images, imageID) - return "", err - } - - return imageID, nil -} - -func (is *store) Search(term string) (ID, error) { - is.Lock() - defer is.Unlock() - - dgst, err := is.digestSet.Lookup(term) - if err != nil { - if err == digest.ErrDigestNotFound { - err = fmt.Errorf("No such image: %s", term) - } - return "", err - } - return ID(dgst), nil -} - -func (is *store) Get(id ID) (*Image, error) { - // todo: Check if image is in images - // todo: Detect manual insertions and start using them - config, err := is.fs.Get(id) - if err != nil { - return nil, err - } - - img, err := NewFromJSON(config) - if err != nil { - return nil, err - } - img.computedID = id - - img.Parent, err = is.GetParent(id) - if err != nil { - img.Parent = "" - } - - return img, nil -} - -func (is *store) Delete(id ID) ([]layer.Metadata, error) { - is.Lock() - defer is.Unlock() - - imageMeta := is.images[id] - if imageMeta == nil { - return nil, fmt.Errorf("unrecognized image ID %s", id.String()) - } - for id := range imageMeta.children { - is.fs.DeleteMetadata(id, "parent") - } - if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { - delete(is.images[parent].children, id) - } - - if err := is.digestSet.Remove(digest.Digest(id)); err != nil { - logrus.Errorf("error removing %s from digest set: %q", id, err) - } - delete(is.images, id) - is.fs.Delete(id) - - if imageMeta.layer != nil { - return is.ls.Release(imageMeta.layer) - } - return nil, nil -} - -func (is *store) SetParent(id, parent ID) error { - is.Lock() - defer is.Unlock() - parentMeta := is.images[parent] - if parentMeta == nil { - return fmt.Errorf("unknown parent image ID %s", parent.String()) - } - if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { - delete(is.images[parent].children, id) - } - parentMeta.children[id] = struct{}{} - return is.fs.SetMetadata(id, "parent", []byte(parent)) -} - -func (is *store) GetParent(id ID) (ID, error) { - d, err := is.fs.GetMetadata(id, "parent") - if err != nil { - return "", err - } - return ID(d), nil // todo: validate? 
-} - -func (is *store) Children(id ID) []ID { - is.Lock() - defer is.Unlock() - - return is.children(id) -} - -func (is *store) children(id ID) []ID { - var ids []ID - if is.images[id] != nil { - for id := range is.images[id].children { - ids = append(ids, id) - } - } - return ids -} - -func (is *store) Heads() map[ID]*Image { - return is.imagesMap(false) -} - -func (is *store) Map() map[ID]*Image { - return is.imagesMap(true) -} - -func (is *store) imagesMap(all bool) map[ID]*Image { - is.Lock() - defer is.Unlock() - - images := make(map[ID]*Image) - - for id := range is.images { - if !all && len(is.children(id)) > 0 { - continue - } - img, err := is.Get(id) - if err != nil { - logrus.Errorf("invalid image access: %q, error: %q", id, err) - continue - } - images[id] = img - } - return images -} diff --git a/image/store_test.go b/image/store_test.go deleted file mode 100644 index 50f8aa8b8..000000000 --- a/image/store_test.go +++ /dev/null @@ -1,300 +0,0 @@ -package image - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/layer" -) - -func TestRestore(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - id1, err := fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) - if err != nil { - t.Fatal(err) - } - _, err = fs.Set([]byte(`invalid`)) - if err != nil { - t.Fatal(err) - } - id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - if err != nil { - t.Fatal(err) - } - err = fs.SetMetadata(id2, "parent", []byte(id1)) - if err != nil { - t.Fatal(err) - } - - is, err := NewImageStore(fs, &mockLayerGetReleaser{}) - if err != nil { - t.Fatal(err) - } - - imgs := is.Map() - if actual, expected := len(imgs), 2; actual != expected { - t.Fatalf("invalid images length, expected 2, got %q", len(imgs)) - } - - img1, err := is.Get(ID(id1)) - if err != nil { - t.Fatal(err) - } - - if actual, expected := img1.computedID, ID(id1); actual != expected { - t.Fatalf("invalid image ID: expected %q, got %q", expected, actual) - } - - if actual, expected := img1.computedID.String(), string(id1); actual != expected { - t.Fatalf("invalid image ID string: expected %q, got %q", expected, actual) - } - - img2, err := is.Get(ID(id2)) - if err != nil { - t.Fatal(err) - } - - if actual, expected := img1.Comment, "abc"; actual != expected { - t.Fatalf("invalid comment for image1: expected %q, got %q", expected, actual) - } - - if actual, expected := img2.Comment, "def"; actual != expected { - t.Fatalf("invalid comment for image2: expected %q, got %q", expected, actual) - } - - p, err := is.GetParent(ID(id1)) - if err == nil { - t.Fatal("expected error for getting parent") - } - - p, err = is.GetParent(ID(id2)) - if err != nil { - t.Fatal(err) - } - if actual, expected := p, ID(id1); actual != expected { - t.Fatalf("invalid parent: expected %q, got %q", expected, actual) - } - - children := is.Children(ID(id1)) - if len(children) != 1 { - t.Fatalf("invalid children length: %q", len(children)) - } - if actual, expected := children[0], ID(id2); actual != expected { - t.Fatalf("invalid child for id1: expected %q, got %q", expected, actual) - } - - heads := is.Heads() - if actual, expected := len(heads), 1; actual != expected { - t.Fatalf("invalid images length: expected %q, got 
%q", expected, actual) - } - - sid1, err := is.Search(string(id1)[:10]) - if err != nil { - t.Fatal(err) - } - if actual, expected := sid1, ID(id1); actual != expected { - t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) - } - - sid1, err = is.Search(digest.Digest(id1).Hex()[:6]) - if err != nil { - t.Fatal(err) - } - if actual, expected := sid1, ID(id1); actual != expected { - t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) - } - - invalidPattern := digest.Digest(id1).Hex()[1:6] - _, err = is.Search(invalidPattern) - if err == nil { - t.Fatalf("expected search for %q to fail", invalidPattern) - } - -} - -func TestAddDelete(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - is, err := NewImageStore(fs, &mockLayerGetReleaser{}) - if err != nil { - t.Fatal(err) - } - - id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - if err != nil { - t.Fatal(err) - } - - if actual, expected := id1, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"); actual != expected { - t.Fatalf("create ID mismatch: expected %q, got %q", expected, actual) - } - - img, err := is.Get(id1) - if err != nil { - t.Fatal(err) - } - - if actual, expected := img.Comment, "abc"; actual != expected { - t.Fatalf("invalid comment in image: expected %q, got %q", expected, actual) - } - - id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - if err != nil { - t.Fatal(err) - } - - err = is.SetParent(id2, id1) - if err != nil { - t.Fatal(err) - } - - pid1, err := is.GetParent(id2) - if err != nil { - t.Fatal(err) - } - if actual, expected := pid1, id1; actual != expected { - t.Fatalf("invalid parent for image: expected %q, got %q", expected, actual) - } - - _, err = is.Delete(id1) - if err != nil { - t.Fatal(err) - } - _, err = is.Get(id1) - if err == nil { - t.Fatalf("expected get for deleted image %q to fail", id1) - } - _, err = is.Get(id2) - if err != nil { - t.Fatal(err) - } - pid1, err = is.GetParent(id2) - if err == nil { - t.Fatalf("expected parent check for image %q to fail, got %q", id2, pid1) - } - -} - -func TestSearchAfterDelete(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - is, err := NewImageStore(fs, &mockLayerGetReleaser{}) - if err != nil { - t.Fatal(err) - } - - id, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) - if err != nil { - t.Fatal(err) - } - - id1, err := is.Search(string(id)[:15]) - if err != nil { - t.Fatal(err) - } - - if actual, expected := id1, id; expected != actual { - t.Fatalf("wrong id returned from search: expected %q, got %q", expected, actual) - } - - if _, err := is.Delete(id); err != nil { - t.Fatal(err) - } - - if _, err := is.Search(string(id)[:15]); err == nil { - t.Fatal("expected search after deletion to fail") - } -} - -func TestParentReset(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - 
t.Fatal(err)
-	}
-
-	is, err := NewImageStore(fs, &mockLayerGetReleaser{})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	id, err := is.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	id2, err := is.Create([]byte(`{"comment": "abc2", "rootfs": {"type": "layers"}}`))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	id3, err := is.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := is.SetParent(id, id2); err != nil {
-		t.Fatal(err)
-	}
-
-	ids := is.Children(id2)
-	if actual, expected := len(ids), 1; expected != actual {
-		t.Fatalf("wrong number of children: expected %d, got %d", expected, actual)
-	}
-
-	if err := is.SetParent(id, id3); err != nil {
-		t.Fatal(err)
-	}
-
-	ids = is.Children(id2)
-	if actual, expected := len(ids), 0; expected != actual {
-		t.Fatalf("wrong number of children after parent reset: expected %d, got %d", expected, actual)
-	}
-
-	ids = is.Children(id3)
-	if actual, expected := len(ids), 1; expected != actual {
-		t.Fatalf("wrong number of children after parent reset: expected %d, got %d", expected, actual)
-	}
-
-}
-
-type mockLayerGetReleaser struct{}
-
-func (ls *mockLayerGetReleaser) Get(layer.ChainID) (layer.Layer, error) {
-	return nil, nil
-}
-
-func (ls *mockLayerGetReleaser) Release(layer.Layer) ([]layer.Metadata, error) {
-	return nil, nil
-}
diff --git a/image/tarexport/load.go b/image/tarexport/load.go
deleted file mode 100644
index ff42db53f..000000000
--- a/image/tarexport/load.go
+++ /dev/null
@@ -1,389 +0,0 @@
-package tarexport
-
-import (
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"reflect"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/image/v1"
-	"github.com/docker/docker/layer"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/progress"
-	"github.com/docker/docker/pkg/streamformatter"
-	"github.com/docker/docker/pkg/stringid"
-	"github.com/docker/docker/pkg/symlink"
-	"github.com/docker/docker/reference"
-)
-
-func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
-	var (
-		sf             = streamformatter.NewJSONStreamFormatter()
-		progressOutput progress.Output
-	)
-	if !quiet {
-		progressOutput = sf.NewProgressOutput(outStream, false)
-		outStream = &streamformatter.StdoutFormatter{Writer: outStream, StreamFormatter: streamformatter.NewJSONStreamFormatter()}
-	}
-
-	tmpDir, err := ioutil.TempDir("", "docker-import-")
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(tmpDir)
-
-	if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil {
-		return err
-	}
-	// read manifest, if no file then load in legacy mode
-	manifestPath, err := safePath(tmpDir, manifestFileName)
-	if err != nil {
-		return err
-	}
-	manifestFile, err := os.Open(manifestPath)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return l.legacyLoad(tmpDir, outStream, progressOutput)
-		}
-		return err // report the open error rather than masking it with Close()
-	}
-	defer manifestFile.Close()
-
-	var manifest []manifestItem
-	if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil {
-		return err
-	}
-
-	var parentLinks []parentLink
-	var imageIDsStr string
-	var imageRefCount int
-
-	for _, m := range manifest {
-		configPath, err := safePath(tmpDir, m.Config)
-		if err != nil {
-			return err
-		}
-		config, err := ioutil.ReadFile(configPath)
-		if err != nil {
-			return err
-		}
-		img, err := image.NewFromJSON(config)
-		if
err != nil { - return err - } - var rootFS image.RootFS - rootFS = *img.RootFS - rootFS.DiffIDs = nil - - if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { - return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual) - } - - for i, diffID := range img.RootFS.DiffIDs { - layerPath, err := safePath(tmpDir, m.Layers[i]) - if err != nil { - return err - } - r := rootFS - r.Append(diffID) - newLayer, err := l.ls.Get(r.ChainID()) - if err != nil { - newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), m.LayerSources[diffID], progressOutput) - if err != nil { - return err - } - } - defer layer.ReleaseAndLog(l.ls, newLayer) - if expected, actual := diffID, newLayer.DiffID(); expected != actual { - return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) - } - rootFS.Append(diffID) - } - - imgID, err := l.is.Create(config) - if err != nil { - return err - } - imageIDsStr += fmt.Sprintf("Loaded image ID: %s\n", imgID) - - imageRefCount = 0 - for _, repoTag := range m.RepoTags { - named, err := reference.ParseNamed(repoTag) - if err != nil { - return err - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid tag %q", repoTag) - } - l.setLoadedTag(ref, imgID, outStream) - outStream.Write([]byte(fmt.Sprintf("Loaded image: %s\n", ref))) - imageRefCount++ - } - - parentLinks = append(parentLinks, parentLink{imgID, m.Parent}) - l.loggerImgEvent.LogImageEvent(imgID.String(), imgID.String(), "load") - } - - for _, p := range validatedParentLinks(parentLinks) { - if p.parentID != "" { - if err := l.setParentID(p.id, p.parentID); err != nil { - return err - } - } - } - - if imageRefCount == 0 { - outStream.Write([]byte(imageIDsStr)) - } - - return nil -} - -func (l *tarexporter) setParentID(id, parentID image.ID) error { - img, err := l.is.Get(id) - if err != nil { - return err - } - parent, err := l.is.Get(parentID) - if err != nil { - return err - } - if !checkValidParent(img, parent) { - return fmt.Errorf("image %v is not a valid parent for %v", parent.ID(), img.ID()) - } - return l.is.SetParent(id, parentID) -} - -func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { - rawTar, err := os.Open(filename) - if err != nil { - logrus.Debugf("Error reading embedded tar: %v", err) - return nil, err - } - defer rawTar.Close() - - var r io.Reader - if progressOutput != nil { - fileInfo, err := rawTar.Stat() - if err != nil { - logrus.Debugf("Error statting file: %v", err) - return nil, err - } - - r = progress.NewProgressReader(rawTar, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") - } else { - r = rawTar - } - - inflatedLayerData, err := archive.DecompressStream(r) - if err != nil { - return nil, err - } - defer inflatedLayerData.Close() - - if ds, ok := l.ls.(layer.DescribableStore); ok { - return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), foreignSrc) - } - return l.ls.Register(inflatedLayerData, rootFS.ChainID()) -} - -func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID image.ID, outStream io.Writer) error { - if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { - fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags - } - - if err := 
l.rs.AddTag(ref, imgID, true); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error {
-	legacyLoadedMap := make(map[string]image.ID)
-
-	dirs, err := ioutil.ReadDir(tmpDir)
-	if err != nil {
-		return err
-	}
-
-	// every dir represents an image
-	for _, d := range dirs {
-		if d.IsDir() {
-			if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap, progressOutput); err != nil {
-				return err
-			}
-		}
-	}
-
-	// load tags from repositories file
-	repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName)
-	if err != nil {
-		return err
-	}
-	repositoriesFile, err := os.Open(repositoriesPath)
-	if err != nil {
-		if !os.IsNotExist(err) {
-			return err
-		}
-		return nil // no repositories file means there are no tags to load
-	}
-	defer repositoriesFile.Close()
-
-	repositories := make(map[string]map[string]string)
-	if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil {
-		return err
-	}
-
-	for name, tagMap := range repositories {
-		for tag, oldID := range tagMap {
-			imgID, ok := legacyLoadedMap[oldID]
-			if !ok {
-				return fmt.Errorf("invalid target ID: %v", oldID)
-			}
-			named, err := reference.WithName(name)
-			if err != nil {
-				return err
-			}
-			ref, err := reference.WithTag(named, tag)
-			if err != nil {
-				return err
-			}
-			l.setLoadedTag(ref, imgID, outStream)
-		}
-	}
-
-	return nil
-}
-
-func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error {
-	if _, loaded := loadedMap[oldID]; loaded {
-		return nil
-	}
-	configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName))
-	if err != nil {
-		return err
-	}
-	imageJSON, err := ioutil.ReadFile(configPath)
-	if err != nil {
-		logrus.Debugf("Error reading json: %v", err)
-		return err
-	}
-
-	var img struct{ Parent string }
-	if err := json.Unmarshal(imageJSON, &img); err != nil {
-		return err
-	}
-
-	var parentID image.ID
-	if img.Parent != "" {
-		for {
-			var loaded bool
-			if parentID, loaded = loadedMap[img.Parent]; !loaded {
-				if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap, progressOutput); err != nil {
-					return err
-				}
-			} else {
-				break
-			}
-		}
-	}
-
-	// todo: try to connect with migrate code
-	rootFS := image.NewRootFS()
-	var history []image.History
-
-	if parentID != "" {
-		parentImg, err := l.is.Get(parentID)
-		if err != nil {
-			return err
-		}
-
-		rootFS = parentImg.RootFS
-		history = parentImg.History
-	}
-
-	layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName))
-	if err != nil {
-		return err
-	}
-	newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, distribution.Descriptor{}, progressOutput)
-	if err != nil {
-		return err
-	}
-	rootFS.Append(newLayer.DiffID())
-
-	h, err := v1.HistoryFromConfig(imageJSON, false)
-	if err != nil {
-		return err
-	}
-	history = append(history, h)
-
-	config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history)
-	if err != nil {
-		return err
-	}
-	imgID, err := l.is.Create(config)
-	if err != nil {
-		return err
-	}
-
-	metadata, err := l.ls.Release(newLayer)
-	layer.LogReleaseMetadata(metadata)
-	if err != nil {
-		return err
-	}
-
-	if parentID != "" {
-		if err := l.is.SetParent(imgID, parentID); err != nil {
-			return err
-		}
-	}
-
-	loadedMap[oldID] = imgID
-	return nil
-}
-
-func safePath(base, path string) (string, error) {
-	return symlink.FollowSymlinkInScope(filepath.Join(base, path), base)
-}
-
-type parentLink struct {
-	id, parentID image.ID
-}
-
-func
validatedParentLinks(pl []parentLink) (ret []parentLink) { -mainloop: - for i, p := range pl { - ret = append(ret, p) - for _, p2 := range pl { - if p2.id == p.parentID && p2.id != p.id { - continue mainloop - } - } - ret[i].parentID = "" - } - return -} - -func checkValidParent(img, parent *image.Image) bool { - if len(img.History) == 0 && len(parent.History) == 0 { - return true // having history is not mandatory - } - if len(img.History)-len(parent.History) != 1 { - return false - } - for i, h := range parent.History { - if !reflect.DeepEqual(h, img.History[i]) { - return false - } - } - return true -} diff --git a/image/tarexport/save.go b/image/tarexport/save.go deleted file mode 100644 index e1a4947d0..000000000 --- a/image/tarexport/save.go +++ /dev/null @@ -1,349 +0,0 @@ -package tarexport - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/reference" -) - -type imageDescriptor struct { - refs []reference.NamedTagged - layers []string -} - -type saveSession struct { - *tarexporter - outDir string - images map[image.ID]*imageDescriptor - savedLayers map[string]struct{} - diffIDPaths map[layer.DiffID]string // cache every diffID blob to avoid duplicates -} - -func (l *tarexporter) Save(names []string, outStream io.Writer) error { - images, err := l.parseNames(names) - if err != nil { - return err - } - - return (&saveSession{tarexporter: l, images: images}).save(outStream) -} - -func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { - imgDescr := make(map[image.ID]*imageDescriptor) - - addAssoc := func(id image.ID, ref reference.Named) { - if _, ok := imgDescr[id]; !ok { - imgDescr[id] = &imageDescriptor{} - } - - if ref != nil { - var tagged reference.NamedTagged - if _, ok := ref.(reference.Canonical); ok { - return - } - var ok bool - if tagged, ok = ref.(reference.NamedTagged); !ok { - var err error - if tagged, err = reference.WithTag(ref, reference.DefaultTag); err != nil { - return - } - } - - for _, t := range imgDescr[id].refs { - if tagged.String() == t.String() { - return - } - } - imgDescr[id].refs = append(imgDescr[id].refs, tagged) - } - } - - for _, name := range names { - id, ref, err := reference.ParseIDOrReference(name) - if err != nil { - return nil, err - } - if id != "" { - _, err := l.is.Get(image.ID(id)) - if err != nil { - return nil, err - } - addAssoc(image.ID(id), nil) - continue - } - if ref.Name() == string(digest.Canonical) { - imgID, err := l.is.Search(name) - if err != nil { - return nil, err - } - addAssoc(imgID, nil) - continue - } - if reference.IsNameOnly(ref) { - assocs := l.rs.ReferencesByName(ref) - for _, assoc := range assocs { - addAssoc(assoc.ImageID, assoc.Ref) - } - if len(assocs) == 0 { - imgID, err := l.is.Search(name) - if err != nil { - return nil, err - } - addAssoc(imgID, nil) - } - continue - } - var imgID image.ID - if imgID, err = l.rs.Get(ref); err != nil { - return nil, err - } - addAssoc(imgID, ref) - - } - return imgDescr, nil -} - -func (s *saveSession) save(outStream io.Writer) error { - s.savedLayers = make(map[string]struct{}) - s.diffIDPaths = make(map[layer.DiffID]string) - - // get image json - tempDir, err := ioutil.TempDir("", "docker-export-") - if err 
!= nil { - return err - } - defer os.RemoveAll(tempDir) - - s.outDir = tempDir - reposLegacy := make(map[string]map[string]string) - - var manifest []manifestItem - var parentLinks []parentLink - - for id, imageDescr := range s.images { - foreignSrcs, err := s.saveImage(id) - if err != nil { - return err - } - - var repoTags []string - var layers []string - - for _, ref := range imageDescr.refs { - if _, ok := reposLegacy[ref.Name()]; !ok { - reposLegacy[ref.Name()] = make(map[string]string) - } - reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] - repoTags = append(repoTags, ref.String()) - } - - for _, l := range imageDescr.layers { - layers = append(layers, filepath.Join(l, legacyLayerFileName)) - } - - manifest = append(manifest, manifestItem{ - Config: digest.Digest(id).Hex() + ".json", - RepoTags: repoTags, - Layers: layers, - LayerSources: foreignSrcs, - }) - - parentID, _ := s.is.GetParent(id) - parentLinks = append(parentLinks, parentLink{id, parentID}) - s.tarexporter.loggerImgEvent.LogImageEvent(id.String(), id.String(), "save") - } - - for i, p := range validatedParentLinks(parentLinks) { - if p.parentID != "" { - manifest[i].Parent = p.parentID - } - } - - if len(reposLegacy) > 0 { - reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) - f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - f.Close() - return err - } - if err := json.NewEncoder(f).Encode(reposLegacy); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { - return err - } - } - - manifestFileName := filepath.Join(tempDir, manifestFileName) - f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - f.Close() - return err - } - if err := json.NewEncoder(f).Encode(manifest); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { - return err - } - - fs, err := archive.Tar(tempDir, archive.Uncompressed) - if err != nil { - return err - } - defer fs.Close() - - if _, err := io.Copy(outStream, fs); err != nil { - return err - } - return nil -} - -func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) { - img, err := s.is.Get(id) - if err != nil { - return nil, err - } - - if len(img.RootFS.DiffIDs) == 0 { - return nil, fmt.Errorf("empty export - not implemented") - } - - var parent digest.Digest - var layers []string - var foreignSrcs map[layer.DiffID]distribution.Descriptor - for i := range img.RootFS.DiffIDs { - v1Img := image.V1Image{} - if i == len(img.RootFS.DiffIDs)-1 { - v1Img = img.V1Image - } - rootFS := *img.RootFS - rootFS.DiffIDs = rootFS.DiffIDs[:i+1] - v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) - if err != nil { - return nil, err - } - - v1Img.ID = v1ID.Hex() - if parent != "" { - v1Img.Parent = parent.Hex() - } - - src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created) - if err != nil { - return nil, err - } - layers = append(layers, v1Img.ID) - parent = v1ID - if src.Digest != "" { - if foreignSrcs == nil { - foreignSrcs = make(map[layer.DiffID]distribution.Descriptor) - } - foreignSrcs[img.RootFS.DiffIDs[i]] = src - } - } - - configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json") - if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { - return nil, 
err - } - if err := system.Chtimes(configFile, img.Created, img.Created); err != nil { - return nil, err - } - - s.images[id].layers = layers - return foreignSrcs, nil -} - -func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) (distribution.Descriptor, error) { - if _, exists := s.savedLayers[legacyImg.ID]; exists { - return distribution.Descriptor{}, nil - } - - outDir := filepath.Join(s.outDir, legacyImg.ID) - if err := os.Mkdir(outDir, 0755); err != nil { - return distribution.Descriptor{}, err - } - - // todo: why is this version file here? - if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { - return distribution.Descriptor{}, err - } - - imageConfig, err := json.Marshal(legacyImg) - if err != nil { - return distribution.Descriptor{}, err - } - - if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { - return distribution.Descriptor{}, err - } - - // serialize filesystem - layerPath := filepath.Join(outDir, legacyLayerFileName) - l, err := s.ls.Get(id) - if err != nil { - return distribution.Descriptor{}, err - } - defer layer.ReleaseAndLog(s.ls, l) - - if oldPath, exists := s.diffIDPaths[l.DiffID()]; exists { - relPath, err := filepath.Rel(layerPath, oldPath) - if err != nil { - return distribution.Descriptor{}, err - } - os.Symlink(relPath, layerPath) - } else { - - tarFile, err := os.Create(layerPath) - if err != nil { - return distribution.Descriptor{}, err - } - defer tarFile.Close() - - arch, err := l.TarStream() - if err != nil { - return distribution.Descriptor{}, err - } - defer arch.Close() - - if _, err := io.Copy(tarFile, arch); err != nil { - return distribution.Descriptor{}, err - } - - for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { - // todo: maybe save layer created timestamp? 
- if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { - return distribution.Descriptor{}, err - } - } - - s.diffIDPaths[l.DiffID()] = layerPath - } - s.savedLayers[legacyImg.ID] = struct{}{} - - var src distribution.Descriptor - if fs, ok := l.(distribution.Describable); ok { - src = fs.Descriptor() - } - return src, nil -} diff --git a/image/tarexport/tarexport.go b/image/tarexport/tarexport.go deleted file mode 100644 index c0be95480..000000000 --- a/image/tarexport/tarexport.go +++ /dev/null @@ -1,47 +0,0 @@ -package tarexport - -import ( - "github.com/docker/distribution" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/reference" -) - -const ( - manifestFileName = "manifest.json" - legacyLayerFileName = "layer.tar" - legacyConfigFileName = "json" - legacyVersionFileName = "VERSION" - legacyRepositoriesFileName = "repositories" -) - -type manifestItem struct { - Config string - RepoTags []string - Layers []string - Parent image.ID `json:",omitempty"` - LayerSources map[layer.DiffID]distribution.Descriptor `json:",omitempty"` -} - -type tarexporter struct { - is image.Store - ls layer.Store - rs reference.Store - loggerImgEvent LogImageEvent -} - -// LogImageEvent defines interface for event generation related to image tar(load and save) operations -type LogImageEvent interface { - //LogImageEvent generates an event related to an image operation - LogImageEvent(imageID, refName, action string) -} - -// NewTarExporter returns new ImageExporter for tar packages -func NewTarExporter(is image.Store, ls layer.Store, rs reference.Store, loggerImgEvent LogImageEvent) image.Exporter { - return &tarexporter{ - is: is, - ls: ls, - rs: rs, - loggerImgEvent: loggerImgEvent, - } -} diff --git a/image/v1/imagev1.go b/image/v1/imagev1.go deleted file mode 100644 index b7a9529ed..000000000 --- a/image/v1/imagev1.go +++ /dev/null @@ -1,156 +0,0 @@ -package v1 - -import ( - "encoding/json" - "fmt" - "reflect" - "regexp" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/engine-api/types/versions" -) - -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) - -// noFallbackMinVersion is the minimum version for which v1compatibility -// information will not be marshaled through the Image struct to remove -// blank fields. -var noFallbackMinVersion = "1.8.3" - -// HistoryFromConfig creates a History struct from v1 configuration JSON -func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { - h := image.History{} - var v1Image image.V1Image - if err := json.Unmarshal(imageJSON, &v1Image); err != nil { - return h, err - } - - return image.History{ - Author: v1Image.Author, - Created: v1Image.Created, - CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "), - Comment: v1Image.Comment, - EmptyLayer: emptyLayer, - }, nil -} - -// CreateID creates an ID from v1 image, layerID and parent ID. -// Used for backwards compatibility with old clients. 
-func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { - v1Image.ID = "" - v1JSON, err := json.Marshal(v1Image) - if err != nil { - return "", err - } - - var config map[string]*json.RawMessage - if err := json.Unmarshal(v1JSON, &config); err != nil { - return "", err - } - - // FIXME: note that this is slightly incompatible with RootFS logic - config["layer_id"] = rawJSON(layerID) - if parent != "" { - config["parent"] = rawJSON(parent) - } - - configJSON, err := json.Marshal(config) - if err != nil { - return "", err - } - logrus.Debugf("CreateV1ID %s", configJSON) - - return digest.FromBytes(configJSON), nil -} - -// MakeConfigFromV1Config creates an image config from the legacy V1 config format. -func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { - var dver struct { - DockerVersion string `json:"docker_version"` - } - - if err := json.Unmarshal(imageJSON, &dver); err != nil { - return nil, err - } - - useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion) - - if useFallback { - var v1Image image.V1Image - err := json.Unmarshal(imageJSON, &v1Image) - if err != nil { - return nil, err - } - imageJSON, err = json.Marshal(v1Image) - if err != nil { - return nil, err - } - } - - var c map[string]*json.RawMessage - if err := json.Unmarshal(imageJSON, &c); err != nil { - return nil, err - } - - delete(c, "id") - delete(c, "parent") - delete(c, "Size") // Size is calculated from data on disk and is inconsistent - delete(c, "parent_id") - delete(c, "layer_id") - delete(c, "throwaway") - - c["rootfs"] = rawJSON(rootfs) - c["history"] = rawJSON(history) - - return json.Marshal(c) -} - -// MakeV1ConfigFromConfig creates an legacy V1 image config from an Image struct -func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Top-level v1compatibility string should be a modified version of the - // image config. - var configAsMap map[string]*json.RawMessage - if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { - return nil, err - } - - // Delete fields that didn't exist in old manifest - imageType := reflect.TypeOf(img).Elem() - for i := 0; i < imageType.NumField(); i++ { - f := imageType.Field(i) - jsonName := strings.Split(f.Tag.Get("json"), ",")[0] - // Parent is handled specially below. - if jsonName != "" && jsonName != "parent" { - delete(configAsMap, jsonName) - } - } - configAsMap["id"] = rawJSON(v1ID) - if parentV1ID != "" { - configAsMap["parent"] = rawJSON(parentV1ID) - } - if throwaway { - configAsMap["throwaway"] = rawJSON(true) - } - - return json.Marshal(configAsMap) -} - -func rawJSON(value interface{}) *json.RawMessage { - jsonval, err := json.Marshal(value) - if err != nil { - return nil - } - return (*json.RawMessage)(&jsonval) -} - -// ValidateID checks whether an ID string is a valid image ID. 
-func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} diff --git a/image/v1/imagev1_test.go b/image/v1/imagev1_test.go deleted file mode 100644 index 936c55e4c..000000000 --- a/image/v1/imagev1_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package v1 - -import ( - "encoding/json" - "testing" - - "github.com/docker/docker/image" -) - -func TestMakeV1ConfigFromConfig(t *testing.T) { - img := &image.Image{ - V1Image: image.V1Image{ - ID: "v2id", - Parent: "v2parent", - OS: "os", - }, - OSVersion: "osversion", - RootFS: &image.RootFS{ - Type: "layers", - }, - } - v2js, err := json.Marshal(img) - if err != nil { - t.Fatal(err) - } - - // Convert the image back in order to get RawJSON() support. - img, err = image.NewFromJSON(v2js) - if err != nil { - t.Fatal(err) - } - - js, err := MakeV1ConfigFromConfig(img, "v1id", "v1parent", false) - if err != nil { - t.Fatal(err) - } - - newimg := &image.Image{} - err = json.Unmarshal(js, newimg) - if err != nil { - t.Fatal(err) - } - - if newimg.V1Image.ID != "v1id" || newimg.Parent != "v1parent" { - t.Error("ids should have changed", newimg.V1Image.ID, newimg.V1Image.Parent) - } - - if newimg.RootFS != nil { - t.Error("rootfs should have been removed") - } - - if newimg.V1Image.OS != "os" { - t.Error("os should have been preserved") - } -} diff --git a/layer/empty.go b/layer/empty.go deleted file mode 100644 index 5e1cb184b..000000000 --- a/layer/empty.go +++ /dev/null @@ -1,48 +0,0 @@ -package layer - -import ( - "archive/tar" - "bytes" - "io" - "io/ioutil" -) - -// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - -// (1024 NULL bytes) -const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") - -type emptyLayer struct{} - -// EmptyLayer is a layer that corresponds to empty tar. 
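// The DigestSHA256EmptyTar constant above is easy to verify: an empty archive
// written by archive/tar is just the end-of-archive marker, two 512-byte zero
// blocks (the 1024 NULL bytes the comment mentions), and its SHA-256 should
// print as 5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef.
package main

import (
	"archive/tar"
	"bytes"
	"crypto/sha256"
	"fmt"
)

func main() {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	if err := tw.Close(); err != nil { // writes only the end-of-archive marker
		panic(err)
	}
	fmt.Println("bytes:", buf.Len()) // 1024
	fmt.Printf("sha256:%x\n", sha256.Sum256(buf.Bytes()))
}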
-var EmptyLayer = &emptyLayer{} - -func (el *emptyLayer) TarStream() (io.ReadCloser, error) { - buf := new(bytes.Buffer) - tarWriter := tar.NewWriter(buf) - tarWriter.Close() - return ioutil.NopCloser(buf), nil -} - -func (el *emptyLayer) ChainID() ChainID { - return ChainID(DigestSHA256EmptyTar) -} - -func (el *emptyLayer) DiffID() DiffID { - return DigestSHA256EmptyTar -} - -func (el *emptyLayer) Parent() Layer { - return nil -} - -func (el *emptyLayer) Size() (size int64, err error) { - return 0, nil -} - -func (el *emptyLayer) DiffSize() (size int64, err error) { - return 0, nil -} - -func (el *emptyLayer) Metadata() (map[string]string, error) { - return make(map[string]string), nil -} diff --git a/layer/empty_test.go b/layer/empty_test.go deleted file mode 100644 index c22da7665..000000000 --- a/layer/empty_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package layer - -import ( - "io" - "testing" - - "github.com/docker/distribution/digest" -) - -func TestEmptyLayer(t *testing.T) { - if EmptyLayer.ChainID() != ChainID(DigestSHA256EmptyTar) { - t.Fatal("wrong ID for empty layer") - } - - if EmptyLayer.DiffID() != DigestSHA256EmptyTar { - t.Fatal("wrong DiffID for empty layer") - } - - if EmptyLayer.Parent() != nil { - t.Fatal("expected no parent for empty layer") - } - - if size, err := EmptyLayer.Size(); err != nil || size != 0 { - t.Fatal("expected zero size for empty layer") - } - - if diffSize, err := EmptyLayer.DiffSize(); err != nil || diffSize != 0 { - t.Fatal("expected zero diffsize for empty layer") - } - - tarStream, err := EmptyLayer.TarStream() - if err != nil { - t.Fatalf("error streaming tar for empty layer: %v", err) - } - - digester := digest.Canonical.New() - _, err = io.Copy(digester.Hash(), tarStream) - - if err != nil { - t.Fatalf("error hashing empty tar layer: %v", err) - } - - if digester.Digest() != digest.Digest(DigestSHA256EmptyTar) { - t.Fatal("empty layer tar stream hashes to wrong value") - } -} diff --git a/layer/filestore.go b/layer/filestore.go deleted file mode 100644 index 6361af647..000000000 --- a/layer/filestore.go +++ /dev/null @@ -1,354 +0,0 @@ -package layer - -import ( - "compress/gzip" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/ioutils" -) - -var ( - stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) - supportedAlgorithms = []digest.Algorithm{ - digest.SHA256, - // digest.SHA384, // Currently not used - // digest.SHA512, // Currently not used - } -) - -type fileMetadataStore struct { - root string -} - -type fileMetadataTransaction struct { - store *fileMetadataStore - root string -} - -// NewFSMetadataStore returns an instance of a metadata store -// which is backed by files on disk using the provided root -// as the root of metadata files. 
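// The store created below keeps one directory per layer and per mount; the
// file names come from the Set*/Get* methods that follow. Roughly:
//
//   <root>/sha256/<chainID-hex>/{size,parent,diff,cache-id,descriptor.json,tar-split.json.gz}
//   <root>/mounts/<mount-name>/{mount-id,init-id,parent}
//   <root>/tmp/layer-*  (in-flight transactions, renamed into place on Commit)
//
// A sketch of the path construction, with a hypothetical root and chain ID:
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func layerFile(root, chainID, name string) string {
	parts := strings.SplitN(chainID, ":", 2) // "sha256:5f70..." -> algorithm, hex
	return filepath.Join(root, parts[0], parts[1], name)
}

func main() {
	fmt.Println(layerFile("/var/lib/docker/image/vfs/layerdb", "sha256:5f70bf18a086", "diff"))
}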
-func NewFSMetadataStore(root string) (MetadataStore, error) { - if err := os.MkdirAll(root, 0700); err != nil { - return nil, err - } - return &fileMetadataStore{ - root: root, - }, nil -} - -func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { - dgst := digest.Digest(layer) - return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) -} - -func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { - return filepath.Join(fms.getLayerDirectory(layer), filename) -} - -func (fms *fileMetadataStore) getMountDirectory(mount string) string { - return filepath.Join(fms.root, "mounts", mount) -} - -func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { - return filepath.Join(fms.getMountDirectory(mount), filename) -} - -func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { - tmpDir := filepath.Join(fms.root, "tmp") - if err := os.MkdirAll(tmpDir, 0755); err != nil { - return nil, err - } - - td, err := ioutil.TempDir(tmpDir, "layer-") - if err != nil { - return nil, err - } - // Create a new tempdir - return &fileMetadataTransaction{ - store: fms, - root: td, - }, nil -} - -func (fm *fileMetadataTransaction) SetSize(size int64) error { - content := fmt.Sprintf("%d", size) - return ioutil.WriteFile(filepath.Join(fm.root, "size"), []byte(content), 0644) -} - -func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { - return ioutil.WriteFile(filepath.Join(fm.root, "parent"), []byte(digest.Digest(parent).String()), 0644) -} - -func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { - return ioutil.WriteFile(filepath.Join(fm.root, "diff"), []byte(digest.Digest(diff).String()), 0644) -} - -func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { - return ioutil.WriteFile(filepath.Join(fm.root, "cache-id"), []byte(cacheID), 0644) -} - -func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { - jsonRef, err := json.Marshal(ref) - if err != nil { - return err - } - return ioutil.WriteFile(filepath.Join(fm.root, "descriptor.json"), jsonRef, 0644) -} - -func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { - f, err := os.OpenFile(filepath.Join(fm.root, "tar-split.json.gz"), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, err - } - var wc io.WriteCloser - if compressInput { - wc = gzip.NewWriter(f) - } else { - wc = f - } - - return ioutils.NewWriteCloserWrapper(wc, func() error { - wc.Close() - return f.Close() - }), nil -} - -func (fm *fileMetadataTransaction) Commit(layer ChainID) error { - finalDir := fm.store.getLayerDirectory(layer) - if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { - return err - } - return os.Rename(fm.root, finalDir) -} - -func (fm *fileMetadataTransaction) Cancel() error { - return os.RemoveAll(fm.root) -} - -func (fm *fileMetadataTransaction) String() string { - return fm.root -} - -func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) - if err != nil { - return 0, err - } - - size, err := strconv.ParseInt(string(content), 10, 64) - if err != nil { - return 0, err - } - - return size, nil -} - -func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - - dgst, err := 
digest.ParseDigest(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return ChainID(dgst), nil -} - -func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) - if err != nil { - return "", err - } - - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return DiffID(dgst), nil -} - -func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) - if err != nil { - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid cache id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) - if err != nil { - if os.IsNotExist(err) { - // only return empty descriptor to represent what is stored - return distribution.Descriptor{}, nil - } - return distribution.Descriptor{}, err - } - - var ref distribution.Descriptor - err = json.Unmarshal(content, &ref) - if err != nil { - return distribution.Descriptor{}, err - } - return ref, err -} - -func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { - fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) - if err != nil { - return nil, err - } - f, err := gzip.NewReader(fz) - if err != nil { - return nil, err - } - - return ioutils.NewReadCloserWrapper(f, func() error { - f.Close() - return fz.Close() - }), nil -} - -func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) -} - -func (fms *fileMetadataStore) SetInitID(mount string, init string) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) -} - -func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) -} - -func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) - if err != nil { - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid mount id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid init id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { - content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) - if err != nil { - if os.IsNotExist(err) { - return "", 
nil - } - return "", err - } - - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return ChainID(dgst), nil -} - -func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { - var ids []ChainID - for _, algorithm := range supportedAlgorithms { - fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) - if err != nil { - if os.IsNotExist(err) { - continue - } - return nil, nil, err - } - - for _, fi := range fileInfos { - if fi.IsDir() && fi.Name() != "mounts" { - dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) - if err := dgst.Validate(); err != nil { - logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) - } else { - ids = append(ids, ChainID(dgst)) - } - } - } - } - - fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) - if err != nil { - if os.IsNotExist(err) { - return ids, []string{}, nil - } - return nil, nil, err - } - - var mounts []string - for _, fi := range fileInfos { - if fi.IsDir() { - mounts = append(mounts, fi.Name()) - } - } - - return ids, mounts, nil -} - -func (fms *fileMetadataStore) Remove(layer ChainID) error { - return os.RemoveAll(fms.getLayerDirectory(layer)) -} - -func (fms *fileMetadataStore) RemoveMount(mount string) error { - return os.RemoveAll(fms.getMountDirectory(mount)) -} diff --git a/layer/filestore_test.go b/layer/filestore_test.go deleted file mode 100644 index 55e3b2853..000000000 --- a/layer/filestore_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package layer - -import ( - "fmt" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "strings" - "syscall" - "testing" - - "github.com/docker/distribution/digest" -) - -func randomLayerID(seed int64) ChainID { - r := rand.New(rand.NewSource(seed)) - - return ChainID(digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63())))) -} - -func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) { - td, err := ioutil.TempDir("", "layers-") - if err != nil { - t.Fatal(err) - } - fms, err := NewFSMetadataStore(td) - if err != nil { - t.Fatal(err) - } - - return fms.(*fileMetadataStore), td, func() { - if err := os.RemoveAll(td); err != nil { - t.Logf("Failed to cleanup %q: %s", td, err) - } - } -} - -func assertNotDirectoryError(t *testing.T, err error) { - perr, ok := err.(*os.PathError) - if !ok { - t.Fatalf("Unexpected error %#v, expected path error", err) - } - - if perr.Err != syscall.ENOTDIR { - t.Fatalf("Unexpected error %s, expected %s", perr.Err, syscall.ENOTDIR) - } -} - -func TestCommitFailure(t *testing.T) { - fms, td, cleanup := newFileMetadataStore(t) - defer cleanup() - - if err := ioutil.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0644); err != nil { - t.Fatal(err) - } - - tx, err := fms.StartTransaction() - if err != nil { - t.Fatal(err) - } - - if err := tx.SetSize(0); err != nil { - t.Fatal(err) - } - - err = tx.Commit(randomLayerID(5)) - if err == nil { - t.Fatalf("Expected error committing with invalid layer parent directory") - } - assertNotDirectoryError(t, err) -} - -func TestStartTransactionFailure(t *testing.T) { - fms, td, cleanup := newFileMetadataStore(t) - defer cleanup() - - if err := ioutil.WriteFile(filepath.Join(td, "tmp"), []byte("was here first!"), 0644); err != nil { - t.Fatal(err) - } - - _, err := fms.StartTransaction() - if err == nil { - t.Fatalf("Expected error starting transaction with invalid layer parent directory") - } - assertNotDirectoryError(t, err) - - if err := os.Remove(filepath.Join(td, "tmp")); 
err != nil { - t.Fatal(err) - } - - tx, err := fms.StartTransaction() - if err != nil { - t.Fatal(err) - } - - if expected := filepath.Join(td, "tmp"); strings.HasPrefix(expected, tx.String()) { - t.Fatalf("Unexpected transaction string %q, expected prefix %q", tx.String(), expected) - } - - if err := tx.Cancel(); err != nil { - t.Fatal(err) - } -} diff --git a/layer/layer.go b/layer/layer.go deleted file mode 100644 index 3881447b6..000000000 --- a/layer/layer.go +++ /dev/null @@ -1,270 +0,0 @@ -// Package layer is a package for managing read-only -// and read-write mounts on the union file system -// driver. Read-only mounts are referenced using a -// content hash and are protected from mutation in -// the exposed interface. The tar format is used -// to create read-only layers and export both -// read-only and writable layers. The exported -// tar data for a read-only layer should match -// the tar used to create the layer. -package layer - -import ( - "errors" - "io" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/archive" -) - -var ( - // ErrLayerDoesNotExist is used when an operation is - // attempted on a layer which does not exist. - ErrLayerDoesNotExist = errors.New("layer does not exist") - - // ErrLayerNotRetained is used when a release is - // attempted on a layer which is not retained. - ErrLayerNotRetained = errors.New("layer not retained") - - // ErrMountDoesNotExist is used when an operation is - // attempted on a mount layer which does not exist. - ErrMountDoesNotExist = errors.New("mount does not exist") - - // ErrMountNameConflict is used when a mount is attempted - // to be created but there is already a mount with the name - // used for creation. - ErrMountNameConflict = errors.New("mount already exists with name") - - // ErrActiveMount is used when an operation on a - // mount is attempted but the layer is still - // mounted and the operation cannot be performed. - ErrActiveMount = errors.New("mount still active") - - // ErrNotMounted is used when requesting an active - // mount but the layer is not mounted. - ErrNotMounted = errors.New("not mounted") - - // ErrMaxDepthExceeded is used when a layer is attempted - // to be created which would result in a layer depth - // greater than the 125 max. - ErrMaxDepthExceeded = errors.New("max depth exceeded") - - // ErrNotSupported is used when the action is not supported - // on the current platform - ErrNotSupported = errors.New("not supported on this platform") -) - -// ChainID is the content-addressable ID of a layer. -type ChainID digest.Digest - -// String returns a string rendition of a layer ID -func (id ChainID) String() string { - return string(id) -} - -// DiffID is the hash of an individual layer tar. -type DiffID digest.Digest - -// String returns a string rendition of a layer DiffID -func (diffID DiffID) String() string { - return string(diffID) -} - -// TarStreamer represents an object which may -// have its contents exported as a tar stream. -type TarStreamer interface { - // TarStream returns a tar archive stream - // for the contents of a layer. - TarStream() (io.ReadCloser, error) -} - -// Layer represents a read-only layer -type Layer interface { - TarStreamer - - // ChainID returns the content hash of the entire layer chain. The hash - // chain is made up of DiffID of top layer and all of its parents. - ChainID() ChainID - - // DiffID returns the content hash of the layer - // tar stream used to create this layer.
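// The sentinel errors above are matched by plain equality at call sites (see
// the err != ErrMountNameConflict check in the tests later in this patch). A
// minimal sketch of the pattern, with a hypothetical lookup:
package main

import (
	"errors"
	"fmt"
)

var errLayerDoesNotExist = errors.New("layer does not exist")

func getLayer(id string) error {
	return errLayerDoesNotExist // hypothetical: the lookup always misses
}

func main() {
	if err := getLayer("sha256:abcd"); err == errLayerDoesNotExist {
		fmt.Println("unknown layer, caller may fall back or ignore")
	}
}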
- DiffID() DiffID - - // Parent returns the next layer in the layer chain. - Parent() Layer - - // Size returns the size of the entire layer chain. The size - // is calculated from the total size of all files in the layers. - Size() (int64, error) - - // DiffSize returns the size difference of the top layer - // from parent layer. - DiffSize() (int64, error) - - // Metadata returns the low level storage metadata associated - // with layer. - Metadata() (map[string]string, error) -} - -// RWLayer represents a layer which is -// read and writable -type RWLayer interface { - TarStreamer - - // Name of mounted layer - Name() string - - // Parent returns the layer which the writable - // layer was created from. - Parent() Layer - - // Mount mounts the RWLayer and returns the filesystem path - // to the writable layer. - Mount(mountLabel string) (string, error) - - // Unmount unmounts the RWLayer. This should be called - // for every mount. If there are multiple mount calls - // this operation will only decrement the internal mount counter. - Unmount() error - - // Size represents the size of the writable layer - // as calculated by the total size of the files - // changed in the mutable layer. - Size() (int64, error) - - // Changes returns the set of changes for the mutable layer - // from the base layer. - Changes() ([]archive.Change, error) - - // Metadata returns the low level metadata for the mutable layer - Metadata() (map[string]string, error) -} - -// Metadata holds information about a -// read-only layer -type Metadata struct { - // ChainID is the content hash of the layer - ChainID ChainID - - // DiffID is the hash of the tar data used to - // create the layer - DiffID DiffID - - // Size is the size of the layer and all parents - Size int64 - - // DiffSize is the size of the top layer - DiffSize int64 -} - -// MountInit is a function to initialize a -// writable mount. Changes made here will -// not be included in the Tar stream of the -// RWLayer. -type MountInit func(root string) error - -// Store represents a backend for managing both -// read-only and read-write layers. -type Store interface { - Register(io.Reader, ChainID) (Layer, error) - Get(ChainID) (Layer, error) - Release(Layer) ([]Metadata, error) - - CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) - GetRWLayer(id string) (RWLayer, error) - GetMountID(id string) (string, error) - ReleaseRWLayer(RWLayer) ([]Metadata, error) - - Cleanup() error - DriverStatus() [][2]string - DriverName() string -} - -// DescribableStore represents a layer store capable of storing -// descriptors for layers. -type DescribableStore interface { - RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error) -} - -// MetadataTransaction represents functions for setting layer metadata -// with a single transaction. -type MetadataTransaction interface { - SetSize(int64) error - SetParent(parent ChainID) error - SetDiffID(DiffID) error - SetCacheID(string) error - SetDescriptor(distribution.Descriptor) error - TarSplitWriter(compressInput bool) (io.WriteCloser, error) - - Commit(ChainID) error - Cancel() error - String() string -} - -// MetadataStore represents a backend for persisting -// metadata about layers and providing the metadata -// for restoring a Store. -type MetadataStore interface { - // StartTransaction starts an update for new metadata - // which will be used to represent an ID on commit.
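// A sketch of the round trip the Store interface above supports, in the same
// order the createLayer test helper later in this patch uses it; the returned
// Layer holds a reference the caller must eventually Release:
func commitRWLayer(ls Store, parent ChainID, name string) (Layer, error) {
	mnt, err := ls.CreateRWLayer(name, parent, "", nil, nil)
	if err != nil {
		return nil, err
	}
	if _, err := mnt.Mount(""); err != nil { // returns the writable path
		return nil, err
	}
	// ... populate the mounted path here ...
	ts, err := mnt.TarStream()
	if err != nil {
		return nil, err
	}
	defer ts.Close()
	layer, err := ls.Register(ts, parent) // content-addressed read-only layer
	if err != nil {
		return nil, err
	}
	if err := mnt.Unmount(); err != nil {
		return nil, err
	}
	_, err = ls.ReleaseRWLayer(mnt)
	return layer, err
}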
- StartTransaction() (MetadataTransaction, error) - - GetSize(ChainID) (int64, error) - GetParent(ChainID) (ChainID, error) - GetDiffID(ChainID) (DiffID, error) - GetCacheID(ChainID) (string, error) - GetDescriptor(ChainID) (distribution.Descriptor, error) - TarSplitReader(ChainID) (io.ReadCloser, error) - - SetMountID(string, string) error - SetInitID(string, string) error - SetMountParent(string, ChainID) error - - GetMountID(string) (string, error) - GetInitID(string) (string, error) - GetMountParent(string) (ChainID, error) - - // List returns the full list of referenced - // read-only and read-write layers - List() ([]ChainID, []string, error) - - Remove(ChainID) error - RemoveMount(string) error -} - -// CreateChainID returns ID for a layerDigest slice -func CreateChainID(dgsts []DiffID) ChainID { - return createChainIDFromParent("", dgsts...) -} - -func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { - if len(dgsts) == 0 { - return parent - } - if parent == "" { - return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) - } - // H = "H(n-1) SHA256(n)" - dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) - return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) -} - -// ReleaseAndLog releases the provided layer from the given layer -// store, logging any error and release metadata -func ReleaseAndLog(ls Store, l Layer) { - metadata, err := ls.Release(l) - if err != nil { - logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) - } - LogReleaseMetadata(metadata) -} - -// LogReleaseMetadata logs a metadata array; use it to -// ensure consistent logging of release metadata -func LogReleaseMetadata(metadatas []Metadata) { - for _, metadata := range metadatas { - logrus.Infof("Layer %s cleaned up", metadata.ChainID) - } -} diff --git a/layer/layer_store.go b/layer/layer_store.go deleted file mode 100644 index 6d5cb2599..000000000 --- a/layer/layer_store.go +++ /dev/null @@ -1,659 +0,0 @@ -package layer - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/stringid" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// maxLayerDepth represents the maximum number of -// layers which can be chained together. 125 was -// chosen to account for the 127 max in some -// graphdrivers plus the 2 additional layers -// used to create a rwlayer.
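// The recursion in createChainIDFromParent above reduces to
//   ChainID(L0)     = DiffID(L0)
//   ChainID(L0..Ln) = SHA256(ChainID(L0..Ln-1) + " " + DiffID(Ln))
// A self-contained check with stdlib sha256 standing in for the digest
// package, over two hypothetical diff IDs:
package main

import (
	"crypto/sha256"
	"fmt"
)

func chainID(diffIDs ...string) string {
	if len(diffIDs) == 0 {
		return ""
	}
	id := diffIDs[0]
	for _, d := range diffIDs[1:] {
		id = fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(id+" "+d)))
	}
	return id
}

func main() {
	fmt.Println(chainID("sha256:aaaa", "sha256:bbbb"))
}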
-const maxLayerDepth = 125 - -type layerStore struct { - store MetadataStore - driver graphdriver.Driver - - layerMap map[ChainID]*roLayer - layerL sync.Mutex - - mounts map[string]*mountedLayer - mountL sync.Mutex -} - -// StoreOptions are the options used to create a new Store instance -type StoreOptions struct { - StorePath string - MetadataStorePathTemplate string - GraphDriver string - GraphDriverOptions []string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap -} - -// NewStoreFromOptions creates a new Store instance -func NewStoreFromOptions(options StoreOptions) (Store, error) { - driver, err := graphdriver.New( - options.StorePath, - options.GraphDriver, - options.GraphDriverOptions, - options.UIDMaps, - options.GIDMaps) - if err != nil { - return nil, fmt.Errorf("error initializing graphdriver: %v", err) - } - logrus.Debugf("Using graph driver %s", driver) - - fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) - if err != nil { - return nil, err - } - - return NewStoreFromGraphDriver(fms, driver) -} - -// NewStoreFromGraphDriver creates a new Store instance using the provided -// metadata store and graph driver. The metadata store will be used to restore -// the Store. -func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) { - ls := &layerStore{ - store: store, - driver: driver, - layerMap: map[ChainID]*roLayer{}, - mounts: map[string]*mountedLayer{}, - } - - ids, mounts, err := store.List() - if err != nil { - return nil, err - } - - for _, id := range ids { - l, err := ls.loadLayer(id) - if err != nil { - logrus.Debugf("Failed to load layer %s: %s", id, err) - continue - } - if l.parent != nil { - l.parent.referenceCount++ - } - } - - for _, mount := range mounts { - if err := ls.loadMount(mount); err != nil { - logrus.Debugf("Failed to load mount %s: %s", mount, err) - } - } - - return ls, nil -} - -func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { - cl, ok := ls.layerMap[layer] - if ok { - return cl, nil - } - - diff, err := ls.store.GetDiffID(layer) - if err != nil { - return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) - } - - size, err := ls.store.GetSize(layer) - if err != nil { - return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) - } - - cacheID, err := ls.store.GetCacheID(layer) - if err != nil { - return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) - } - - parent, err := ls.store.GetParent(layer) - if err != nil { - return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) - } - - descriptor, err := ls.store.GetDescriptor(layer) - if err != nil { - return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) - } - - cl = &roLayer{ - chainID: layer, - diffID: diff, - size: size, - cacheID: cacheID, - layerStore: ls, - references: map[Layer]struct{}{}, - descriptor: descriptor, - } - - if parent != "" { - p, err := ls.loadLayer(parent) - if err != nil { - return nil, err - } - cl.parent = p - } - - ls.layerMap[cl.chainID] = cl - - return cl, nil -} - -func (ls *layerStore) loadMount(mount string) error { - if _, ok := ls.mounts[mount]; ok { - return nil - } - - mountID, err := ls.store.GetMountID(mount) - if err != nil { - return err - } - - initID, err := ls.store.GetInitID(mount) - if err != nil { - return err - } - - parent, err := ls.store.GetMountParent(mount) - if err != nil { - return err - } - - ml := &mountedLayer{ - name: mount, - mountID: mountID, - initID: initID, - layerStore: ls, - 
references: map[RWLayer]*referencedRWLayer{}, - } - - if parent != "" { - p, err := ls.loadLayer(parent) - if err != nil { - return err - } - ml.parent = p - - p.referenceCount++ - } - - ls.mounts[ml.name] = ml - - return nil -} - -func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { - digester := digest.Canonical.New() - tr := io.TeeReader(ts, digester.Hash()) - - tsw, err := tx.TarSplitWriter(true) - if err != nil { - return err - } - metaPacker := storage.NewJSONPacker(tsw) - defer tsw.Close() - - // we're passing nil here for the file putter, because the ApplyDiff will - // handle the extraction of the archive - rdr, err := asm.NewInputTarStream(tr, metaPacker, nil) - if err != nil { - return err - } - - applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, archive.Reader(rdr)) - if err != nil { - return err - } - - // Discard trailing data but ensure metadata is picked up to reconstruct stream - io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed - - layer.size = applySize - layer.diffID = DiffID(digester.Digest()) - - logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) - - return nil -} - -func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { - return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{}) -} - -func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { - // err is used to hold the error which will always trigger - // cleanup of created sources but may not be an error returned - // to the caller (already exists). - var err error - var pid string - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - pid = p.cacheID - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - if p.depth() >= maxLayerDepth { - err = ErrMaxDepthExceeded - return nil, err - } - } - - // Create new roLayer - layer := &roLayer{ - parent: p, - cacheID: stringid.GenerateRandomID(), - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - descriptor: descriptor, - } - - if err = ls.driver.Create(layer.cacheID, pid, "", nil); err != nil { - return nil, err - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) - if err := ls.driver.Remove(layer.cacheID); err != nil { - logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) - } - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - if err = ls.applyTar(tx, ts, pid, layer); err != nil { - return nil, err - } - - if layer.parent == nil { - layer.chainID = ChainID(layer.diffID) - } else { - layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) - } - - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return the error - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return
layer.getReference(), nil -} - -func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { - l, ok := ls.layerMap[layer] - if !ok { - return nil - } - - l.referenceCount++ - - return l -} - -func (ls *layerStore) get(l ChainID) *roLayer { - ls.layerL.Lock() - defer ls.layerL.Unlock() - return ls.getWithoutLock(l) -} - -func (ls *layerStore) Get(l ChainID) (Layer, error) { - ls.layerL.Lock() - defer ls.layerL.Unlock() - - layer := ls.getWithoutLock(l) - if layer == nil { - return nil, ErrLayerDoesNotExist - } - - return layer.getReference(), nil -} - -func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { - err := ls.driver.Remove(layer.cacheID) - if err != nil { - return err - } - - err = ls.store.Remove(layer.chainID) - if err != nil { - return err - } - metadata.DiffID = layer.diffID - metadata.ChainID = layer.chainID - metadata.Size, err = layer.Size() - if err != nil { - return err - } - metadata.DiffSize = layer.size - - return nil -} - -func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { - depth := 0 - removed := []Metadata{} - for { - if l.referenceCount == 0 { - panic("layer not retained") - } - l.referenceCount-- - if l.referenceCount != 0 { - return removed, nil - } - - if len(removed) == 0 && depth > 0 { - panic("cannot remove layer with child") - } - if l.hasReferences() { - panic("cannot delete referenced layer") - } - var metadata Metadata - if err := ls.deleteLayer(l, &metadata); err != nil { - return nil, err - } - - delete(ls.layerMap, l.chainID) - removed = append(removed, metadata) - - if l.parent == nil { - return removed, nil - } - - depth++ - l = l.parent - } -} - -func (ls *layerStore) Release(l Layer) ([]Metadata, error) { - ls.layerL.Lock() - defer ls.layerL.Unlock() - layer, ok := ls.layerMap[l.ChainID()] - if !ok { - return []Metadata{}, nil - } - if !layer.hasReference(l) { - return nil, ErrLayerNotRetained - } - - layer.deleteReference(l) - - return ls.releaseLayer(layer) -} - -func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[name] - if ok { - return nil, ErrMountNameConflict - } - - var err error - var pid string - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - pid = p.cacheID - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - m = &mountedLayer{ - name: name, - parent: p, - mountID: ls.mountID(name), - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - if initFunc != nil { - pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) - if err != nil { - return nil, err - } - m.initID = pid - } - - if err = ls.driver.CreateReadWrite(m.mountID, pid, "", storageOpt); err != nil { - return nil, err - } - - if err = ls.saveMount(m); err != nil { - return nil, err - } - - return m.getReference(), nil -} - -func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - mount, ok := ls.mounts[id] - if !ok { - return nil, ErrMountDoesNotExist - } - - return mount.getReference(), nil -} - -func (ls *layerStore) GetMountID(id string) (string, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - mount, ok := ls.mounts[id] - if !ok { - return "", ErrMountDoesNotExist - } - logrus.Debugf("GetMountID id: %s 
-> mountID: %s", id, mount.mountID) - - return mount.mountID, nil -} - -func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[l.Name()] - if !ok { - return []Metadata{}, nil - } - - if err := m.deleteReference(l); err != nil { - return nil, err - } - - if m.hasReferences() { - return []Metadata{}, nil - } - - if err := ls.driver.Remove(m.mountID); err != nil { - logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) - m.retakeReference(l) - return nil, err - } - - if m.initID != "" { - if err := ls.driver.Remove(m.initID); err != nil { - logrus.Errorf("Error removing init layer %s: %s", m.name, err) - m.retakeReference(l) - return nil, err - } - } - - if err := ls.store.RemoveMount(m.name); err != nil { - logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) - m.retakeReference(l) - return nil, err - } - - delete(ls.mounts, m.Name()) - - ls.layerL.Lock() - defer ls.layerL.Unlock() - if m.parent != nil { - return ls.releaseLayer(m.parent) - } - - return []Metadata{}, nil -} - -func (ls *layerStore) saveMount(mount *mountedLayer) error { - if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { - return err - } - - if mount.initID != "" { - if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { - return err - } - } - - if mount.parent != nil { - if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { - return err - } - } - - ls.mounts[mount.name] = mount - - return nil -} - -func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { - // Use "-init" to maintain compatibility with graph drivers - // which are expecting this layer with this special name. If all - // graph drivers can be updated to not rely on knowing about this layer - // then the initID should be randomly generated. 
- initID := fmt.Sprintf("%s-init", graphID) - - if err := ls.driver.Create(initID, parent, mountLabel, storageOpt); err != nil { - return "", err - } - p, err := ls.driver.Get(initID, "") - if err != nil { - return "", err - } - - if err := initFunc(p); err != nil { - ls.driver.Put(initID) - return "", err - } - - if err := ls.driver.Put(initID); err != nil { - return "", err - } - - return initID, nil -} - -func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { - diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) - if !ok { - diffDriver = &naiveDiffPathDriver{ls.driver} - } - - defer metadata.Close() - - // get our relative path to the container - fileGetCloser, err := diffDriver.DiffGetter(graphID) - if err != nil { - return err - } - defer fileGetCloser.Close() - - metaUnpacker := storage.NewJSONUnpacker(metadata) - upackerCounter := &unpackSizeCounter{metaUnpacker, size} - logrus.Debugf("Assembling tar data for %s", graphID) - return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) -} - -func (ls *layerStore) Cleanup() error { - return ls.driver.Cleanup() -} - -func (ls *layerStore) DriverStatus() [][2]string { - return ls.driver.Status() -} - -func (ls *layerStore) DriverName() string { - return ls.driver.String() -} - -type naiveDiffPathDriver struct { - graphdriver.Driver -} - -type fileGetPutter struct { - storage.FileGetter - driver graphdriver.Driver - id string -} - -func (w *fileGetPutter) Close() error { - return w.driver.Put(w.id) -} - -func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { - p, err := n.Driver.Get(id, "") - if err != nil { - return nil, err - } - return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil -} diff --git a/layer/layer_store_windows.go b/layer/layer_store_windows.go deleted file mode 100644 index 1276a912c..000000000 --- a/layer/layer_store_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package layer - -import ( - "io" - - "github.com/docker/distribution" -) - -func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { - return ls.registerWithDescriptor(ts, parent, descriptor) -} diff --git a/layer/layer_test.go b/layer/layer_test.go deleted file mode 100644 index 8e6817c96..000000000 --- a/layer/layer_test.go +++ /dev/null @@ -1,768 +0,0 @@ -package layer - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/daemon/graphdriver/vfs" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/stringid" -) - -func init() { - graphdriver.ApplyUncompressedLayer = archive.UnpackLayer - vfs.CopyWithTar = archive.CopyWithTar -} - -func newVFSGraphDriver(td string) (graphdriver.Driver, error) { - uidMap := []idtools.IDMap{ - { - ContainerID: 0, - HostID: os.Getuid(), - Size: 1, - }, - } - gidMap := []idtools.IDMap{ - { - ContainerID: 0, - HostID: os.Getgid(), - Size: 1, - }, - } - - return graphdriver.GetDriver("vfs", td, nil, uidMap, gidMap) -} - -func newTestGraphDriver(t *testing.T) (graphdriver.Driver, func()) { - td, err := ioutil.TempDir("", "graph-") - if err != nil { - t.Fatal(err) - } - - driver, err := newVFSGraphDriver(td) - if err != nil { - t.Fatal(err) - } - - return driver, func() { - os.RemoveAll(td) - } -} - -func newTestStore(t 
*testing.T) (Store, string, func()) { - td, err := ioutil.TempDir("", "layerstore-") - if err != nil { - t.Fatal(err) - } - - graph, graphcleanup := newTestGraphDriver(t) - fms, err := NewFSMetadataStore(td) - if err != nil { - t.Fatal(err) - } - ls, err := NewStoreFromGraphDriver(fms, graph) - if err != nil { - t.Fatal(err) - } - - return ls, td, func() { - graphcleanup() - os.RemoveAll(td) - } -} - -type layerInit func(root string) error - -func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { - containerID := stringid.GenerateRandomID() - mount, err := ls.CreateRWLayer(containerID, parent, "", nil, nil) - if err != nil { - return nil, err - } - - path, err := mount.Mount("") - if err != nil { - return nil, err - } - - if err := layerFunc(path); err != nil { - return nil, err - } - - ts, err := mount.TarStream() - if err != nil { - return nil, err - } - defer ts.Close() - - layer, err := ls.Register(ts, parent) - if err != nil { - return nil, err - } - - if err := mount.Unmount(); err != nil { - return nil, err - } - - if _, err := ls.ReleaseRWLayer(mount); err != nil { - return nil, err - } - - return layer, nil -} - -type FileApplier interface { - ApplyFile(root string) error -} - -type testFile struct { - name string - content []byte - permission os.FileMode -} - -func newTestFile(name string, content []byte, perm os.FileMode) FileApplier { - return &testFile{ - name: name, - content: content, - permission: perm, - } -} - -func (tf *testFile) ApplyFile(root string) error { - fullPath := filepath.Join(root, tf.name) - if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { - return err - } - // Check if already exists - if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { - if err := os.Chmod(fullPath, tf.permission); err != nil { - return err - } - } - if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil { - return err - } - return nil -} - -func initWithFiles(files ...FileApplier) layerInit { - return func(root string) error { - for _, f := range files { - if err := f.ApplyFile(root); err != nil { - return err - } - } - return nil - } -} - -func getCachedLayer(l Layer) *roLayer { - if rl, ok := l.(*referencedCacheLayer); ok { - return rl.roLayer - } - return l.(*roLayer) -} - -func getMountLayer(l RWLayer) *mountedLayer { - return l.(*referencedRWLayer).mountedLayer -} - -func createMetadata(layers ...Layer) []Metadata { - metadata := make([]Metadata, len(layers)) - for i := range layers { - size, err := layers[i].Size() - if err != nil { - panic(err) - } - - metadata[i].ChainID = layers[i].ChainID() - metadata[i].DiffID = layers[i].DiffID() - metadata[i].Size = size - metadata[i].DiffSize = getCachedLayer(layers[i]).size - } - - return metadata -} - -func assertMetadata(t *testing.T, metadata, expectedMetadata []Metadata) { - if len(metadata) != len(expectedMetadata) { - t.Fatalf("Unexpected number of deletes %d, expected %d", len(metadata), len(expectedMetadata)) - } - - for i := range metadata { - if metadata[i] != expectedMetadata[i] { - t.Errorf("Unexpected metadata\n\tExpected: %#v\n\tActual: %#v", expectedMetadata[i], metadata[i]) - } - } - if t.Failed() { - t.FailNow() - } -} - -func releaseAndCheckDeleted(t *testing.T, ls Store, layer Layer, removed ...Layer) { - layerCount := len(ls.(*layerStore).layerMap) - expectedMetadata := createMetadata(removed...) 
- metadata, err := ls.Release(layer) - if err != nil { - t.Fatal(err) - } - - assertMetadata(t, metadata, expectedMetadata) - - if expected := layerCount - len(removed); len(ls.(*layerStore).layerMap) != expected { - t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) - } -} - -func cacheID(l Layer) string { - return getCachedLayer(l).cacheID -} - -func assertLayerEqual(t *testing.T, l1, l2 Layer) { - if l1.ChainID() != l2.ChainID() { - t.Fatalf("Mismatched ID: %s vs %s", l1.ChainID(), l2.ChainID()) - } - if l1.DiffID() != l2.DiffID() { - t.Fatalf("Mismatched DiffID: %s vs %s", l1.DiffID(), l2.DiffID()) - } - - size1, err := l1.Size() - if err != nil { - t.Fatal(err) - } - - size2, err := l2.Size() - if err != nil { - t.Fatal(err) - } - - if size1 != size2 { - t.Fatalf("Mismatched size: %d vs %d", size1, size2) - } - - if cacheID(l1) != cacheID(l2) { - t.Fatalf("Mismatched cache id: %s vs %s", cacheID(l1), cacheID(l2)) - } - - p1 := l1.Parent() - p2 := l2.Parent() - if p1 != nil && p2 != nil { - assertLayerEqual(t, p1, p2) - } else if p1 != nil || p2 != nil { - t.Fatalf("Mismatched parents: %v vs %v", p1, p2) - } -} - -func TestMountAndRegister(t *testing.T) { - ls, _, cleanup := newTestStore(t) - defer cleanup() - - li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644)) - layer, err := createLayer(ls, "", li) - if err != nil { - t.Fatal(err) - } - - size, _ := layer.Size() - t.Logf("Layer size: %d", size) - - mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), "", nil, nil) - if err != nil { - t.Fatal(err) - } - - path2, err := mount2.Mount("") - if err != nil { - t.Fatal(err) - } - - b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt")) - if err != nil { - t.Fatal(err) - } - - if expected := "some test data"; string(b) != expected { - t.Fatalf("Wrong file data, expected %q, got %q", expected, string(b)) - } - - if err := mount2.Unmount(); err != nil { - t.Fatal(err) - } - - if _, err := ls.ReleaseRWLayer(mount2); err != nil { - t.Fatal(err) - } -} - -func TestLayerRelease(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) - if err != nil { - t.Fatal(err) - } - - layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) - if err != nil { - t.Fatal(err) - } - - if _, err := ls.Release(layer1); err != nil { - t.Fatal(err) - } - - layer3a, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0644))) - if err != nil { - t.Fatal(err) - } - - layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b file"), 0644))) - if err != nil { - t.Fatal(err) - } - - if _, err := ls.Release(layer2); err != nil { - t.Fatal(err) - } - - t.Logf("Layer1: %s", layer1.ChainID()) - t.Logf("Layer2: %s", layer2.ChainID()) - t.Logf("Layer3a: %s", layer3a.ChainID()) - t.Logf("Layer3b: %s", layer3b.ChainID()) - - if expected := 4; len(ls.(*layerStore).layerMap) != expected { - t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) - } - - releaseAndCheckDeleted(t, ls, layer3b, layer3b) - releaseAndCheckDeleted(t, ls, layer3a, layer3a, layer2, layer1) -} - -func 
TestStoreRestore(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) - if err != nil { - t.Fatal(err) - } - - layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) - if err != nil { - t.Fatal(err) - } - - if _, err := ls.Release(layer1); err != nil { - t.Fatal(err) - } - - layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 0644))) - if err != nil { - t.Fatal(err) - } - - if _, err := ls.Release(layer2); err != nil { - t.Fatal(err) - } - - m, err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), "", nil, nil) - if err != nil { - t.Fatal(err) - } - - path, err := m.Mount("") - if err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil { - t.Fatal(err) - } - - if err := m.Unmount(); err != nil { - t.Fatal(err) - } - - ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver) - if err != nil { - t.Fatal(err) - } - - layer3b, err := ls2.Get(layer3.ChainID()) - if err != nil { - t.Fatal(err) - } - - assertLayerEqual(t, layer3b, layer3) - - // Create again with same name, should return error - if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), "", nil, nil); err == nil { - t.Fatal("Expected error creating mount with same name") - } else if err != ErrMountNameConflict { - t.Fatal(err) - } - - m2, err := ls2.GetRWLayer("some-mount_name") - if err != nil { - t.Fatal(err) - } - - if mountPath, err := m2.Mount(""); err != nil { - t.Fatal(err) - } else if path != mountPath { - t.Fatalf("Unexpected path %s, expected %s", mountPath, path) - } - - if mountPath, err := m2.Mount(""); err != nil { - t.Fatal(err) - } else if path != mountPath { - t.Fatalf("Unexpected path %s, expected %s", mountPath, path) - } - if err := m2.Unmount(); err != nil { - t.Fatal(err) - } - - b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt")) - if err != nil { - t.Fatal(err) - } - if expected := "nothing here"; string(b) != expected { - t.Fatalf("Unexpected content %q, expected %q", string(b), expected) - } - - if err := m2.Unmount(); err != nil { - t.Fatal(err) - } - - if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { - t.Fatal(err) - } else if len(metadata) != 0 { - t.Fatalf("Unexpectedly deleted layers: %#v", metadata) - } - - if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { - t.Fatal(err) - } else if len(metadata) != 0 { - t.Fatalf("Unexpectedly deleted layers: %#v", metadata) - } - - releaseAndCheckDeleted(t, ls2, layer3b, layer3, layer2, layer1) -} - -func TestTarStreamStability(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - files1 := []FileApplier{ - newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644), - newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0644), - } - addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0644) - files2 := []FileApplier{ - newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0644), - newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0664), - newTestFile("/root/.bashrc", 
[]byte("PATH=/usr/sbin:/usr/bin"), 0644), - } - - tar1, err := tarFromFiles(files1...) - if err != nil { - t.Fatal(err) - } - - tar2, err := tarFromFiles(files2...) - if err != nil { - t.Fatal(err) - } - - layer1, err := ls.Register(bytes.NewReader(tar1), "") - if err != nil { - t.Fatal(err) - } - - // hack layer to add file - p, err := ls.(*layerStore).driver.Get(layer1.(*referencedCacheLayer).cacheID, "") - if err != nil { - t.Fatal(err) - } - - if err := addedFile.ApplyFile(p); err != nil { - t.Fatal(err) - } - - if err := ls.(*layerStore).driver.Put(layer1.(*referencedCacheLayer).cacheID); err != nil { - t.Fatal(err) - } - - layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID()) - if err != nil { - t.Fatal(err) - } - - id1 := layer1.ChainID() - t.Logf("Layer 1: %s", layer1.ChainID()) - t.Logf("Layer 2: %s", layer2.ChainID()) - - if _, err := ls.Release(layer1); err != nil { - t.Fatal(err) - } - - assertLayerDiff(t, tar2, layer2) - - layer1b, err := ls.Get(id1) - if err != nil { - t.Logf("Content of layer map: %#v", ls.(*layerStore).layerMap) - t.Fatal(err) - } - - if _, err := ls.Release(layer2); err != nil { - t.Fatal(err) - } - - assertLayerDiff(t, tar1, layer1b) - - if _, err := ls.Release(layer1b); err != nil { - t.Fatal(err) - } -} - -func assertLayerDiff(t *testing.T, expected []byte, layer Layer) { - expectedDigest := digest.FromBytes(expected) - - if digest.Digest(layer.DiffID()) != expectedDigest { - t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expected) - } - - ts, err := layer.TarStream() - if err != nil { - t.Fatal(err) - } - defer ts.Close() - - actual, err := ioutil.ReadAll(ts) - if err != nil { - t.Fatal(err) - } - - if len(actual) != len(expected) { - logByteDiff(t, actual, expected) - t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected)) - } - - actualDigest := digest.FromBytes(actual) - - if actualDigest != expectedDigest { - logByteDiff(t, actual, expected) - t.Fatalf("Wrong digest of tar stream, got %s, expected %s", actualDigest, expectedDigest) - } -} - -const maxByteLog = 4 * 1024 - -func logByteDiff(t *testing.T, actual, expected []byte) { - d1, d2 := byteDiff(actual, expected) - if len(d1) == 0 && len(d2) == 0 { - return - } - - prefix := len(actual) - len(d1) - if len(d1) > maxByteLog || len(d2) > maxByteLog { - t.Logf("Byte diff after %d matching bytes", prefix) - } else { - t.Logf("Byte diff after %d matching bytes\nActual bytes after prefix:\n%x\nExpected bytes after prefix:\n%x", prefix, d1, d2) - } -} - -// byteDiff returns the differing bytes after the matching prefix -func byteDiff(b1, b2 []byte) ([]byte, []byte) { - i := 0 - for i < len(b1) && i < len(b2) { - if b1[i] != b2[i] { - break - } - i++ - } - - return b1[i:], b2[i:] -} - -func tarFromFiles(files ...FileApplier) ([]byte, error) { - td, err := ioutil.TempDir("", "tar-") - if err != nil { - return nil, err - } - defer os.RemoveAll(td) - - for _, f := range files { - if err := f.ApplyFile(td); err != nil { - return nil, err - } - } - - r, err := archive.Tar(td, archive.Uncompressed) - if err != nil { - return nil, err - } - - buf := bytes.NewBuffer(nil) - if _, err := io.Copy(buf, r); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// assertReferences asserts that all the references are to the same -// image and represent the full set of references to that image. 
-func assertReferences(t *testing.T, references ...Layer) { - if len(references) == 0 { - return - } - base := references[0].(*referencedCacheLayer).roLayer - seenReferences := map[Layer]struct{}{ - references[0]: {}, - } - for i := 1; i < len(references); i++ { - other := references[i].(*referencedCacheLayer).roLayer - if base != other { - t.Fatalf("Unexpected referenced cache layer %s, expecting %s", other.ChainID(), base.ChainID()) - } - if _, ok := base.references[references[i]]; !ok { - t.Fatalf("Reference not part of reference list: %v", references[i]) - } - if _, ok := seenReferences[references[i]]; ok { - t.Fatalf("Duplicated reference %v", references[i]) - } - } - if rc := len(base.references); rc != len(references) { - t.Fatalf("Unexpected number of references %d, expecting %d", rc, len(references)) - } -} - -func TestRegisterExistingLayer(t *testing.T) { - ls, _, cleanup := newTestStore(t) - defer cleanup() - - baseFiles := []FileApplier{ - newTestFile("/etc/profile", []byte("# Base configuration"), 0644), - } - - layerFiles := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Root configuration"), 0644), - } - - li := initWithFiles(baseFiles...) - layer1, err := createLayer(ls, "", li) - if err != nil { - t.Fatal(err) - } - - tar1, err := tarFromFiles(layerFiles...) - if err != nil { - t.Fatal(err) - } - - layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) - if err != nil { - t.Fatal(err) - } - - layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) - if err != nil { - t.Fatal(err) - } - - assertReferences(t, layer2a, layer2b) -} - -func TestTarStreamVerification(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, tmpdir, cleanup := newTestStore(t) - defer cleanup() - - files1 := []FileApplier{ - newTestFile("/foo", []byte("abc"), 0644), - newTestFile("/bar", []byte("def"), 0644), - } - files2 := []FileApplier{ - newTestFile("/foo", []byte("abc"), 0644), - newTestFile("/bar", []byte("def"), 0600), // different perm - } - - tar1, err := tarFromFiles(files1...) - if err != nil { - t.Fatal(err) - } - - tar2, err := tarFromFiles(files2...) 
- if err != nil { - t.Fatal(err) - } - - layer1, err := ls.Register(bytes.NewReader(tar1), "") - if err != nil { - t.Fatal(err) - } - - layer2, err := ls.Register(bytes.NewReader(tar2), "") - if err != nil { - t.Fatal(err) - } - id1 := digest.Digest(layer1.ChainID()) - id2 := digest.Digest(layer2.ChainID()) - - // Replace tar data files - src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz")) - if err != nil { - t.Fatal(err) - } - - dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz")) - if err != nil { - t.Fatal(err) - } - - if _, err := io.Copy(dst, src); err != nil { - t.Fatal(err) - } - - src.Close() - dst.Close() - - ts, err := layer2.TarStream() - if err != nil { - t.Fatal(err) - } - _, err = io.Copy(ioutil.Discard, ts) - if err == nil { - t.Fatal("expected data verification to fail") - } - if !strings.Contains(err.Error(), "could not verify layer data") { - t.Fatalf("wrong error returned from tarstream: %q", err) - } -} diff --git a/layer/layer_unix.go b/layer/layer_unix.go deleted file mode 100644 index 776b78ac0..000000000 --- a/layer/layer_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build linux freebsd darwin openbsd solaris - -package layer - -import "github.com/docker/docker/pkg/stringid" - -func (ls *layerStore) mountID(name string) string { - return stringid.GenerateRandomID() -} diff --git a/layer/layer_unix_test.go b/layer/layer_unix_test.go deleted file mode 100644 index 9aa1afd59..000000000 --- a/layer/layer_unix_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// +build !windows - -package layer - -import "testing" - -func graphDiffSize(ls Store, l Layer) (int64, error) { - cl := getCachedLayer(l) - var parent string - if cl.parent != nil { - parent = cl.parent.cacheID - } - return ls.(*layerStore).driver.DiffSize(cl.cacheID, parent) -} - -// Unix only, as the Windows graph driver does not support Changes, which is -// indirectly invoked by calling DiffSize on the driver -func TestLayerSize(t *testing.T) { - ls, _, cleanup := newTestStore(t) - defer cleanup() - - content1 := []byte("Base contents") - content2 := []byte("Added contents") - - layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0644))) - if err != nil { - t.Fatal(err) - } - - layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0644))) - if err != nil { - t.Fatal(err) - } - - layer1DiffSize, err := graphDiffSize(ls, layer1) - if err != nil { - t.Fatal(err) - } - - if int(layer1DiffSize) != len(content1) { - t.Fatalf("Unexpected diff size %d, expected %d", layer1DiffSize, len(content1)) - } - - layer1Size, err := layer1.Size() - if err != nil { - t.Fatal(err) - } - - if expected := len(content1); int(layer1Size) != expected { - t.Fatalf("Unexpected size %d, expected %d", layer1Size, expected) - } - - layer2DiffSize, err := graphDiffSize(ls, layer2) - if err != nil { - t.Fatal(err) - } - - if int(layer2DiffSize) != len(content2) { - t.Fatalf("Unexpected diff size %d, expected %d", layer2DiffSize, len(content2)) - } - - layer2Size, err := layer2.Size() - if err != nil { - t.Fatal(err) - } - - if expected := len(content1) + len(content2); int(layer2Size) != expected { - t.Fatalf("Unexpected size %d, expected %d", layer2Size, expected) - } - -} diff --git a/layer/layer_windows.go b/layer/layer_windows.go deleted file mode 100644 index e20311a09..000000000 --- a/layer/layer_windows.go +++ /dev/null @@ -1,98 +0,0 @@ -package layer - -import ( - "errors" - "fmt" - -
"github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/daemon/graphdriver" -) - -// GetLayerPath returns the path to a layer -func GetLayerPath(s Store, layer ChainID) (string, error) { - ls, ok := s.(*layerStore) - if !ok { - return "", errors.New("unsupported layer store") - } - ls.layerL.Lock() - defer ls.layerL.Unlock() - - rl, ok := ls.layerMap[layer] - if !ok { - return "", ErrLayerDoesNotExist - } - - path, err := ls.driver.Get(rl.cacheID, "") - if err != nil { - return "", err - } - - if err := ls.driver.Put(rl.cacheID); err != nil { - return "", err - } - - return path, nil -} - -func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) { - var err error // this is used for cleanup in existingLayer case - diffID := digest.FromBytes([]byte(graphID)) - - // Create new roLayer - layer := &roLayer{ - cacheID: graphID, - diffID: DiffID(diffID), - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - size: size, - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - defer func() { - if err != nil { - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - layer.chainID = createChainIDFromParent("", layer.diffID) - - if !ls.driver.Exists(layer.cacheID) { - return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID) - } - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -func (ls *layerStore) mountID(name string) string { - // windows has issues if container ID doesn't match mount ID - return name -} - -func (ls *layerStore) GraphDriver() graphdriver.Driver { - return ls.driver -} diff --git a/layer/migration.go b/layer/migration.go deleted file mode 100644 index b45c31099..000000000 --- a/layer/migration.go +++ /dev/null @@ -1,256 +0,0 @@ -package layer - -import ( - "compress/gzip" - "errors" - "fmt" - "io" - "os" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// CreateRWLayerByGraphID creates a RWLayer in the layer store using -// the provided name with the given graphID. To get the RWLayer -// after migration the layer may be retrieved by the given name. 
-// CreateRWLayerByGraphID creates an RWLayer in the layer store using -// the provided name with the given graphID. After migration, the RWLayer -// may be retrieved by the given name. -func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[name] - if ok { - if m.parent.chainID != parent { - return errors.New("name conflict, mismatched parent") - } - if m.mountID != graphID { - return errors.New("mount already exists") - } - - return nil - } - - if !ls.driver.Exists(graphID) { - return fmt.Errorf("graph ID does not exist: %q", graphID) - } - - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return ErrLayerDoesNotExist - } - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - // TODO: Ensure graphID has correct parent - - m = &mountedLayer{ - name: name, - parent: p, - mountID: graphID, - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - // Check for existing init layer - initID := fmt.Sprintf("%s-init", graphID) - if ls.driver.Exists(initID) { - m.initID = initID - } - - if err = ls.saveMount(m); err != nil { - return err - } - - return nil -} - -func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { - defer func() { - if err != nil { - logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) - diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) - } - }() - - if oldTarDataPath == "" { - err = errors.New("no tar-split file") - return - } - - tarDataFile, err := os.Open(oldTarDataPath) - if err != nil { - return - } - defer tarDataFile.Close() - uncompressed, err := gzip.NewReader(tarDataFile) - if err != nil { - return - } - - dgst := digest.Canonical.New() - err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) - if err != nil { - return - } - - diffID = DiffID(dgst.Digest()) - err = os.RemoveAll(newTarDataPath) - if err != nil { - return - } - err = os.Link(oldTarDataPath, newTarDataPath) - - return -} - -func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { - rawarchive, err := ls.driver.Diff(id, parent) - if err != nil { - return - } - defer rawarchive.Close() - - f, err := os.Create(newTarDataPath) - if err != nil { - return - } - defer f.Close() - mfz := gzip.NewWriter(f) - defer mfz.Close() - metaPacker := storage.NewJSONPacker(mfz) - - packerCounter := &packSizeCounter{metaPacker, &size} - - archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) - if err != nil { - return - } - dgst, err := digest.FromReader(archive) - if err != nil { - return - } - diffID = DiffID(dgst) - return -} - -func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { - // err is used to hold the error which will always trigger - // cleanup of created sources but may not be an error returned - // to the caller (already exists).
- var err error - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - // Create new roLayer - layer := &roLayer{ - parent: p, - cacheID: graphID, - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - diffID: diffID, - size: size, - chainID: createChainIDFromParent(parent, diffID), - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - tsw, err := tx.TarSplitWriter(false) - if err != nil { - return nil, err - } - defer tsw.Close() - tdf, err := os.Open(tarDataFile) - if err != nil { - return nil, err - } - defer tdf.Close() - _, err = io.Copy(tsw, tdf) - if err != nil { - return nil, err - } - - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -type unpackSizeCounter struct { - unpacker storage.Unpacker - size *int64 -} - -func (u *unpackSizeCounter) Next() (*storage.Entry, error) { - e, err := u.unpacker.Next() - if err == nil && u.size != nil { - *u.size += e.Size - } - return e, err -} - -type packSizeCounter struct { - packer storage.Packer - size *int64 -} - -func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { - n, err := p.packer.AddEntry(e) - if err == nil && p.size != nil { - *p.size += e.Size - } - return n, err -} diff --git a/layer/migration_test.go b/layer/migration_test.go deleted file mode 100644 index 50ea6407b..000000000 --- a/layer/migration_test.go +++ /dev/null @@ -1,435 +0,0 @@ -package layer - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "testing" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/stringid" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -func writeTarSplitFile(name string, tarContent []byte) error { - f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return err - } - defer f.Close() - - fz := gzip.NewWriter(f) - - metaPacker := storage.NewJSONPacker(fz) - defer fz.Close() - - rdr, err := asm.NewInputTarStream(bytes.NewReader(tarContent), metaPacker, nil) - if err != nil { - return err - } - - if _, err := io.Copy(ioutil.Discard, rdr); err != nil { - return err - } - - return nil -} - -func TestLayerMigration(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - td, err := ioutil.TempDir("", "migration-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(td) - - layer1Files := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), - 
newTestFile("/etc/profile", []byte("# Base configuration"), 0644), - } - - layer2Files := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), - } - - tar1, err := tarFromFiles(layer1Files...) - if err != nil { - t.Fatal(err) - } - - tar2, err := tarFromFiles(layer2Files...) - if err != nil { - t.Fatal(err) - } - - graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) - if err != nil { - t.Fatal(err) - } - - graphID1 := stringid.GenerateRandomID() - if err := graph.Create(graphID1, "", "", nil); err != nil { - t.Fatal(err) - } - if _, err := graph.ApplyDiff(graphID1, "", archive.Reader(bytes.NewReader(tar1))); err != nil { - t.Fatal(err) - } - - tf1 := filepath.Join(td, "tar1.json.gz") - if err := writeTarSplitFile(tf1, tar1); err != nil { - t.Fatal(err) - } - - fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) - if err != nil { - t.Fatal(err) - } - ls, err := NewStoreFromGraphDriver(fms, graph) - if err != nil { - t.Fatal(err) - } - - newTarDataPath := filepath.Join(td, ".migration-tardata") - diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", tf1, newTarDataPath) - if err != nil { - t.Fatal(err) - } - - layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) - if err != nil { - t.Fatal(err) - } - - layer1b, err := ls.Register(bytes.NewReader(tar1), "") - if err != nil { - t.Fatal(err) - } - - assertReferences(t, layer1a, layer1b) - // Attempt register, should be same - layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) - if err != nil { - t.Fatal(err) - } - - graphID2 := stringid.GenerateRandomID() - if err := graph.Create(graphID2, graphID1, "", nil); err != nil { - t.Fatal(err) - } - if _, err := graph.ApplyDiff(graphID2, graphID1, archive.Reader(bytes.NewReader(tar2))); err != nil { - t.Fatal(err) - } - - tf2 := filepath.Join(td, "tar2.json.gz") - if err := writeTarSplitFile(tf2, tar2); err != nil { - t.Fatal(err) - } - diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, tf2, newTarDataPath) - if err != nil { - t.Fatal(err) - } - - layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, tf2, size) - if err != nil { - t.Fatal(err) - } - assertReferences(t, layer2a, layer2b) - - if metadata, err := ls.Release(layer2a); err != nil { - t.Fatal(err) - } else if len(metadata) > 0 { - t.Fatalf("Unexpected layer removal after first release: %#v", metadata) - } - - metadata, err := ls.Release(layer2b) - if err != nil { - t.Fatal(err) - } - - assertMetadata(t, metadata, createMetadata(layer2a)) -} - -func tarFromFilesInGraph(graph graphdriver.Driver, graphID, parentID string, files ...FileApplier) ([]byte, error) { - t, err := tarFromFiles(files...) 
- if err != nil { - return nil, err - } - - if err := graph.Create(graphID, parentID, "", nil); err != nil { - return nil, err - } - if _, err := graph.ApplyDiff(graphID, parentID, archive.Reader(bytes.NewReader(t))); err != nil { - return nil, err - } - - ar, err := graph.Diff(graphID, parentID) - if err != nil { - return nil, err - } - defer ar.Close() - - return ioutil.ReadAll(ar) -} - -func TestLayerMigrationNoTarsplit(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - td, err := ioutil.TempDir("", "migration-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(td) - - layer1Files := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), - newTestFile("/etc/profile", []byte("# Base configuration"), 0644), - } - - layer2Files := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), - } - - graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) - if err != nil { - t.Fatal(err) - } - graphID1 := stringid.GenerateRandomID() - graphID2 := stringid.GenerateRandomID() - - tar1, err := tarFromFilesInGraph(graph, graphID1, "", layer1Files...) - if err != nil { - t.Fatal(err) - } - - tar2, err := tarFromFilesInGraph(graph, graphID2, graphID1, layer2Files...) - if err != nil { - t.Fatal(err) - } - - fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) - if err != nil { - t.Fatal(err) - } - ls, err := NewStoreFromGraphDriver(fms, graph) - if err != nil { - t.Fatal(err) - } - - newTarDataPath := filepath.Join(td, ".migration-tardata") - diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", "", newTarDataPath) - if err != nil { - t.Fatal(err) - } - - layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) - if err != nil { - t.Fatal(err) - } - - layer1b, err := ls.Register(bytes.NewReader(tar1), "") - if err != nil { - t.Fatal(err) - } - - assertReferences(t, layer1a, layer1b) - - // Attempt register, should be same - layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) - if err != nil { - t.Fatal(err) - } - - diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, "", newTarDataPath) - if err != nil { - t.Fatal(err) - } - - layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, newTarDataPath, size) - if err != nil { - t.Fatal(err) - } - assertReferences(t, layer2a, layer2b) - - if metadata, err := ls.Release(layer2a); err != nil { - t.Fatal(err) - } else if len(metadata) > 0 { - t.Fatalf("Unexpected layer removal after first release: %#v", metadata) - } - - metadata, err := ls.Release(layer2b) - if err != nil { - t.Fatal(err) - } - - assertMetadata(t, metadata, createMetadata(layer2a)) -} - -func TestMountMigration(t *testing.T) { - // TODO Windows: Figure out why this is failing (obvious - paths... 
needs porting) - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - baseFiles := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), - newTestFile("/etc/profile", []byte("# Base configuration"), 0644), - } - initFiles := []FileApplier{ - newTestFile("/etc/hosts", []byte{}, 0644), - newTestFile("/etc/resolv.conf", []byte{}, 0644), - } - mountFiles := []FileApplier{ - newTestFile("/etc/hosts", []byte("localhost 127.0.0.1"), 0644), - newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), - newTestFile("/root/testfile1.txt", []byte("nothing valuable"), 0644), - } - - initTar, err := tarFromFiles(initFiles...) - if err != nil { - t.Fatal(err) - } - - mountTar, err := tarFromFiles(mountFiles...) - if err != nil { - t.Fatal(err) - } - - graph := ls.(*layerStore).driver - - layer1, err := createLayer(ls, "", initWithFiles(baseFiles...)) - if err != nil { - t.Fatal(err) - } - - graphID1 := layer1.(*referencedCacheLayer).cacheID - - containerID := stringid.GenerateRandomID() - containerInit := fmt.Sprintf("%s-init", containerID) - - if err := graph.Create(containerInit, graphID1, "", nil); err != nil { - t.Fatal(err) - } - if _, err := graph.ApplyDiff(containerInit, graphID1, archive.Reader(bytes.NewReader(initTar))); err != nil { - t.Fatal(err) - } - - if err := graph.Create(containerID, containerInit, "", nil); err != nil { - t.Fatal(err) - } - if _, err := graph.ApplyDiff(containerID, containerInit, archive.Reader(bytes.NewReader(mountTar))); err != nil { - t.Fatal(err) - } - - if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, layer1.ChainID()); err != nil { - t.Fatal(err) - } - - rwLayer1, err := ls.GetRWLayer("migration-mount") - if err != nil { - t.Fatal(err) - } - - if _, err := rwLayer1.Mount(""); err != nil { - t.Fatal(err) - } - - changes, err := rwLayer1.Changes() - if err != nil { - t.Fatal(err) - } - - if expected := 5; len(changes) != expected { - t.Logf("Changes %#v", changes) - t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) - } - - sortChanges(changes) - - assertChange(t, changes[0], archive.Change{ - Path: "/etc", - Kind: archive.ChangeModify, - }) - assertChange(t, changes[1], archive.Change{ - Path: "/etc/hosts", - Kind: archive.ChangeModify, - }) - assertChange(t, changes[2], archive.Change{ - Path: "/root", - Kind: archive.ChangeModify, - }) - assertChange(t, changes[3], archive.Change{ - Path: "/root/.bashrc", - Kind: archive.ChangeModify, - }) - assertChange(t, changes[4], archive.Change{ - Path: "/root/testfile1.txt", - Kind: archive.ChangeAdd, - }) - - if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), "", nil, nil); err == nil { - t.Fatal("Expected error creating mount with same name") - } else if err != ErrMountNameConflict { - t.Fatal(err) - } - - rwLayer2, err := ls.GetRWLayer("migration-mount") - if err != nil { - t.Fatal(err) - } - - if getMountLayer(rwLayer1) != getMountLayer(rwLayer2) { - t.Fatal("Expected same layer from get with same name as from migrate") - } - - if _, err := rwLayer2.Mount(""); err != nil { - t.Fatal(err) - } - - if _, err := rwLayer2.Mount(""); err != nil { - t.Fatal(err) - } - - if metadata, err := ls.Release(layer1); err != nil { - t.Fatal(err) - } else if len(metadata) > 0 { - t.Fatalf("Expected no layers to be deleted, deleted %#v", metadata) - } - - if err := rwLayer1.Unmount(); err != nil { - t.Fatal(err) - } - - if _, err := 
ls.ReleaseRWLayer(rwLayer1); err != nil { - t.Fatal(err) - } - - if err := rwLayer2.Unmount(); err != nil { - t.Fatal(err) - } - if err := rwLayer2.Unmount(); err != nil { - t.Fatal(err) - } - metadata, err := ls.ReleaseRWLayer(rwLayer2) - if err != nil { - t.Fatal(err) - } - if len(metadata) == 0 { - t.Fatal("Expected base layer to be deleted when deleting mount") - } - - assertMetadata(t, metadata, createMetadata(layer1)) -} diff --git a/layer/mount_test.go b/layer/mount_test.go deleted file mode 100644 index 7a8637eae..000000000 --- a/layer/mount_test.go +++ /dev/null @@ -1,230 +0,0 @@ -package layer - -import ( - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sort" - "testing" - - "github.com/docker/docker/pkg/archive" -) - -func TestMountInit(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - basefile := newTestFile("testfile.txt", []byte("base data!"), 0644) - initfile := newTestFile("testfile.txt", []byte("init data!"), 0777) - - li := initWithFiles(basefile) - layer, err := createLayer(ls, "", li) - if err != nil { - t.Fatal(err) - } - - mountInit := func(root string) error { - return initfile.ApplyFile(root) - } - - m, err := ls.CreateRWLayer("fun-mount", layer.ChainID(), "", mountInit, nil) - if err != nil { - t.Fatal(err) - } - - path, err := m.Mount("") - if err != nil { - t.Fatal(err) - } - - f, err := os.Open(filepath.Join(path, "testfile.txt")) - if err != nil { - t.Fatal(err) - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - t.Fatal(err) - } - - b, err := ioutil.ReadAll(f) - if err != nil { - t.Fatal(err) - } - - if expected := "init data!"; string(b) != expected { - t.Fatalf("Unexpected test file contents %q, expected %q", string(b), expected) - } - - if fi.Mode().Perm() != 0777 { - t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0777) - } -} - -func TestMountSize(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - content1 := []byte("Base contents") - content2 := []byte("Mutable contents") - contentInit := []byte("why am I excluded from the size ☹") - - li := initWithFiles(newTestFile("file1", content1, 0644)) - layer, err := createLayer(ls, "", li) - if err != nil { - t.Fatal(err) - } - - mountInit := func(root string) error { - return newTestFile("file-init", contentInit, 0777).ApplyFile(root) - } - - m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), "", mountInit, nil) - if err != nil { - t.Fatal(err) - } - - path, err := m.Mount("") - if err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil { - t.Fatal(err) - } - - mountSize, err := m.Size() - if err != nil { - t.Fatal(err) - } - - if expected := len(content2); int(mountSize) != expected { - t.Fatalf("Unexpected mount size %d, expected %d", int(mountSize), expected) - } -} - -func TestMountChanges(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - basefiles := []FileApplier{ - newTestFile("testfile1.txt", []byte("base data!"), 0644), - newTestFile("testfile2.txt", []byte("base data!"), 0644), - newTestFile("testfile3.txt", []byte("base data!"), 0644), - } - initfile := 
newTestFile("testfile1.txt", []byte("init data!"), 0777) - - li := initWithFiles(basefiles...) - layer, err := createLayer(ls, "", li) - if err != nil { - t.Fatal(err) - } - - mountInit := func(root string) error { - return initfile.ApplyFile(root) - } - - m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), "", mountInit, nil) - if err != nil { - t.Fatal(err) - } - - path, err := m.Mount("") - if err != nil { - t.Fatal(err) - } - - if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil { - t.Fatal(err) - } - - if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil { - t.Fatal(err) - } - - if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil { - t.Fatal(err) - } - - changes, err := m.Changes() - if err != nil { - t.Fatal(err) - } - - if expected := 4; len(changes) != expected { - t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) - } - - sortChanges(changes) - - assertChange(t, changes[0], archive.Change{ - Path: "/testfile1.txt", - Kind: archive.ChangeModify, - }) - assertChange(t, changes[1], archive.Change{ - Path: "/testfile2.txt", - Kind: archive.ChangeDelete, - }) - assertChange(t, changes[2], archive.Change{ - Path: "/testfile3.txt", - Kind: archive.ChangeModify, - }) - assertChange(t, changes[3], archive.Change{ - Path: "/testfile4.txt", - Kind: archive.ChangeAdd, - }) -} - -func assertChange(t *testing.T, actual, expected archive.Change) { - if actual.Path != expected.Path { - t.Fatalf("Unexpected change path %s, expected %s", actual.Path, expected.Path) - } - if actual.Kind != expected.Kind { - t.Fatalf("Unexpected change type %s, expected %s", actual.Kind, expected.Kind) - } -} - -func sortChanges(changes []archive.Change) { - cs := &changeSorter{ - changes: changes, - } - sort.Sort(cs) -} - -type changeSorter struct { - changes []archive.Change -} - -func (cs *changeSorter) Len() int { - return len(cs.changes) -} - -func (cs *changeSorter) Swap(i, j int) { - cs.changes[i], cs.changes[j] = cs.changes[j], cs.changes[i] -} - -func (cs *changeSorter) Less(i, j int) bool { - return cs.changes[i].Path < cs.changes[j].Path -} diff --git a/layer/mounted_layer.go b/layer/mounted_layer.go deleted file mode 100644 index add33d9f1..000000000 --- a/layer/mounted_layer.go +++ /dev/null @@ -1,103 +0,0 @@ -package layer - -import ( - "io" - - "github.com/docker/docker/pkg/archive" -) - -type mountedLayer struct { - name string - mountID string - initID string - parent *roLayer - path string - layerStore *layerStore - - references map[RWLayer]*referencedRWLayer -} - -func (ml *mountedLayer) cacheParent() string { - if ml.initID != "" { - return ml.initID - } - if ml.parent != nil { - return ml.parent.cacheID - } - return "" -} - -func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { - archiver, err := ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) - if err != nil { - return nil, err - } - return archiver, nil -} - -func (ml *mountedLayer) Name() string { - return ml.name -} - -func (ml *mountedLayer) Parent() Layer { - if ml.parent != nil { - return ml.parent - } - - // Return a nil interface instead of an interface wrapping a nil - // pointer. 
- return nil -} - -func (ml *mountedLayer) Size() (int64, error) { - return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) -} - -func (ml *mountedLayer) Changes() ([]archive.Change, error) { - return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) -} - -func (ml *mountedLayer) Metadata() (map[string]string, error) { - return ml.layerStore.driver.GetMetadata(ml.mountID) -} - -func (ml *mountedLayer) getReference() RWLayer { - ref := &referencedRWLayer{ - mountedLayer: ml, - } - ml.references[ref] = ref - - return ref -} - -func (ml *mountedLayer) hasReferences() bool { - return len(ml.references) > 0 -} - -func (ml *mountedLayer) deleteReference(ref RWLayer) error { - if _, ok := ml.references[ref]; !ok { - return ErrLayerNotRetained - } - delete(ml.references, ref) - return nil -} - -func (ml *mountedLayer) retakeReference(r RWLayer) { - if ref, ok := r.(*referencedRWLayer); ok { - ml.references[ref] = ref - } -} - -type referencedRWLayer struct { - *mountedLayer -} - -func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { - return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) -} - -// Unmount decrements the activity count and unmounts the underlying layer -// Callers should only call `Unmount` once per call to `Mount`, even on error. -func (rl *referencedRWLayer) Unmount() error { - return rl.layerStore.driver.Put(rl.mountedLayer.mountID) -} diff --git a/layer/ro_layer.go b/layer/ro_layer.go deleted file mode 100644 index 9fb1cafeb..000000000 --- a/layer/ro_layer.go +++ /dev/null @@ -1,172 +0,0 @@ -package layer - -import ( - "fmt" - "io" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" -) - -type roLayer struct { - chainID ChainID - diffID DiffID - parent *roLayer - cacheID string - size int64 - layerStore *layerStore - descriptor distribution.Descriptor - - referenceCount int - references map[Layer]struct{} -} - -func (rl *roLayer) TarStream() (io.ReadCloser, error) { - r, err := rl.layerStore.store.TarSplitReader(rl.chainID) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - go func() { - err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw) - if err != nil { - pw.CloseWithError(err) - } else { - pw.Close() - } - }() - rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID)) - if err != nil { - return nil, err - } - return rc, nil -} - -func (rl *roLayer) ChainID() ChainID { - return rl.chainID -} - -func (rl *roLayer) DiffID() DiffID { - return rl.diffID -} - -func (rl *roLayer) Parent() Layer { - if rl.parent == nil { - return nil - } - return rl.parent -} - -func (rl *roLayer) Size() (size int64, err error) { - if rl.parent != nil { - size, err = rl.parent.Size() - if err != nil { - return - } - } - - return size + rl.size, nil -} - -func (rl *roLayer) DiffSize() (size int64, err error) { - return rl.size, nil -} - -func (rl *roLayer) Metadata() (map[string]string, error) { - return rl.layerStore.driver.GetMetadata(rl.cacheID) -} - -type referencedCacheLayer struct { - *roLayer -} - -func (rl *roLayer) getReference() Layer { - ref := &referencedCacheLayer{ - roLayer: rl, - } - rl.references[ref] = struct{}{} - - return ref -} - -func (rl *roLayer) hasReference(ref Layer) bool { - _, ok := rl.references[ref] - return ok -} - -func (rl *roLayer) hasReferences() bool { - return len(rl.references) > 0 -} - -func (rl *roLayer) deleteReference(ref Layer) { - delete(rl.references, ref) -} - -func (rl *roLayer) depth() int { - if rl.parent == nil { - return 1 - } - return 
rl.parent.depth() + 1 -} - -func storeLayer(tx MetadataTransaction, layer *roLayer) error { - if err := tx.SetDiffID(layer.diffID); err != nil { - return err - } - if err := tx.SetSize(layer.size); err != nil { - return err - } - if err := tx.SetCacheID(layer.cacheID); err != nil { - return err - } - // Do not store empty descriptors - if layer.descriptor.Digest != "" { - if err := tx.SetDescriptor(layer.descriptor); err != nil { - return err - } - } - if layer.parent != nil { - if err := tx.SetParent(layer.parent.chainID); err != nil { - return err - } - } - - return nil -} - -func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { - verifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - return nil, err - } - return &verifiedReadCloser{ - rc: rc, - dgst: dgst, - verifier: verifier, - }, nil -} - -type verifiedReadCloser struct { - rc io.ReadCloser - dgst digest.Digest - verifier digest.Verifier -} - -func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { - n, err = vrc.rc.Read(p) - if n > 0 { - if n, err := vrc.verifier.Write(p[:n]); err != nil { - return n, err - } - } - if err == io.EOF { - if !vrc.verifier.Verified() { - err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) - } - } - return -} -func (vrc *verifiedReadCloser) Close() error { - return vrc.rc.Close() -} diff --git a/layer/ro_layer_windows.go b/layer/ro_layer_windows.go deleted file mode 100644 index 32bd7182a..000000000 --- a/layer/ro_layer_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package layer - -import "github.com/docker/distribution" - -var _ distribution.Describable = &roLayer{} - -func (rl *roLayer) Descriptor() distribution.Descriptor { - return rl.descriptor -} diff --git a/libcontainerd/client.go b/libcontainerd/client.go deleted file mode 100644 index 7e8e47bcf..000000000 --- a/libcontainerd/client.go +++ /dev/null @@ -1,46 +0,0 @@ -package libcontainerd - -import ( - "fmt" - "sync" - - "github.com/docker/docker/pkg/locker" -) - -// clientCommon contains the platform agnostic fields used in the client structure -type clientCommon struct { - backend Backend - containers map[string]*container - locker *locker.Locker - mapMutex sync.RWMutex // protects read/write operations on the containers map -} - -func (clnt *client) lock(containerID string) { - clnt.locker.Lock(containerID) -} - -func (clnt *client) unlock(containerID string) { - clnt.locker.Unlock(containerID) -} - -// must hold a lock for cont.containerID -func (clnt *client) appendContainer(cont *container) { - clnt.mapMutex.Lock() - clnt.containers[cont.containerID] = cont - clnt.mapMutex.Unlock() -} -func (clnt *client) deleteContainer(friendlyName string) { - clnt.mapMutex.Lock() - delete(clnt.containers, friendlyName) - clnt.mapMutex.Unlock() -} - -func (clnt *client) getContainer(containerID string) (*container, error) { - clnt.mapMutex.RLock() - container, ok := clnt.containers[containerID] - defer clnt.mapMutex.RUnlock() - if !ok { - return nil, fmt.Errorf("invalid container: %s", containerID) // fixme: typed error - } - return container, nil -} diff --git a/libcontainerd/client_linux.go b/libcontainerd/client_linux.go deleted file mode 100644 index 425842a51..000000000 --- a/libcontainerd/client_linux.go +++ /dev/null @@ -1,525 +0,0 @@ -package libcontainerd - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - "sync" - "syscall" -
"time" - - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - specs "github.com/opencontainers/specs/specs-go" - "golang.org/x/net/context" -) - -type client struct { - clientCommon - - // Platform specific properties below here. - remote *remote - q queue - exitNotifiers map[string]*exitNotifier - liveRestore bool -} - -func (clnt *client) AddProcess(containerID, processFriendlyName string, specp Process) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - return err - } - - spec, err := container.spec() - if err != nil { - return err - } - sp := spec.Process - sp.Args = specp.Args - sp.Terminal = specp.Terminal - if specp.Env != nil { - sp.Env = specp.Env - } - if specp.Cwd != nil { - sp.Cwd = *specp.Cwd - } - if specp.User != nil { - sp.User = specs.User{ - UID: specp.User.UID, - GID: specp.User.GID, - AdditionalGids: specp.User.AdditionalGids, - } - } - if specp.Capabilities != nil { - sp.Capabilities = specp.Capabilities - } - - p := container.newProcess(processFriendlyName) - - r := &containerd.AddProcessRequest{ - Args: sp.Args, - Cwd: sp.Cwd, - Terminal: sp.Terminal, - Id: containerID, - Env: sp.Env, - User: &containerd.User{ - Uid: sp.User.UID, - Gid: sp.User.GID, - AdditionalGids: sp.User.AdditionalGids, - }, - Pid: processFriendlyName, - Stdin: p.fifo(syscall.Stdin), - Stdout: p.fifo(syscall.Stdout), - Stderr: p.fifo(syscall.Stderr), - Capabilities: sp.Capabilities, - ApparmorProfile: sp.ApparmorProfile, - SelinuxLabel: sp.SelinuxLabel, - NoNewPrivileges: sp.NoNewPrivileges, - Rlimits: convertRlimits(sp.Rlimits), - } - - iopipe, err := p.openFifos(sp.Terminal) - if err != nil { - return err - } - - if _, err := clnt.remote.apiClient.AddProcess(context.Background(), r); err != nil { - p.closeFifos(iopipe) - return err - } - - container.processes[processFriendlyName] = p - - clnt.unlock(containerID) - - if err := clnt.backend.AttachStreams(processFriendlyName, *iopipe); err != nil { - return err - } - clnt.lock(containerID) - - return nil -} - -func (clnt *client) prepareBundleDir(uid, gid int) (string, error) { - root, err := filepath.Abs(clnt.remote.stateDir) - if err != nil { - return "", err - } - if uid == 0 && gid == 0 { - return root, nil - } - p := string(filepath.Separator) - for _, d := range strings.Split(root, string(filepath.Separator))[1:] { - p = filepath.Join(p, d) - fi, err := os.Stat(p) - if err != nil && !os.IsNotExist(err) { - return "", err - } - if os.IsNotExist(err) || fi.Mode()&1 == 0 { - p = fmt.Sprintf("%s.%d.%d", p, uid, gid) - if err := idtools.MkdirAs(p, 0700, uid, gid); err != nil && !os.IsExist(err) { - return "", err - } - } - } - return p, nil -} - -func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) (err error) { - clnt.lock(containerID) - defer clnt.unlock(containerID) - - if ctr, err := clnt.getContainer(containerID); err == nil { - if ctr.restarting { - ctr.restartManager.Cancel() - ctr.clean() - } else { - return fmt.Errorf("Container %s is already active", containerID) - } - } - - uid, gid, err := getRootIDs(specs.Spec(spec)) - if err != nil { - return err - } - dir, err := clnt.prepareBundleDir(uid, gid) - if err != nil { - return err - } - - container := clnt.newContainer(filepath.Join(dir, containerID), options...) 
- if err := container.clean(); err != nil { - return err - } - - defer func() { - if err != nil { - container.clean() - clnt.deleteContainer(containerID) - } - }() - - if err := idtools.MkdirAllAs(container.dir, 0700, uid, gid); err != nil && !os.IsExist(err) { - return err - } - - f, err := os.Create(filepath.Join(container.dir, configFilename)) - if err != nil { - return err - } - defer f.Close() - if err := json.NewEncoder(f).Encode(spec); err != nil { - return err - } - - return container.start() -} - -func (clnt *client) Signal(containerID string, sig int) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ - Id: containerID, - Pid: InitFriendlyName, - Signal: uint32(sig), - }) - return err -} - -func (clnt *client) SignalProcess(containerID string, pid string, sig int) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ - Id: containerID, - Pid: pid, - Signal: uint32(sig), - }) - return err -} - -func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - if _, err := clnt.getContainer(containerID); err != nil { - return err - } - _, err := clnt.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ - Id: containerID, - Pid: processFriendlyName, - Width: uint32(width), - Height: uint32(height), - }) - return err -} - -func (clnt *client) Pause(containerID string) error { - return clnt.setState(containerID, StatePause) -} - -func (clnt *client) setState(containerID, state string) error { - clnt.lock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - clnt.unlock(containerID) - return err - } - if container.systemPid == 0 { - clnt.unlock(containerID) - return fmt.Errorf("No active process for container %s", containerID) - } - st := "running" - if state == StatePause { - st = "paused" - } - chstate := make(chan struct{}) - _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ - Id: containerID, - Pid: InitFriendlyName, - Status: st, - }) - if err != nil { - clnt.unlock(containerID) - return err - } - container.pauseMonitor.append(state, chstate) - clnt.unlock(containerID) - <-chstate - return nil -} - -func (clnt *client) Resume(containerID string) error { - return clnt.setState(containerID, StateResume) -} - -func (clnt *client) Stats(containerID string) (*Stats, error) { - resp, err := clnt.remote.apiClient.Stats(context.Background(), &containerd.StatsRequest{containerID}) - if err != nil { - return nil, err - } - return (*Stats)(resp), nil -} - -// Take care of the old 1.11.0 behavior in case the version upgrade -// happened without a clean daemon shutdown -func (clnt *client) cleanupOldRootfs(containerID string) { - // Unmount and delete the bundle folder - if mts, err := mount.GetMounts(); err == nil { - for _, mts := range mts { - if strings.HasSuffix(mts.Mountpoint, containerID+"/rootfs") { - if err := syscall.Unmount(mts.Mountpoint, syscall.MNT_DETACH); err == nil { - os.RemoveAll(strings.TrimSuffix(mts.Mountpoint, "/rootfs")) - } - break - } - } - } -} - -func (clnt *client) setExited(containerID string) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - - var exitCode uint32 - if event, ok := clnt.remote.pastEvents[containerID]; ok { - exitCode = 
event.Status - delete(clnt.remote.pastEvents, containerID) - } - - err := clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateExit, - ExitCode: exitCode, - }}) - - clnt.cleanupOldRootfs(containerID) - - return err -} - -func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { - cont, err := clnt.getContainerdContainer(containerID) - if err != nil { - return nil, err - } - pids := make([]int, len(cont.Pids)) - for i, p := range cont.Pids { - pids[i] = int(p) - } - return pids, nil -} - -// Summary returns a summary of the processes running in a container. -// This is a no-op on Linux. -func (clnt *client) Summary(containerID string) ([]Summary, error) { - return nil, nil -} - -func (clnt *client) getContainerdContainer(containerID string) (*containerd.Container, error) { - resp, err := clnt.remote.apiClient.State(context.Background(), &containerd.StateRequest{Id: containerID}) - if err != nil { - return nil, err - } - for _, cont := range resp.Containers { - if cont.Id == containerID { - return cont, nil - } - } - return nil, fmt.Errorf("invalid state response") -} - -func (clnt *client) newContainer(dir string, options ...CreateOption) *container { - container := &container{ - containerCommon: containerCommon{ - process: process{ - dir: dir, - processCommon: processCommon{ - containerID: filepath.Base(dir), - client: clnt, - friendlyName: InitFriendlyName, - }, - }, - processes: make(map[string]*process), - }, - } - for _, option := range options { - if err := option.Apply(container); err != nil { - logrus.Error(err) - } - } - return container -} - -func (clnt *client) UpdateResources(containerID string, resources Resources) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - return err - } - if container.systemPid == 0 { - return fmt.Errorf("No active process for container %s", containerID) - } - _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ - Id: containerID, - Pid: InitFriendlyName, - Resources: (*containerd.UpdateResource)(&resources), - }) - if err != nil { - return err - } - return nil -} - -func (clnt *client) getExitNotifier(containerID string) *exitNotifier { - clnt.mapMutex.RLock() - defer clnt.mapMutex.RUnlock() - return clnt.exitNotifiers[containerID] -} - -func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { - clnt.mapMutex.Lock() - w, ok := clnt.exitNotifiers[containerID] - defer clnt.mapMutex.Unlock() - if !ok { - w = &exitNotifier{id: containerID, c: make(chan struct{}), client: clnt} - clnt.exitNotifiers[containerID] = w - } - return w -} - -func (clnt *client) restore(cont *containerd.Container, options ...CreateOption) (err error) { - clnt.lock(cont.Id) - defer clnt.unlock(cont.Id) - - logrus.Debugf("restore container %s state %s", cont.Id, cont.Status) - - containerID := cont.Id - if _, err := clnt.getContainer(containerID); err == nil { - return fmt.Errorf("container %s is already active", containerID) - } - - defer func() { - if err != nil { - clnt.deleteContainer(cont.Id) - } - }() - - container := clnt.newContainer(cont.BundlePath, options...)
- container.systemPid = systemPid(cont) - - var terminal bool - for _, p := range cont.Processes { - if p.Pid == InitFriendlyName { - terminal = p.Terminal - } - } - - iopipe, err := container.openFifos(terminal) - if err != nil { - return err - } - - if err := clnt.backend.AttachStreams(containerID, *iopipe); err != nil { - return err - } - - clnt.appendContainer(container) - - err = clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateRestore, - Pid: container.systemPid, - }}) - - if err != nil { - return err - } - - if event, ok := clnt.remote.pastEvents[containerID]; ok { - // This should only be a pause or resume event - if event.Type == StatePause || event.Type == StateResume { - return clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: event.Type, - Pid: container.systemPid, - }}) - } - - logrus.Warnf("unexpected backlog event: %#v", event) - } - - return nil -} - -func (clnt *client) Restore(containerID string, options ...CreateOption) error { - if clnt.liveRestore { - cont, err := clnt.getContainerdContainer(containerID) - if err == nil && cont.Status != "stopped" { - if err := clnt.restore(cont, options...); err != nil { - logrus.Errorf("error restoring %s: %v", containerID, err) - } - return nil - } - return clnt.setExited(containerID) - } - - cont, err := clnt.getContainerdContainer(containerID) - if err == nil && cont.Status != "stopped" { - w := clnt.getOrCreateExitNotifier(containerID) - clnt.lock(cont.Id) - container := clnt.newContainer(cont.BundlePath) - container.systemPid = systemPid(cont) - clnt.appendContainer(container) - clnt.unlock(cont.Id) - - container.discardFifos() - - if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil { - logrus.Errorf("error sending sigterm to %v: %v", containerID, err) - } - select { - case <-time.After(10 * time.Second): - if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil { - logrus.Errorf("error sending sigkill to %v: %v", containerID, err) - } - select { - case <-time.After(2 * time.Second): - case <-w.wait(): - return nil - } - case <-w.wait(): - return nil - } - } - - clnt.deleteContainer(containerID) - - return clnt.setExited(containerID) -} - -type exitNotifier struct { - id string - client *client - c chan struct{} - once sync.Once -} - -func (en *exitNotifier) close() { - en.once.Do(func() { - close(en.c) - en.client.mapMutex.Lock() - if en == en.client.exitNotifiers[en.id] { - delete(en.client.exitNotifiers, en.id) - } - en.client.mapMutex.Unlock() - }) -} -func (en *exitNotifier) wait() <-chan struct{} { - return en.c -} diff --git a/libcontainerd/client_solaris.go b/libcontainerd/client_solaris.go deleted file mode 100644 index ea8c5e182..000000000 --- a/libcontainerd/client_solaris.go +++ /dev/null @@ -1,56 +0,0 @@ -package libcontainerd - -type client struct { - clientCommon - - // Platform specific properties below here. 
-} - -func (clnt *client) AddProcess(containerID, processFriendlyName string, specp Process) error { - return nil -} - -func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) (err error) { - return nil -} - -func (clnt *client) Signal(containerID string, sig int) error { - return nil -} - -func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { - return nil -} - -func (clnt *client) Pause(containerID string) error { - return nil -} - -func (clnt *client) Resume(containerID string) error { - return nil -} - -func (clnt *client) Stats(containerID string) (*Stats, error) { - return nil, nil -} - -// Restore is the handler for restoring a container -func (clnt *client) Restore(containerID string, unusedOnWindows ...CreateOption) error { - return nil -} - -func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { - return nil, nil -} - -// Summary returns a summary of the processes running in a container. -func (clnt *client) Summary(containerID string) ([]Summary, error) { - return nil, nil -} - -// UpdateResources updates resources for a running container. -func (clnt *client) UpdateResources(containerID string, resources Resources) error { - // Updating resources isn't supported on Solaris, - // but we return nil so that container updates can proceed - return nil -} diff --git a/libcontainerd/client_windows.go b/libcontainerd/client_windows.go deleted file mode 100644 index efd693a37..000000000 --- a/libcontainerd/client_windows.go +++ /dev/null @@ -1,442 +0,0 @@ -package libcontainerd - -import ( - "errors" - "fmt" - "io" - "path/filepath" - "strings" - "syscall" - - "github.com/Microsoft/hcsshim" - "github.com/Sirupsen/logrus" -) - -type client struct { - clientCommon - - // Platform specific properties below here (none presently on Windows) -} - -// Win32 error codes that are used for various workarounds -// These really should be ALL_CAPS to match golang's syscall library and standard -// Win32 error conventions, but golint insists on CamelCase. -const ( - CoEClassstring = syscall.Errno(0x800401F3) // Invalid class string - ErrorNoNetwork = syscall.Errno(1222) // The network is not present or not started - ErrorBadPathname = syscall.Errno(161) // The specified path is invalid - ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object -) - -// defaultOwner is a tag passed to HCS to allow it to differentiate between -// container creator management stacks. We hard code "docker" in the case -// of docker. -const defaultOwner = "docker" -
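Like the Linux and Solaris clients earlier in this patch, the Windows client embeds clientCommon (removed above in libcontainerd/client.go), whose lock and unlock helpers serialize work per container ID through github.com/docker/docker/pkg/locker instead of one global mutex. A minimal standalone sketch of that pattern (demoClient is an illustrative stand-in, not part of the removed code):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/locker"
)

type demoClient struct {
	locker *locker.Locker
}

// lock and unlock mirror clientCommon's helpers: calls against different
// container IDs run concurrently, while calls against the same ID are
// serialized.
func (c *demoClient) lock(containerID string)   { c.locker.Lock(containerID) }
func (c *demoClient) unlock(containerID string) { c.locker.Unlock(containerID) }

func main() {
	c := &demoClient{locker: locker.New()}
	c.lock("abc123")
	fmt.Println("holding the per-container lock for abc123")
	c.unlock("abc123")
}

-// Create is the entrypoint to create a container from a spec, and if successfully -// created, start it too.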
-func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) error { - logrus.Debugln("LCD client.Create() with spec", spec) - - configuration := &hcsshim.ContainerConfig{ - SystemType: "Container", - Name: containerID, - Owner: defaultOwner, - - VolumePath: spec.Root.Path, - IgnoreFlushesDuringBoot: spec.Windows.FirstStart, - LayerFolderPath: spec.Windows.LayerFolder, - HostName: spec.Hostname, - } - - if spec.Windows.Networking != nil { - configuration.EndpointList = spec.Windows.Networking.EndpointList - } - - if spec.Windows.Resources != nil { - if spec.Windows.Resources.CPU != nil { - if spec.Windows.Resources.CPU.Shares != nil { - configuration.ProcessorWeight = *spec.Windows.Resources.CPU.Shares - } - if spec.Windows.Resources.CPU.Percent != nil { - configuration.ProcessorMaximum = *spec.Windows.Resources.CPU.Percent * 100 // ProcessorMaximum is a value between 1 and 10000 - } - } - if spec.Windows.Resources.Memory != nil { - if spec.Windows.Resources.Memory.Limit != nil { - configuration.MemoryMaximumInMB = *spec.Windows.Resources.Memory.Limit / 1024 / 1024 - } - } - if spec.Windows.Resources.Storage != nil { - if spec.Windows.Resources.Storage.Bps != nil { - configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps - } - if spec.Windows.Resources.Storage.Iops != nil { - configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops - } - if spec.Windows.Resources.Storage.SandboxSize != nil { - configuration.StorageSandboxSize = *spec.Windows.Resources.Storage.SandboxSize - } - } - } - - if spec.Windows.HvRuntime != nil { - configuration.VolumePath = "" // Always empty for Hyper-V containers - configuration.HvPartition = true - configuration.HvRuntime = &hcsshim.HvRuntime{ - ImagePath: spec.Windows.HvRuntime.ImagePath, - } - - // Images with build version < 14350 don't support running with clone, but - // Windows cannot automatically detect this. Explicitly block cloning in this - // case. - if build := buildFromVersion(spec.Platform.OSVersion); build > 0 && build < 14350 { - configuration.HvRuntime.SkipTemplate = true - } - } - - if configuration.HvPartition { - configuration.SandboxPath = filepath.Dir(spec.Windows.LayerFolder) - } else { - configuration.VolumePath = spec.Root.Path - configuration.LayerFolderPath = spec.Windows.LayerFolder - } - - for _, option := range options { - if s, ok := option.(*ServicingOption); ok { - configuration.Servicing = s.IsServicing - break - } - } - - for _, layerPath := range spec.Windows.LayerPaths { - _, filename := filepath.Split(layerPath) - g, err := hcsshim.NameToGuid(filename) - if err != nil { - return err - } - configuration.Layers = append(configuration.Layers, hcsshim.Layer{ - ID: g.ToString(), - Path: layerPath, - }) - } - - // Add the mounts (volumes, bind mounts etc) to the structure - mds := make([]hcsshim.MappedDir, len(spec.Mounts)) - for i, mount := range spec.Mounts { - mds[i] = hcsshim.MappedDir{ - HostPath: mount.Source, - ContainerPath: mount.Destination, - ReadOnly: mount.Readonly} - } - configuration.MappedDirectories = mds - - hcsContainer, err := hcsshim.CreateContainer(containerID, configuration) - if err != nil { - return err - } - - // Construct a container object for calling start on it. 
- container := &container{ - containerCommon: containerCommon{ - process: process{ - processCommon: processCommon{ - containerID: containerID, - client: clnt, - friendlyName: InitFriendlyName, - }, - commandLine: strings.Join(spec.Process.Args, " "), - }, - processes: make(map[string]*process), - }, - ociSpec: spec, - hcsContainer: hcsContainer, - } - - container.options = options - for _, option := range options { - if err := option.Apply(container); err != nil { - logrus.Error(err) - } - } - - // Call start, and if it fails, delete the container from our - // internal structure; start will keep HCS in sync by deleting the - // container there. - logrus.Debugf("Create() id=%s, Calling start()", containerID) - if err := container.start(); err != nil { - clnt.deleteContainer(containerID) - return err - } - - logrus.Debugf("Create() id=%s completed successfully", containerID) - return nil - -} - -// AddProcess is the handler for adding a process to an already running -// container. It's called through docker exec. -func (clnt *client) AddProcess(containerID, processFriendlyName string, procToAdd Process) error { - - clnt.lock(containerID) - defer clnt.unlock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - return err - } - // Note we always tell HCS to - // create stdout as it's required regardless of '-i' or '-t' options, so that - // docker can always grab the output through logs. We also tell HCS to always - // create stdin, even if it's not used - it will be closed shortly. Stderr - // is only created if we're not using -t. - createProcessParms := hcsshim.ProcessConfig{ - EmulateConsole: procToAdd.Terminal, - ConsoleSize: procToAdd.InitialConsoleSize, - CreateStdInPipe: true, - CreateStdOutPipe: true, - CreateStdErrPipe: !procToAdd.Terminal, - } - - // Take working directory from the process to add if it is defined, - // otherwise take from the first process. - if procToAdd.Cwd != "" { - createProcessParms.WorkingDirectory = procToAdd.Cwd - } else { - createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd - } - - // Configure the environment for the process - createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env) - createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ") - - logrus.Debugf("commandLine: %s", createProcessParms.CommandLine) - - // Start the command running in the container. - var stdout, stderr io.ReadCloser - var stdin io.WriteCloser - newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms) - if err != nil { - logrus.Errorf("AddProcess %s CreateProcess() failed %s", containerID, err) - return err - } - - stdin, stdout, stderr, err = newProcess.Stdio() - if err != nil { - logrus.Errorf("%s getting std pipes failed %s", containerID, err) - return err - } - - iopipe := &IOPipe{Terminal: procToAdd.Terminal} - iopipe.Stdin = createStdInCloser(stdin, newProcess) - - // TEMP: Work around Windows BS/DEL behavior.
- iopipe.Stdin = fixStdinBackspaceBehavior(iopipe.Stdin, container.ociSpec.Platform.OSVersion, procToAdd.Terminal) - - // Convert io.ReadClosers to io.Readers - if stdout != nil { - iopipe.Stdout = openReaderFromPipe(stdout) - } - if stderr != nil { - iopipe.Stderr = openReaderFromPipe(stderr) - } - - pid := newProcess.Pid() - - proc := &process{ - processCommon: processCommon{ - containerID: containerID, - friendlyName: processFriendlyName, - client: clnt, - systemPid: uint32(pid), - }, - commandLine: createProcessParms.CommandLine, - hcsProcess: newProcess, - } - - // Add the process to the container's list of processes - container.processes[processFriendlyName] = proc - - // Make sure the lock is not held while calling back into the daemon - clnt.unlock(containerID) - - // Tell the engine to attach streams back to the client - if err := clnt.backend.AttachStreams(processFriendlyName, *iopipe); err != nil { - return err - } - - // Lock again so that the defer unlock doesn't fail. (I really don't like this code) - clnt.lock(containerID) - - // Spin up a go routine waiting for exit to handle cleanup - go container.waitExit(proc, false) - - return nil -} - -// Signal handles `docker stop` on Windows. While Linux has support for -// the full range of signals, signals aren't really implemented on Windows. -// We fake supporting regular stop and -9 to force kill. -func (clnt *client) Signal(containerID string, sig int) error { - var ( - cont *container - err error - ) - - // Get the container as we need it to find the pid of the process. - clnt.lock(containerID) - defer clnt.unlock(containerID) - if cont, err = clnt.getContainer(containerID); err != nil { - return err - } - - cont.manualStopRequested = true - - logrus.Debugf("lcd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid) - - if syscall.Signal(sig) == syscall.SIGKILL { - // Terminate the compute system - if err := cont.hcsContainer.Terminate(); err != nil { - if err != hcsshim.ErrVmcomputeOperationPending { - logrus.Errorf("Failed to terminate %s - %q", containerID, err) - } - } - } else { - // Terminate Process - if err := cont.hcsProcess.Kill(); err != nil { - // ignore errors - logrus.Warnf("Failed to terminate pid %d in %s: %q", cont.systemPid, containerID, err) - } - } - - return nil -} - -// While Linux has support for the full range of signals, signals aren't really implemented on Windows. -// We try to terminate the specified process whatever signal is requested. -func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - cont, err := clnt.getContainer(containerID) - if err != nil { - return err - } - - for _, p := range cont.processes { - if p.friendlyName == processFriendlyName { - return hcsshim.TerminateProcessInComputeSystem(containerID, p.systemPid) - } - } - - return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID) -} - -// Resize handles a CLI event to resize an interactive docker run or docker exec -// window. 
-func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { - // Get the libcontainerd container object - clnt.lock(containerID) - defer clnt.unlock(containerID) - cont, err := clnt.getContainer(containerID) - if err != nil { - return err - } - - h, w := uint16(height), uint16(width) - - if processFriendlyName == InitFriendlyName { - logrus.Debugln("Resizing systemPID in", containerID, cont.process.systemPid) - return cont.process.hcsProcess.ResizeConsole(w, h) - } - - for _, p := range cont.processes { - if p.friendlyName == processFriendlyName { - logrus.Debugln("Resizing exec'd process", containerID, p.systemPid) - return p.hcsProcess.ResizeConsole(w, h) - } - } - - return fmt.Errorf("Resize could not find containerID %s to resize", containerID) - -} - -// Pause handles pause requests for containers -func (clnt *client) Pause(containerID string) error { - return errors.New("Windows: Containers cannot be paused") -} - -// Resume handles resume requests for containers -func (clnt *client) Resume(containerID string) error { - return errors.New("Windows: Containers cannot be paused") -} - -// Stats handles stats requests for containers -func (clnt *client) Stats(containerID string) (*Stats, error) { - return nil, errors.New("Windows: Stats not implemented") -} - -// Restore is the handler for restoring a container -func (clnt *client) Restore(containerID string, unusedOnWindows ...CreateOption) error { - // TODO Windows: Implement this. For now, just tell the backend the container exited. - logrus.Debugf("lcd Restore %s", containerID) - return clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateExit, - ExitCode: 1 << 31, - }}) -} - -// GetPidsForContainer returns a list of process IDs running in a container. -// Although implemented, this is not used on Windows. -func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { - var pids []int - clnt.lock(containerID) - defer clnt.unlock(containerID) - cont, err := clnt.getContainer(containerID) - if err != nil { - return nil, err - } - - // Add the first process - pids = append(pids, int(cont.containerCommon.systemPid)) - // And add all the exec'd processes - for _, p := range cont.processes { - pids = append(pids, int(p.processCommon.systemPid)) - } - return pids, nil -} - -// Summary returns a summary of the processes running in a container. -// This is present on Windows to support docker top. On Linux, the -// engine shells out to ps to get process information. On Windows, as -// the containers could be Hyper-V containers, they would not be -// visible on the container host. However, libcontainerd does have -// that information. -func (clnt *client) Summary(containerID string) ([]Summary, error) { - var s []Summary - clnt.lock(containerID) - defer clnt.unlock(containerID) - cont, err := clnt.getContainer(containerID) - if err != nil { - return nil, err - } - - // Add the first process - s = append(s, Summary{ - Pid: cont.containerCommon.systemPid, - Command: cont.ociSpec.Process.Args[0]}) - // And add all the exec'd processes - for _, p := range cont.processes { - s = append(s, Summary{ - Pid: p.processCommon.systemPid, - Command: p.commandLine}) - } - return s, nil - -} - -// UpdateResources updates resources for a running container.
-func (clnt *client) UpdateResources(containerID string, resources Resources) error { - // Updating resources isn't supported on Windows, but we return nil - // so that container update requests don't fail. - return nil -} diff --git a/libcontainerd/container.go b/libcontainerd/container.go deleted file mode 100644 index 30bc95028..000000000 --- a/libcontainerd/container.go +++ /dev/null @@ -1,40 +0,0 @@ -package libcontainerd - -import ( - "fmt" - "time" - - "github.com/docker/docker/restartmanager" -) - -const ( - // InitFriendlyName is the name given in the lookup map of processes - // for the first process started in a container. - InitFriendlyName = "init" - configFilename = "config.json" -) - -type containerCommon struct { - process - restartManager restartmanager.RestartManager - restarting bool - processes map[string]*process - startedAt time.Time -} - -// WithRestartManager sets the restartmanager to be used with the container. -func WithRestartManager(rm restartmanager.RestartManager) CreateOption { - return restartManager{rm} -} - -type restartManager struct { - rm restartmanager.RestartManager -} - -func (rm restartManager) Apply(p interface{}) error { - if pr, ok := p.(*container); ok { - pr.restartManager = rm.rm - return nil - } - return fmt.Errorf("WithRestartManager option not supported for this client") -} diff --git a/libcontainerd/container_linux.go b/libcontainerd/container_linux.go deleted file mode 100644 index 324905ed4..000000000 --- a/libcontainerd/container_linux.go +++ /dev/null @@ -1,235 +0,0 @@ -package libcontainerd - -import ( - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/restartmanager" - "github.com/opencontainers/specs/specs-go" - "golang.org/x/net/context" -) - -type container struct { - containerCommon - - // Platform specific fields are below here. - pauseMonitor - oom bool - runtime string - runtimeArgs []string -} - -type runtime struct { - path string - args []string -} - -// WithRuntime sets the runtime to be used for the created container -func WithRuntime(path string, args []string) CreateOption { - return runtime{path, args} -} - -func (rt runtime) Apply(p interface{}) error { - if pr, ok := p.(*container); ok { - pr.runtime = rt.path - pr.runtimeArgs = rt.args - } - return nil -} - -func (ctr *container) clean() error { - if os.Getenv("LIBCONTAINERD_NOCLEAN") == "1" { - return nil - } - if _, err := os.Lstat(ctr.dir); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - if err := os.RemoveAll(ctr.dir); err != nil { - return err - } - return nil -} - -// cleanProcess removes the fifos used by an additional process. -// Caller needs to lock container ID before calling this method.
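// For a process with the (hypothetical) friendlyName "exec-1", the fifos
// removed are <dir>/exec-1-stdin, -stdout and -stderr, following the naming
// scheme in process.fifo() in process_linux.go.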
-func (ctr *container) cleanProcess(id string) { - if p, ok := ctr.processes[id]; ok { - for _, i := range []int{syscall.Stdin, syscall.Stdout, syscall.Stderr} { - if err := os.Remove(p.fifo(i)); err != nil { - logrus.Warnf("failed to remove %v for process %v: %v", p.fifo(i), id, err) - } - } - } - delete(ctr.processes, id) -} - -func (ctr *container) spec() (*specs.Spec, error) { - var spec specs.Spec - dt, err := ioutil.ReadFile(filepath.Join(ctr.dir, configFilename)) - if err != nil { - return nil, err - } - if err := json.Unmarshal(dt, &spec); err != nil { - return nil, err - } - return &spec, nil -} - -func (ctr *container) start() error { - spec, err := ctr.spec() - if err != nil { - return err - } - iopipe, err := ctr.openFifos(spec.Process.Terminal) - if err != nil { - return err - } - - r := &containerd.CreateContainerRequest{ - Id: ctr.containerID, - BundlePath: ctr.dir, - Stdin: ctr.fifo(syscall.Stdin), - Stdout: ctr.fifo(syscall.Stdout), - Stderr: ctr.fifo(syscall.Stderr), - // check to see if we are running in ramdisk to disable pivot root - NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "", - Runtime: ctr.runtime, - RuntimeArgs: ctr.runtimeArgs, - } - ctr.client.appendContainer(ctr) - - resp, err := ctr.client.remote.apiClient.CreateContainer(context.Background(), r) - if err != nil { - ctr.closeFifos(iopipe) - return err - } - ctr.startedAt = time.Now() - - if err := ctr.client.backend.AttachStreams(ctr.containerID, *iopipe); err != nil { - return err - } - ctr.systemPid = systemPid(resp.Container) - - return ctr.client.backend.StateChanged(ctr.containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateStart, - Pid: ctr.systemPid, - }}) -} - -func (ctr *container) newProcess(friendlyName string) *process { - return &process{ - dir: ctr.dir, - processCommon: processCommon{ - containerID: ctr.containerID, - friendlyName: friendlyName, - client: ctr.client, - }, - } -} - -func (ctr *container) handleEvent(e *containerd.Event) error { - ctr.client.lock(ctr.containerID) - defer ctr.client.unlock(ctr.containerID) - switch e.Type { - case StateExit, StatePause, StateResume, StateOOM: - st := StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: e.Type, - ExitCode: e.Status, - }, - OOMKilled: e.Type == StateExit && ctr.oom, - } - if e.Type == StateOOM { - ctr.oom = true - } - if e.Type == StateExit && e.Pid != InitFriendlyName { - st.ProcessID = e.Pid - st.State = StateExitProcess - } - if st.State == StateExit && ctr.restartManager != nil { - restart, wait, err := ctr.restartManager.ShouldRestart(e.Status, false, time.Since(ctr.startedAt)) - if err != nil { - logrus.Warnf("container %s %v", ctr.containerID, err) - } else if restart { - st.State = StateRestart - ctr.restarting = true - ctr.client.deleteContainer(e.Id) - go func() { - err := <-wait - ctr.client.lock(ctr.containerID) - defer ctr.client.unlock(ctr.containerID) - ctr.restarting = false - if err != nil { - st.State = StateExit - ctr.clean() - ctr.client.q.append(e.Id, func() { - if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { - logrus.Error(err) - } - }) - if err != restartmanager.ErrRestartCanceled { - logrus.Error(err) - } - } else { - ctr.start() - } - }() - } - } - - // Remove process from list if we have exited - // We need to do so here in case the Message Handler decides to restart it.
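// StateExit tears the whole container down (clean plus removal from the
// client's map), while StateExitProcess only drops the exec'd process entry: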
- switch st.State { - case StateExit: - ctr.clean() - ctr.client.deleteContainer(e.Id) - case StateExitProcess: - ctr.cleanProcess(st.ProcessID) - } - ctr.client.q.append(e.Id, func() { - if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { - logrus.Error(err) - } - if e.Type == StatePause || e.Type == StateResume { - ctr.pauseMonitor.handle(e.Type) - } - if e.Type == StateExit { - if en := ctr.client.getExitNotifier(e.Id); en != nil { - en.close() - } - } - }) - - default: - logrus.Debugf("event unhandled: %+v", e) - } - return nil -} - -// discardFifos attempts to fully read the container fifos to unblock processes -// that may be blocked on the writer side. -func (ctr *container) discardFifos() { - for _, i := range []int{syscall.Stdout, syscall.Stderr} { - f := ctr.fifo(i) - c := make(chan struct{}) - go func() { - r := openReaderFromFifo(f) - close(c) // this channel is used to avoid closing the writer too early, before the read-only open has been called. - io.Copy(ioutil.Discard, r) - }() - <-c - closeReaderFifo(f) // avoid blocking permanently on open if there is no writer side - } -} diff --git a/libcontainerd/container_solaris.go b/libcontainerd/container_solaris.go deleted file mode 100644 index 24ab1de03..000000000 --- a/libcontainerd/container_solaris.go +++ /dev/null @@ -1,5 +0,0 @@ -package libcontainerd - -type container struct { - containerCommon -} diff --git a/libcontainerd/container_windows.go b/libcontainerd/container_windows.go deleted file mode 100644 index 33f187eb1..000000000 --- a/libcontainerd/container_windows.go +++ /dev/null @@ -1,312 +0,0 @@ -package libcontainerd - -import ( - "io" - "strings" - "syscall" - "time" - - "github.com/Microsoft/hcsshim" - "github.com/Sirupsen/logrus" -) - -type container struct { - containerCommon - - // Platform specific fields are below here. - options []CreateOption - - // The ociSpec is required, as client.Create() needs a spec, - // but can be called from the RestartManager context which does not - // otherwise have access to the Spec - ociSpec Spec - - manualStopRequested bool - hcsContainer hcsshim.Container -} - -func (ctr *container) newProcess(friendlyName string) *process { - return &process{ - processCommon: processCommon{ - containerID: ctr.containerID, - friendlyName: friendlyName, - client: ctr.client, - }, - } -} - -func (ctr *container) start() error { - var err error - isServicing := false - - for _, option := range ctr.options { - if s, ok := option.(*ServicingOption); ok && s.IsServicing { - isServicing = true - } - } - - // Start the container. If this is a servicing container, this call will block - // until the container is done with the servicing execution. - logrus.Debugln("Starting container ", ctr.containerID) - if err = ctr.hcsContainer.Start(); err != nil { - logrus.Errorf("Failed to start container: %s", err) - if err := ctr.terminate(); err != nil { - logrus.Errorf("Failed to cleanup after a failed Start. %s", err) - } else { - logrus.Debugln("Cleaned up after failed Start by calling Terminate") - } - return err - } - - // Note we always tell HCS to - // create stdout as it's required regardless of '-i' or '-t' options, so that - // docker can always grab the output through logs. We also tell HCS to always - // create stdin, even if it's not used - it will be closed shortly. Stderr - // is only created if we're not running with -t.
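// Concretely: a servicing container gets no stdio pipes at all; with a tty
// (-t) only stdin and stdout are created; otherwise all three are created.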
- createProcessParms := &hcsshim.ProcessConfig{ - EmulateConsole: ctr.ociSpec.Process.Terminal, - WorkingDirectory: ctr.ociSpec.Process.Cwd, - ConsoleSize: ctr.ociSpec.Process.InitialConsoleSize, - CreateStdInPipe: !isServicing, - CreateStdOutPipe: !isServicing, - CreateStdErrPipe: !ctr.ociSpec.Process.Terminal && !isServicing, - } - - // Configure the environment for the process - createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env) - createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ") - - // Start the command running in the container. - hcsProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms) - if err != nil { - logrus.Errorf("CreateProcess() failed %s", err) - if err := ctr.terminate(); err != nil { - logrus.Errorf("Failed to cleanup after a failed CreateProcess. %s", err) - } else { - logrus.Debugln("Cleaned up after failed CreateProcess by calling Terminate") - } - return err - } - ctr.startedAt = time.Now() - - // Save the hcs Process and PID - ctr.process.friendlyName = InitFriendlyName - pid := hcsProcess.Pid() - ctr.process.hcsProcess = hcsProcess - - // If this is a servicing container, wait on the process synchronously here and - // immediately call shutdown/terminate when it returns. - if isServicing { - exitCode := ctr.waitProcessExitCode(&ctr.process) - - if exitCode != 0 { - logrus.Warnf("Servicing container %s returned non-zero exit code %d", ctr.containerID, exitCode) - return ctr.terminate() - } - - return ctr.shutdown() - } - - var stdout, stderr io.ReadCloser - var stdin io.WriteCloser - stdin, stdout, stderr, err = hcsProcess.Stdio() - if err != nil { - logrus.Errorf("failed to get stdio pipes: %s", err) - if err := ctr.terminate(); err != nil { - logrus.Errorf("Failed to cleanup after a failed Stdio. %s", err) - } - return err - } - - iopipe := &IOPipe{Terminal: ctr.ociSpec.Process.Terminal} - - iopipe.Stdin = createStdInCloser(stdin, hcsProcess) - - // TEMP: Work around Windows BS/DEL behavior. - iopipe.Stdin = fixStdinBackspaceBehavior(iopipe.Stdin, ctr.ociSpec.Platform.OSVersion, ctr.ociSpec.Process.Terminal) - - // Convert io.ReadClosers to io.Readers - if stdout != nil { - iopipe.Stdout = openReaderFromPipe(stdout) - } - if stderr != nil { - iopipe.Stderr = openReaderFromPipe(stderr) - } - - // Save the PID - logrus.Debugf("Process started - PID %d", pid) - ctr.systemPid = uint32(pid) - - // Spin up a go routine waiting for exit to handle cleanup - go ctr.waitExit(&ctr.process, true) - - ctr.client.appendContainer(ctr) - - if err := ctr.client.backend.AttachStreams(ctr.containerID, *iopipe); err != nil { - // OK to return the error here, as waitExit will handle tear-down in HCS - return err - } - - // Tell the docker engine that the container has started. - si := StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateStart, - Pid: ctr.systemPid, // Not sure this is needed? Double-check monitor.go in daemon BUGBUG @jhowardmsft - }} - return ctr.client.backend.StateChanged(ctr.containerID, si) - -} - -// waitProcessExitCode will wait for the given process to exit and return its error code. -func (ctr *container) waitProcessExitCode(process *process) int { - // Block indefinitely for the process to exit. - err := process.hcsProcess.Wait() - if err != nil { - if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE { - logrus.Warnf("Wait failed (container may have been killed): %s", err) - } - // Fall through here, do not return. 
This ensures we attempt to continue the - // shutdown in HCS and tell the docker engine that the process/container - // has exited to avoid a container being dropped on the floor. - } - - exitCode, err := process.hcsProcess.ExitCode() - if err != nil { - if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE { - logrus.Warnf("Unable to get exit code from container %s", ctr.containerID) - } - // Since we got an error retrieving the exit code, make sure that the code we return - // doesn't incorrectly indicate success. - exitCode = -1 - - // Fall through here, do not return. This ensures we attempt to continue the - // shutdown in HCS and tell the docker engine that the process/container - // has exited to avoid a container being dropped on the floor. - } - - if err := process.hcsProcess.Close(); err != nil { - logrus.Error(err) - } - - return exitCode -} - -// waitExit runs as a goroutine waiting for the process to exit. It is the -// Windows equivalent of the Linux containerd world, where state-change -// notifications arrive as events from containerd. -func (ctr *container) waitExit(process *process, isFirstProcessToStart bool) error { - logrus.Debugln("waitExit on pid", process.systemPid) - - exitCode := ctr.waitProcessExitCode(process) - - // Assume the container has exited - si := StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateExit, - ExitCode: uint32(exitCode), - Pid: process.systemPid, - ProcessID: process.friendlyName, - }, - UpdatePending: false, - } - - // But it could have been an exec'd process which exited - if !isFirstProcessToStart { - si.State = StateExitProcess - } else { - updatePending, err := ctr.hcsContainer.HasPendingUpdates() - if err != nil { - logrus.Warnf("HasPendingUpdates failed (container may have been killed): %s", err) - } else { - si.UpdatePending = updatePending - } - - logrus.Debugf("Shutting down container %s", ctr.containerID) - if err := ctr.shutdown(); err != nil { - logrus.Debugf("Failed to shutdown container %s", ctr.containerID) - } else { - logrus.Debugf("Completed shutting down container %s", ctr.containerID) - } - if err := ctr.hcsContainer.Close(); err != nil { - logrus.Error(err) - } - - if !ctr.manualStopRequested && ctr.restartManager != nil { - restart, wait, err := ctr.restartManager.ShouldRestart(uint32(exitCode), false, time.Since(ctr.startedAt)) - if err != nil { - logrus.Error(err) - } else if restart { - si.State = StateRestart - ctr.restarting = true - go func() { - err := <-wait - ctr.restarting = false - ctr.client.deleteContainer(ctr.friendlyName) - if err != nil { - si.State = StateExit - if err := ctr.client.backend.StateChanged(ctr.containerID, si); err != nil { - logrus.Error(err) - } - logrus.Error(err) - } else { - ctr.client.Create(ctr.containerID, ctr.ociSpec, ctr.options...) - } - }() - } - } - - // Remove process from list if we have exited - // We need to do so here in case the Message Handler decides to restart it. - if si.State == StateExit { - ctr.client.deleteContainer(ctr.friendlyName) - } - } - - // Call into the backend to notify it of the state change.
- logrus.Debugf("waitExit() calling backend.StateChanged %+v", si) - if err := ctr.client.backend.StateChanged(ctr.containerID, si); err != nil { - logrus.Error(err) - } - - logrus.Debugf("waitExit() completed OK, %+v", si) - return nil -} - -func (ctr *container) shutdown() error { - const shutdownTimeout = time.Minute * 5 - err := ctr.hcsContainer.Shutdown() - if err == hcsshim.ErrVmcomputeOperationPending { - // Explicit timeout to avoid a (remote) possibility that shutdown hangs indefinitely. - err = ctr.hcsContainer.WaitTimeout(shutdownTimeout) - } else if err == hcsshim.ErrVmcomputeAlreadyStopped { - err = nil - } - - if err != nil { - logrus.Debugf("error shutting down container %s %v calling terminate", ctr.containerID, err) - if err := ctr.terminate(); err != nil { - return err - } - return err - } - - return nil -} - -func (ctr *container) terminate() error { - const terminateTimeout = time.Minute * 5 - err := ctr.hcsContainer.Terminate() - - if err == hcsshim.ErrVmcomputeOperationPending { - err = ctr.hcsContainer.WaitTimeout(terminateTimeout) - } else if err == hcsshim.ErrVmcomputeAlreadyStopped { - err = nil - } - - if err != nil { - logrus.Debugf("error terminating container %s %v", ctr.containerID, err) - return err - } - - return nil -} diff --git a/libcontainerd/pausemonitor_linux.go b/libcontainerd/pausemonitor_linux.go deleted file mode 100644 index 379cbf1fc..000000000 --- a/libcontainerd/pausemonitor_linux.go +++ /dev/null @@ -1,31 +0,0 @@ -package libcontainerd - -// pauseMonitor is helper to get notifications from pause state changes. -type pauseMonitor struct { - waiters map[string][]chan struct{} -} - -func (m *pauseMonitor) handle(t string) { - if m.waiters == nil { - return - } - q, ok := m.waiters[t] - if !ok { - return - } - if len(q) > 0 { - close(q[0]) - m.waiters[t] = q[1:] - } -} - -func (m *pauseMonitor) append(t string, waiter chan struct{}) { - if m.waiters == nil { - m.waiters = make(map[string][]chan struct{}) - } - _, ok := m.waiters[t] - if !ok { - m.waiters[t] = make([]chan struct{}, 0) - } - m.waiters[t] = append(m.waiters[t], waiter) -} diff --git a/libcontainerd/process.go b/libcontainerd/process.go deleted file mode 100644 index 57562c878..000000000 --- a/libcontainerd/process.go +++ /dev/null @@ -1,18 +0,0 @@ -package libcontainerd - -// processCommon are the platform common fields as part of the process structure -// which keeps the state for the main container process, as well as any exec -// processes. -type processCommon struct { - client *client - - // containerID is the Container ID - containerID string - - // friendlyName is an identifier for the process (or `InitFriendlyName` - // for the first process) - friendlyName string - - // systemPid is the PID of the main container process - systemPid uint32 -} diff --git a/libcontainerd/process_linux.go b/libcontainerd/process_linux.go deleted file mode 100644 index 3c48576fe..000000000 --- a/libcontainerd/process_linux.go +++ /dev/null @@ -1,110 +0,0 @@ -package libcontainerd - -import ( - "fmt" - "io" - "os" - "path/filepath" - "syscall" - - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/pkg/ioutils" - "golang.org/x/net/context" -) - -var fdNames = map[int]string{ - syscall.Stdin: "stdin", - syscall.Stdout: "stdout", - syscall.Stderr: "stderr", -} - -// process keeps the state for both main container process and exec process. -type process struct { - processCommon - - // Platform specific fields are below here. 
- dir string -} - -func (p *process) openFifos(terminal bool) (*IOPipe, error) { - bundleDir := p.dir - if err := os.MkdirAll(bundleDir, 0700); err != nil { - return nil, err - } - - for i := 0; i < 3; i++ { - f := p.fifo(i) - if err := syscall.Mkfifo(f, 0700); err != nil && !os.IsExist(err) { - return nil, fmt.Errorf("mkfifo: %s %v", f, err) - } - } - - io := &IOPipe{} - stdinf, err := os.OpenFile(p.fifo(syscall.Stdin), syscall.O_RDWR, 0) - if err != nil { - return nil, err - } - - io.Stdout = openReaderFromFifo(p.fifo(syscall.Stdout)) - if !terminal { - io.Stderr = openReaderFromFifo(p.fifo(syscall.Stderr)) - } else { - io.Stderr = emptyReader{} - } - - io.Stdin = ioutils.NewWriteCloserWrapper(stdinf, func() error { - stdinf.Close() - _, err := p.client.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ - Id: p.containerID, - Pid: p.friendlyName, - CloseStdin: true, - }) - return err - }) - - return io, nil -} - -func (p *process) closeFifos(io *IOPipe) { - io.Stdin.Close() - closeReaderFifo(p.fifo(syscall.Stdout)) - closeReaderFifo(p.fifo(syscall.Stderr)) -} - -type emptyReader struct{} - -func (r emptyReader) Read(b []byte) (int, error) { - return 0, io.EOF -} - -func openReaderFromFifo(fn string) io.Reader { - r, w := io.Pipe() - c := make(chan struct{}) - go func() { - close(c) - stdoutf, err := os.OpenFile(fn, syscall.O_RDONLY, 0) - if err != nil { - r.CloseWithError(err) - return // nothing to copy if the fifo failed to open - } - if _, err := io.Copy(w, stdoutf); err != nil { - r.CloseWithError(err) - } - w.Close() - stdoutf.Close() - }() - <-c // wait for the goroutine to get scheduled and syscall to block - return r -} - -// closeReaderFifo closes a fifo that may be blocked on open by opening the write side. -func closeReaderFifo(fn string) { - f, err := os.OpenFile(fn, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) - if err != nil { - return - } - f.Close() -} - -func (p *process) fifo(index int) string { - return filepath.Join(p.dir, p.friendlyName+"-"+fdNames[index]) -} diff --git a/libcontainerd/process_solaris.go b/libcontainerd/process_solaris.go deleted file mode 100644 index 2ee9b2566..000000000 --- a/libcontainerd/process_solaris.go +++ /dev/null @@ -1,6 +0,0 @@ -package libcontainerd - -// process keeps the state for both main container process and exec process. -type process struct { - processCommon -} diff --git a/libcontainerd/process_windows.go b/libcontainerd/process_windows.go deleted file mode 100644 index 01427246f..000000000 --- a/libcontainerd/process_windows.go +++ /dev/null @@ -1,88 +0,0 @@ -package libcontainerd - -import ( - "io" - - "github.com/Microsoft/hcsshim" -) - -// process keeps the state for both main container process and exec process. -type process struct { - processCommon - - // Platform specific fields are below here. - - // commandLine is to support returning summary information for docker top - commandLine string - hcsProcess hcsshim.Process -} - -func openReaderFromPipe(p io.ReadCloser) io.Reader { - r, w := io.Pipe() - go func() { - if _, err := io.Copy(w, p); err != nil { - r.CloseWithError(err) - } - w.Close() - p.Close() - }() - return r -} - -// fixStdinBackspaceBehavior works around a bug in Windows before build 14350 -// where it interpreted DEL as VK_DELETE instead of as VK_BACK. This replaces -// DEL with BS to work around this.
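// For example, buildFromVersion("10.0.14300") yields 14300, so tty stdin on
// that build gets the DEL->BS rewrite; builds >= 14350, and version strings
// that do not parse (build 0), pass through unchanged.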
-func fixStdinBackspaceBehavior(w io.WriteCloser, osversion string, tty bool) io.WriteCloser { - if !tty { - return w - } - if build := buildFromVersion(osversion); build == 0 || build >= 14350 { - return w - } - - return &delToBsWriter{w} -} - -type delToBsWriter struct { - io.WriteCloser -} - -func (w *delToBsWriter) Write(b []byte) (int, error) { - const ( - backspace = 0x8 - del = 0x7f - ) - bc := make([]byte, len(b)) - for i, c := range b { - if c == del { - bc[i] = backspace - } else { - bc[i] = c - } - } - return w.WriteCloser.Write(bc) -} - -type stdInCloser struct { - io.WriteCloser - hcsshim.Process -} - -func createStdInCloser(pipe io.WriteCloser, process hcsshim.Process) *stdInCloser { - return &stdInCloser{ - WriteCloser: pipe, - Process: process, - } -} - -func (stdin *stdInCloser) Close() error { - if err := stdin.WriteCloser.Close(); err != nil { - return err - } - - return stdin.Process.CloseStdin() -} - -func (stdin *stdInCloser) Write(p []byte) (n int, err error) { - return stdin.WriteCloser.Write(p) -} diff --git a/libcontainerd/queue_linux.go b/libcontainerd/queue_linux.go deleted file mode 100644 index 34bc81d24..000000000 --- a/libcontainerd/queue_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -package libcontainerd - -import "sync" - -type queue struct { - sync.Mutex - fns map[string]chan struct{} -} - -func (q *queue) append(id string, f func()) { - q.Lock() - defer q.Unlock() - - if q.fns == nil { - q.fns = make(map[string]chan struct{}) - } - - done := make(chan struct{}) - - fn, ok := q.fns[id] - q.fns[id] = done - go func() { - if ok { - <-fn - } - f() - close(done) - }() -} diff --git a/libcontainerd/remote.go b/libcontainerd/remote.go deleted file mode 100644 index 9031e3ae7..000000000 --- a/libcontainerd/remote.go +++ /dev/null @@ -1,20 +0,0 @@ -package libcontainerd - -// Remote on Linux defines the access point to the containerd grpc API. -// Remote on Windows is largely an unimplemented interface as there is -// no remote containerd. -type Remote interface { - // Client returns a new Client instance connected with given Backend. - Client(Backend) (Client, error) - // Cleanup stops containerd if it was started by libcontainerd. - // Note this is not used on Windows as there is no remote containerd. - Cleanup() - // UpdateOptions allows various remote options to be updated at runtime. - UpdateOptions(...RemoteOption) error -} - -// RemoteOption allows configuring parameters of remotes. -// This is unused on Windows.
-type RemoteOption interface { - Apply(Remote) error -} diff --git a/libcontainerd/remote_linux.go b/libcontainerd/remote_linux.go deleted file mode 100644 index 03b42a80e..000000000 --- a/libcontainerd/remote_linux.go +++ /dev/null @@ -1,541 +0,0 @@ -package libcontainerd - -import ( - "fmt" - "io" - "io/ioutil" - "log" - "net" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/pkg/locker" - sysinfo "github.com/docker/docker/pkg/system" - "github.com/docker/docker/utils" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/transport" -) - -const ( - maxConnectionRetryCount = 3 - connectionRetryDelay = 3 * time.Second - containerdShutdownTimeout = 15 * time.Second - containerdBinary = "docker-containerd" - containerdPidFilename = "docker-containerd.pid" - containerdSockFilename = "docker-containerd.sock" - containerdStateDir = "containerd" - eventTimestampFilename = "event.ts" -) - -type remote struct { - sync.RWMutex - apiClient containerd.APIClient - daemonPid int - stateDir string - rpcAddr string - startDaemon bool - closeManually bool - debugLog bool - rpcConn *grpc.ClientConn - clients []*client - eventTsPath string - pastEvents map[string]*containerd.Event - runtime string - runtimeArgs []string - daemonWaitCh chan struct{} - liveRestore bool - oomScore int -} - -// New creates a fresh instance of libcontainerd remote. -func New(stateDir string, options ...RemoteOption) (_ Remote, err error) { - defer func() { - if err != nil { - err = fmt.Errorf("Failed to connect to containerd. Please make sure containerd is installed in your PATH or you have specified the correct address. Got error: %v", err) - } - }() - r := &remote{ - stateDir: stateDir, - daemonPid: -1, - eventTsPath: filepath.Join(stateDir, eventTimestampFilename), - pastEvents: make(map[string]*containerd.Event), - } - for _, option := range options { - if err := option.Apply(r); err != nil { - return nil, err - } - } - - if err := sysinfo.MkdirAll(stateDir, 0700); err != nil { - return nil, err - } - - if r.rpcAddr == "" { - r.rpcAddr = filepath.Join(stateDir, containerdSockFilename) - } - - if r.startDaemon { - if err := r.runContainerdDaemon(); err != nil { - return nil, err - } - } - - // don't output the grpc reconnect logging - grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags)) - dialOpts := append([]grpc.DialOption{grpc.WithInsecure()}, - grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout("unix", addr, timeout) - }), - ) - conn, err := grpc.Dial(r.rpcAddr, dialOpts...)
- if err != nil { - return nil, fmt.Errorf("error connecting to containerd: %v", err) - } - - r.rpcConn = conn - r.apiClient = containerd.NewAPIClient(conn) - - go r.handleConnectionChange() - - if err := r.startEventsMonitor(); err != nil { - return nil, err - } - - return r, nil -} - -func (r *remote) UpdateOptions(options ...RemoteOption) error { - for _, option := range options { - if err := option.Apply(r); err != nil { - return err - } - } - return nil -} - -func (r *remote) handleConnectionChange() { - var transientFailureCount = 0 - state := grpc.Idle - for { - s, err := r.rpcConn.WaitForStateChange(context.Background(), state) - if err != nil { - break - } - state = s - logrus.Debugf("containerd connection state change: %v", s) - - if r.daemonPid != -1 { - switch state { - case grpc.TransientFailure: - // Reset state to be notified of next failure - transientFailureCount++ - if transientFailureCount >= maxConnectionRetryCount { - transientFailureCount = 0 - if utils.IsProcessAlive(r.daemonPid) { - utils.KillProcess(r.daemonPid) - } - <-r.daemonWaitCh - if err := r.runContainerdDaemon(); err != nil { //FIXME: Handle error - logrus.Errorf("error restarting containerd: %v", err) - } - } else { - state = grpc.Idle - time.Sleep(connectionRetryDelay) - } - case grpc.Shutdown: - // Well, we asked for it to stop, just return - return - } - } - } -} - -func (r *remote) Cleanup() { - if r.daemonPid == -1 { - return - } - r.closeManually = true - r.rpcConn.Close() - // Ask the daemon to quit - syscall.Kill(r.daemonPid, syscall.SIGTERM) - - // Wait up to 15 secs for it to stop - for i := time.Duration(0); i < containerdShutdownTimeout; i += time.Second { - if !utils.IsProcessAlive(r.daemonPid) { - break - } - time.Sleep(time.Second) - } - - if utils.IsProcessAlive(r.daemonPid) { - logrus.Warnf("libcontainerd: containerd (%d) didn't stop within 15 secs, killing it\n", r.daemonPid) - syscall.Kill(r.daemonPid, syscall.SIGKILL) - } - - // cleanup some files - os.Remove(filepath.Join(r.stateDir, containerdPidFilename)) - os.Remove(filepath.Join(r.stateDir, containerdSockFilename)) -} - -func (r *remote) Client(b Backend) (Client, error) { - c := &client{ - clientCommon: clientCommon{ - backend: b, - containers: make(map[string]*container), - locker: locker.New(), - }, - remote: r, - exitNotifiers: make(map[string]*exitNotifier), - liveRestore: r.liveRestore, - } - - r.Lock() - r.clients = append(r.clients, c) - r.Unlock() - return c, nil -} - -func (r *remote) updateEventTimestamp(t time.Time) { - f, err := os.OpenFile(r.eventTsPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600) - if err != nil { - logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err) - return - } - defer f.Close() - - b, err := t.MarshalText() - if err != nil { - logrus.Warnf("libcontainerd: failed to encode timestamp: %v", err) - return - } - - n, err := f.Write(b) - if err != nil || n != len(b) { - logrus.Warnf("libcontainerd: failed to update event timestamp file: %v", err) - f.Truncate(0) - return - } - -} - -func (r *remote) getLastEventTimestamp() int64 { - t := time.Now() - - fi, err := os.Stat(r.eventTsPath) - if err != nil || fi.Size() == 0 { - // missing, empty or inaccessible file: fall back to now - return t.Unix() - } - - f, err := os.Open(r.eventTsPath) - if err != nil { - logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err) - return t.Unix() - } - defer f.Close() - - b := make([]byte, fi.Size()) - n, err := f.Read(b) - if err != nil || n != len(b) { - logrus.Warnf("libcontainerd: Unable to read last event ts: %v", err)
- return t.Unix() - } - - t.UnmarshalText(b) - - return t.Unix() -} - -func (r *remote) startEventsMonitor() error { - // First, get past events - er := &containerd.EventsRequest{ - Timestamp: uint64(r.getLastEventTimestamp()), - } - events, err := r.apiClient.Events(context.Background(), er) - if err != nil { - return err - } - go r.handleEventStream(events) - return nil -} - -func (r *remote) handleEventStream(events containerd.API_EventsClient) { - live := false - for { - e, err := events.Recv() - if err != nil { - if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc && - r.closeManually { - // ignore error if grpc remote connection is closed manually - return - } - logrus.Errorf("failed to receive event from containerd: %v", err) - go r.startEventsMonitor() - return - } - - if !live { - logrus.Debugf("received past containerd event: %#v", e) - - // Pause/Resume events should never happen after an exit event - switch e.Type { - case StateExit: - r.pastEvents[e.Id] = e - case StatePause: - r.pastEvents[e.Id] = e - case StateResume: - r.pastEvents[e.Id] = e - case stateLive: - live = true - r.updateEventTimestamp(time.Unix(int64(e.Timestamp), 0)) - } - } else { - logrus.Debugf("received containerd event: %#v", e) - - var container *container - var c *client - r.RLock() - for _, c = range r.clients { - container, err = c.getContainer(e.Id) - if err == nil { - break - } - } - r.RUnlock() - if container == nil { - logrus.Errorf("no state for container: %q", err) - continue - } - - if err := container.handleEvent(e); err != nil { - logrus.Errorf("error processing state change for %s: %v", e.Id, err) - } - - r.updateEventTimestamp(time.Unix(int64(e.Timestamp), 0)) - } - } -} - -func (r *remote) runContainerdDaemon() error { - pidFilename := filepath.Join(r.stateDir, containerdPidFilename) - f, err := os.OpenFile(pidFilename, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return err - } - defer f.Close() - - // If the file already has content, check whether the daemon is alive - b := make([]byte, 8) - n, err := f.Read(b) - if err != nil && err != io.EOF { - return err - } - - if n > 0 { - pid, err := strconv.ParseUint(string(b[:n]), 10, 64) - if err != nil { - return err - } - if utils.IsProcessAlive(int(pid)) { - logrus.Infof("previous instance of containerd still alive (%d)", pid) - r.daemonPid = int(pid) - return nil - } - } - - // rewind the file - _, err = f.Seek(0, os.SEEK_SET) - if err != nil { - return err - } - - // Truncate it - err = f.Truncate(0) - if err != nil { - return err - } - - // Start a new instance - args := []string{ - "-l", fmt.Sprintf("unix://%s", r.rpcAddr), - "--shim", "docker-containerd-shim", - "--metrics-interval=0", - "--start-timeout", "2m", - "--state-dir", filepath.Join(r.stateDir, containerdStateDir), - } - if r.runtime != "" { - args = append(args, "--runtime") - args = append(args, r.runtime) - } - if r.debugLog { - args = append(args, "--debug") - } - if len(r.runtimeArgs) > 0 { - for _, v := range r.runtimeArgs { - args = append(args, "--runtime-args") - args = append(args, v) - } - logrus.Debugf("runContainerdDaemon: runtimeArgs: %s", args) - } - - cmd := exec.Command(containerdBinary, args...)
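// Assuming a stateDir of /var/run/docker/libcontainerd (the value is
// caller-supplied), the resulting invocation looks like:
//   docker-containerd -l unix:///var/run/docker/libcontainerd/docker-containerd.sock \
//     --shim docker-containerd-shim --metrics-interval=0 --start-timeout 2m \
//     --state-dir /var/run/docker/libcontainerd/containerd
// plus any --runtime, --runtime-args and --debug flags configured above.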
- // redirect containerd logs to docker logs - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true, Pdeathsig: syscall.SIGKILL} - cmd.Env = nil - // clear the NOTIFY_SOCKET from the env when starting containerd - for _, e := range os.Environ() { - if !strings.HasPrefix(e, "NOTIFY_SOCKET") { - cmd.Env = append(cmd.Env, e) - } - } - if err := cmd.Start(); err != nil { - return err - } - logrus.Infof("New containerd process, pid: %d", cmd.Process.Pid) - if err := setOOMScore(cmd.Process.Pid, r.oomScore); err != nil { - utils.KillProcess(cmd.Process.Pid) - return err - } - if _, err := f.WriteString(fmt.Sprintf("%d", cmd.Process.Pid)); err != nil { - utils.KillProcess(cmd.Process.Pid) - return err - } - - r.daemonWaitCh = make(chan struct{}) - go func() { - cmd.Wait() - close(r.daemonWaitCh) - }() // Reap our child when needed - r.daemonPid = cmd.Process.Pid - return nil -} - -func setOOMScore(pid, score int) error { - f, err := os.OpenFile(fmt.Sprintf("/proc/%d/oom_score_adj", pid), os.O_WRONLY, 0) - if err != nil { - return err - } - _, err = f.WriteString(strconv.Itoa(score)) - f.Close() - return err -} - -// WithRemoteAddr sets the external containerd socket to connect to. -func WithRemoteAddr(addr string) RemoteOption { - return rpcAddr(addr) -} - -type rpcAddr string - -func (a rpcAddr) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.rpcAddr = string(a) - return nil - } - return fmt.Errorf("WithRemoteAddr option not supported for this remote") -} - -// WithRuntimePath sets the path of the runtime to be used as the -// default by containerd -func WithRuntimePath(rt string) RemoteOption { - return runtimePath(rt) -} - -type runtimePath string - -func (rt runtimePath) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.runtime = string(rt) - return nil - } - return fmt.Errorf("WithRuntime option not supported for this remote") -} - -// WithRuntimeArgs sets the list of runtime args passed to containerd -func WithRuntimeArgs(args []string) RemoteOption { - return runtimeArgs(args) -} - -type runtimeArgs []string - -func (rt runtimeArgs) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.runtimeArgs = rt - return nil - } - return fmt.Errorf("WithRuntimeArgs option not supported for this remote") -} - -// WithStartDaemon defines if libcontainerd should also run containerd daemon. -func WithStartDaemon(start bool) RemoteOption { - return startDaemon(start) -} - -type startDaemon bool - -func (s startDaemon) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.startDaemon = bool(s) - return nil - } - return fmt.Errorf("WithStartDaemon option not supported for this remote") -} - -// WithDebugLog defines if containerd debug logs will be enabled for daemon. -func WithDebugLog(debug bool) RemoteOption { - return debugLog(debug) -} - -type debugLog bool - -func (d debugLog) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.debugLog = bool(d) - return nil - } - return fmt.Errorf("WithDebugLog option not supported for this remote") -} - -// WithLiveRestore defines if containers are stopped on shutdown or restored. 
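// Unlike the other remote options, applying it also updates any clients
// already created from this remote (see the loop over remote.clients below).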
-func WithLiveRestore(v bool) RemoteOption { - return liveRestore(v) -} - -type liveRestore bool - -func (l liveRestore) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.liveRestore = bool(l) - for _, c := range remote.clients { - c.liveRestore = bool(l) - } - return nil - } - return fmt.Errorf("WithLiveRestore option not supported for this remote") -} - -// WithOOMScore defines the oom_score_adj to set for the containerd process. -func WithOOMScore(score int) RemoteOption { - return oomScore(score) -} - -type oomScore int - -func (o oomScore) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.oomScore = int(o) - return nil - } - return fmt.Errorf("WithOOMScore option not supported for this remote") -} diff --git a/libcontainerd/remote_solaris.go b/libcontainerd/remote_solaris.go deleted file mode 100644 index e04f19288..000000000 --- a/libcontainerd/remote_solaris.go +++ /dev/null @@ -1,34 +0,0 @@ -package libcontainerd - -import "github.com/docker/docker/pkg/locker" - -type remote struct { -} - -func (r *remote) Client(b Backend) (Client, error) { - c := &client{ - clientCommon: clientCommon{ - backend: b, - containers: make(map[string]*container), - locker: locker.New(), - }, - } - return c, nil -} - -func (r *remote) Cleanup() { -} - -func (r *remote) UpdateOptions(opts ...RemoteOption) error { - return nil -} - -// New creates a fresh instance of libcontainerd remote. -func New(_ string, _ ...RemoteOption) (Remote, error) { - return &remote{}, nil -} - -// WithLiveRestore is a noop on solaris. -func WithLiveRestore(v bool) RemoteOption { - return nil -} diff --git a/libcontainerd/remote_windows.go b/libcontainerd/remote_windows.go deleted file mode 100644 index 74c10447b..000000000 --- a/libcontainerd/remote_windows.go +++ /dev/null @@ -1,36 +0,0 @@ -package libcontainerd - -import "github.com/docker/docker/pkg/locker" - -type remote struct { -} - -func (r *remote) Client(b Backend) (Client, error) { - c := &client{ - clientCommon: clientCommon{ - backend: b, - containers: make(map[string]*container), - locker: locker.New(), - }, - } - return c, nil -} - -// Cleanup is a no-op on Windows. It is here to implement the interface. -func (r *remote) Cleanup() { -} - -func (r *remote) UpdateOptions(opts ...RemoteOption) error { - return nil -} - -// New creates a fresh instance of libcontainerd remote. On Windows, -// this is not used as there is no remote containerd process. -func New(_ string, _ ...RemoteOption) (Remote, error) { - return &remote{}, nil -} - -// WithLiveRestore is a noop on windows. -func WithLiveRestore(v bool) RemoteOption { - return nil -} diff --git a/libcontainerd/types.go b/libcontainerd/types.go deleted file mode 100644 index 357ca1bd4..000000000 --- a/libcontainerd/types.go +++ /dev/null @@ -1,60 +0,0 @@ -package libcontainerd - -import "io" - -// State constants used in state change reporting. -const ( - StateStart = "start-container" - StatePause = "pause" - StateResume = "resume" - StateExit = "exit" - StateRestart = "restart" - StateRestore = "restore" - StateStartProcess = "start-process" - StateExitProcess = "exit-process" - StateOOM = "oom" // fake state - stateLive = "live" -) - -// CommonStateInfo contains the state info common to all platforms. -type CommonStateInfo struct { // FIXME: event? - State string - Pid uint32 - ExitCode uint32 - ProcessID string -} - -// Backend defines callbacks that the client of the library needs to implement. 
-type Backend interface { - StateChanged(containerID string, state StateInfo) error - AttachStreams(processFriendlyName string, io IOPipe) error -} - -// Client provides access to containerd features. -type Client interface { - Create(containerID string, spec Spec, options ...CreateOption) error - Signal(containerID string, sig int) error - SignalProcess(containerID string, processFriendlyName string, sig int) error - AddProcess(containerID, processFriendlyName string, process Process) error - Resize(containerID, processFriendlyName string, width, height int) error - Pause(containerID string) error - Resume(containerID string) error - Restore(containerID string, options ...CreateOption) error - Stats(containerID string) (*Stats, error) - GetPidsForContainer(containerID string) ([]int, error) - Summary(containerID string) ([]Summary, error) - UpdateResources(containerID string, resources Resources) error -} - -// CreateOption allows configuring parameters of container creation. -type CreateOption interface { - Apply(interface{}) error -} - -// IOPipe contains the stdio streams. -type IOPipe struct { - Stdin io.WriteCloser - Stdout io.Reader - Stderr io.Reader - Terminal bool // Whether stderr is connected on Windows -} diff --git a/libcontainerd/types_linux.go b/libcontainerd/types_linux.go deleted file mode 100644 index 4f714a232..000000000 --- a/libcontainerd/types_linux.go +++ /dev/null @@ -1,55 +0,0 @@ -package libcontainerd - -import ( - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/opencontainers/specs/specs-go" -) - -// Spec is the base configuration for the container. It specifies platform -// independent configuration. This information must be included when the -// bundle is packaged for distribution. -type Spec specs.Spec - -// Process contains information to start a specific application inside the container. -type Process struct { - // Terminal creates an interactive terminal for the container. - Terminal bool `json:"terminal"` - // User specifies user information for the process. - User *User `json:"user"` - // Args specifies the binary and arguments for the application to execute. - Args []string `json:"args"` - // Env populates the process environment for the process. - Env []string `json:"env,omitempty"` - // Cwd is the current working directory for the process and must be - // relative to the container's root. - Cwd *string `json:"cwd"` - // Capabilities are linux capabilities that are kept for the container. - Capabilities []string `json:"capabilities,omitempty"` - // Rlimits specifies rlimit options to apply to the process. - Rlimits []specs.Rlimit `json:"rlimits,omitempty"` - // ApparmorProfile specifies the apparmor profile for the container. - ApparmorProfile *string `json:"apparmorProfile,omitempty"` - // SelinuxLabel specifies the selinux context that the container process is run as. - SelinuxLabel *string `json:"selinuxLabel,omitempty"` -} - -// StateInfo contains a description of the new state the container has entered. -type StateInfo struct { - CommonStateInfo - - // Platform specific StateInfo - OOMKilled bool -} - -// Stats contains stats properties from containerd. -type Stats containerd.StatsResponse - -// Summary contains a container summary from containerd -type Summary struct{} - -// User specifies linux specific user and group information for the container's -// main process. -type User specs.User - -// Resources defines updatable container resource values.
-type Resources containerd.UpdateResource diff --git a/libcontainerd/types_solaris.go b/libcontainerd/types_solaris.go deleted file mode 100644 index 637e54300..000000000 --- a/libcontainerd/types_solaris.go +++ /dev/null @@ -1,38 +0,0 @@ -package libcontainerd - -import ( - "github.com/opencontainers/specs/specs-go" -) - -// Spec is the base configuration for the container. It specifies platform -// independent configuration. This information must be included when the -// bundle is packaged for distribution. -type Spec specs.Spec - -// Process contains information to start a specific application inside the container. -type Process struct { - // Terminal creates an interactive terminal for the container. - Terminal bool `json:"terminal"` - // Args specifies the binary and arguments for the application to execute. - Args []string `json:"args"` -} - -// Stats contains stats properties from containerd. -type Stats struct{} - -// Summary contains a container summary from containerd -type Summary struct{} - -// StateInfo contains a description of the new state the container has entered. -type StateInfo struct { - CommonStateInfo - - // Platform specific StateInfo -} - -// User specifies Solaris specific user and group information for the container's -// main process. -type User specs.User - -// Resources defines updatable container resource values. -type Resources struct{} diff --git a/libcontainerd/types_windows.go b/libcontainerd/types_windows.go deleted file mode 100644 index 1e7014c11..000000000 --- a/libcontainerd/types_windows.go +++ /dev/null @@ -1,39 +0,0 @@ -package libcontainerd - -import "github.com/docker/docker/libcontainerd/windowsoci" - -// Spec is the base configuration for the container. -type Spec windowsoci.WindowsSpec - -// Process contains information to start a specific application inside the container. -type Process windowsoci.Process - -// User specifies user information for the container's main process. -type User windowsoci.User - -// Summary contains a container summary from containerd -type Summary struct { - Pid uint32 - Command string -} - -// StateInfo contains a description of the new state the container has entered. -type StateInfo struct { - CommonStateInfo - - // Platform specific StateInfo - - UpdatePending bool // Indicates that there are some update operations pending that should be completed by a servicing container. -} - -// Stats contains stats properties from containerd. -type Stats struct{} - -// Resources defines updatable container resource values. -type Resources struct{} - -// ServicingOption is an empty CreateOption with a no-op application that signifies -// that the container is to be used for a Windows servicing operation.
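// When it is set, start() in container_windows.go creates no stdio pipes,
// waits on the servicing process synchronously, and shuts the container down
// (or terminates it on a non-zero exit code) as soon as the process returns.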
-type ServicingOption struct { - IsServicing bool -} diff --git a/libcontainerd/utils_linux.go b/libcontainerd/utils_linux.go deleted file mode 100644 index 5b67244f0..000000000 --- a/libcontainerd/utils_linux.go +++ /dev/null @@ -1,52 +0,0 @@ -package libcontainerd - -import ( - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/opencontainers/specs/specs-go" -) - -func getRootIDs(s specs.Spec) (int, int, error) { - var hasUserns bool - for _, ns := range s.Linux.Namespaces { - if ns.Type == specs.UserNamespace { - hasUserns = true - break - } - } - if !hasUserns { - return 0, 0, nil - } - uid := hostIDFromMap(0, s.Linux.UIDMappings) - gid := hostIDFromMap(0, s.Linux.GIDMappings) - return uid, gid, nil -} - -func hostIDFromMap(id uint32, mp []specs.IDMapping) int { - for _, m := range mp { - if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 { - return int(m.HostID + id - m.ContainerID) - } - } - return 0 -} - -func systemPid(ctr *containerd.Container) uint32 { - var pid uint32 - for _, p := range ctr.Processes { - if p.Pid == InitFriendlyName { - pid = p.SystemPid - } - } - return pid -} - -func convertRlimits(sr []specs.Rlimit) (cr []*containerd.Rlimit) { - for _, r := range sr { - cr = append(cr, &containerd.Rlimit{ - Type: r.Type, - Hard: r.Hard, - Soft: r.Soft, - }) - } - return -} diff --git a/libcontainerd/utils_windows.go b/libcontainerd/utils_windows.go deleted file mode 100644 index 1839dc19a..000000000 --- a/libcontainerd/utils_windows.go +++ /dev/null @@ -1,37 +0,0 @@ -package libcontainerd - -import ( - "strconv" - "strings" -) - -// setupEnvironmentVariables converts a string array of environment variables -// into a map as required by the HCS. Source array is in the format [k1=v1] [k2=v2] etc. -func setupEnvironmentVariables(a []string) map[string]string { - r := make(map[string]string) - for _, s := range a { - arr := strings.Split(s, "=") - if len(arr) == 2 { - r[arr[0]] = arr[1] - } - } - return r -} - -// Apply for a servicing option is a no-op. -func (s *ServicingOption) Apply(interface{}) error { - return nil -} - -// buildFromVersion takes an image version string and returns the Windows build -// number. It returns 0 if the build number is not present. -func buildFromVersion(osver string) int { - v := strings.Split(osver, ".") - if len(v) < 3 { - return 0 - } - if build, err := strconv.Atoi(v[2]); err == nil { - return build - } - return 0 -} diff --git a/libcontainerd/windowsoci/oci_windows.go b/libcontainerd/windowsoci/oci_windows.go deleted file mode 100644 index 5f8f1319d..000000000 --- a/libcontainerd/windowsoci/oci_windows.go +++ /dev/null @@ -1,179 +0,0 @@ -package windowsoci - -// This file contains the Windows spec for a container. At the time of -// writing, Windows does not have a spec defined in opencontainers/specs, -// hence this is an interim workaround. TODO Windows: FIXME @jhowardmsft - -import "fmt" - -// WindowsSpec is the full specification for Windows containers. -type WindowsSpec struct { - Spec - - // Windows is platform specific configuration for Windows based containers. - Windows Windows `json:"windows"` -} - -// Spec is the base configuration for the container. It specifies platform -// independent configuration. This information must be included when the -// bundle is packaged for distribution. -type Spec struct { - - // Version is the version of the specification that is supported. - Version string `json:"ociVersion"` - // Platform is the host information for OS and Arch.
- Platform Platform `json:"platform"` - // Process is the container's main process. - Process Process `json:"process"` - // Root is the root information for the container's filesystem. - Root Root `json:"root"` - // Hostname is the container's host name. - Hostname string `json:"hostname,omitempty"` - // Mounts profile configuration for adding mounts to the container's filesystem. - Mounts []Mount `json:"mounts"` -} - -// Windows contains platform specific configuration for Windows based containers. -type Windows struct { - // Resources contain information for handling resource constraints for the container - Resources *Resources `json:"resources,omitempty"` - // Networking contains the platform specific network settings for the container. - Networking *Networking `json:"networking,omitempty"` - // FirstStart is used for an optimization on first boot of Windows - FirstStart bool `json:"first_start,omitempty"` - // LayerFolder is the path to the current layer folder - LayerFolder string `json:"layer_folder,omitempty"` - // Layer paths of the parent layers - LayerPaths []string `json:"layer_paths,omitempty"` - // HvRuntime contains settings specific to Hyper-V containers, omitted if not using Hyper-V isolation - HvRuntime *HvRuntime `json:"hv_runtime,omitempty"` -} - -// Process contains information to start a specific application inside the container. -type Process struct { - // Terminal indicates if stderr should NOT be attached for the container. - Terminal bool `json:"terminal"` - // ConsoleSize contains the initial h,w of the console size - InitialConsoleSize [2]int `json:"-"` - // User specifies user information for the process. - User User `json:"user"` - // Args specifies the binary and arguments for the application to execute. - Args []string `json:"args"` - // Env populates the process environment for the process. - Env []string `json:"env,omitempty"` - // Cwd is the current working directory for the process and must be - // relative to the container's root. - Cwd string `json:"cwd"` -} - -// User contains the user information for Windows -type User struct { - User string `json:"user,omitempty"` -} - -// Root contains information about the container's root filesystem on the host. -type Root struct { - // Path is the absolute path to the container's root filesystem. - Path string `json:"path"` - // Readonly makes the root filesystem for the container readonly before the process is executed. - Readonly bool `json:"readonly"` -} - -// Platform specifies OS and arch information for the host system that the container -// is created for. -type Platform struct { - // OS is the operating system. - OS string `json:"os"` - // Arch is the architecture - Arch string `json:"arch"` - // OSVersion is the version of the operating system. - OSVersion string `json:"os.version,omitempty"` -} - -// Mount specifies a mount for a container. -type Mount struct { - // Destination is the path where the mount will be placed relative to the container's root. The path and child directories MUST exist, a runtime MUST NOT create directories automatically to a mount point. - Destination string `json:"destination"` - // Type specifies the mount kind. - Type string `json:"type"` - // Source specifies the source path of the mount. In the case of bind mounts - // this would be the file on the host. 
- Source string `json:"source"`
- // Readonly specifies if the mount should be read-only
- Readonly bool `json:"readonly"`
-}
-
-// HvRuntime contains settings specific to Hyper-V containers
-type HvRuntime struct {
- // ImagePath is the path to the Utility VM image for this container
- ImagePath string `json:"image_path,omitempty"`
-}
-
-// Networking contains the platform specific network settings for the container
-type Networking struct {
- // List of endpoints to be attached to the container
- EndpointList []string `json:"endpoints,omitempty"`
-}
-
-// Storage contains storage resource management settings
-type Storage struct {
- // Specifies maximum Iops for the system drive
- Iops *uint64 `json:"iops,omitempty"`
- // Specifies maximum bytes per second for the system drive
- Bps *uint64 `json:"bps,omitempty"`
- // Sandbox size indicates the size to expand the system drive to if it is currently smaller
- SandboxSize *uint64 `json:"sandbox_size,omitempty"`
-}
-
-// Memory contains memory settings for the container
-type Memory struct {
- // Memory limit (in bytes).
- Limit *int64 `json:"limit,omitempty"`
- // Memory reservation (in bytes).
- Reservation *uint64 `json:"reservation,omitempty"`
-}
-
-// CPU contains information for cpu resource management
-type CPU struct {
- // Number of CPUs available to the container. This is an approximation for Windows Server Containers.
- Count *uint64 `json:"count,omitempty"`
- // CPU shares (relative weight (ratio) vs. other containers with cpu shares). Range is from 1 to 10000.
- Shares *uint64 `json:"shares,omitempty"`
- // Percent of available CPUs usable by the container.
- Percent *int64 `json:"percent,omitempty"`
-}
-
-// Network contains network resource management information
-type Network struct {
- // Bandwidth is the maximum egress bandwidth in bytes per second
- Bandwidth *uint64 `json:"bandwidth,omitempty"`
-}
-
-// Resources has container runtime resource constraints
-// TODO Windows containerd. This structure needs ratifying with the old resources
-// structure used on Windows and the latest OCI spec.
-type Resources struct {
- // Memory restriction configuration
- Memory *Memory `json:"memory,omitempty"`
- // CPU resource restriction configuration
- CPU *CPU `json:"cpu,omitempty"`
- // Storage restriction configuration
- Storage *Storage `json:"storage,omitempty"`
- // Network restriction configuration
- Network *Network `json:"network,omitempty"`
-}
-
-const (
- // VersionMajor is for API-incompatible changes
- VersionMajor = 0
- // VersionMinor is for functionality added in a backwards-compatible manner
- VersionMinor = 3
- // VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 0
-
- // VersionDev indicates a development branch. Releases will be an empty string.
- VersionDev = ""
)
-
-// Version is the specification version that the package types support.
-var Version = fmt.Sprintf("%d.%d.%d%s (Windows)", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/libcontainerd/windowsoci/unsupported.go b/libcontainerd/windowsoci/unsupported.go deleted file mode 100644 index a97c28299..000000000 --- a/libcontainerd/windowsoci/unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !windows - -package windowsoci diff --git a/migrate/v1/migratev1.go b/migrate/v1/migratev1.go deleted file mode 100644 index 3c0e550d0..000000000 --- a/migrate/v1/migratev1.go +++ /dev/null @@ -1,504 +0,0 @@ -package v1 - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strconv" - "sync" - "time" - - "encoding/json" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/image" - imagev1 "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/reference" -) - -type graphIDRegistrar interface { - RegisterByGraphID(string, layer.ChainID, layer.DiffID, string, int64) (layer.Layer, error) - Release(layer.Layer) ([]layer.Metadata, error) -} - -type graphIDMounter interface { - CreateRWLayerByGraphID(string, string, layer.ChainID) error -} - -type checksumCalculator interface { - ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID layer.DiffID, size int64, err error) -} - -const ( - graphDirName = "graph" - tarDataFileName = "tar-data.json.gz" - migrationFileName = ".migration-v1-images.json" - migrationTagsFileName = ".migration-v1-tags" - migrationDiffIDFileName = ".migration-diffid" - migrationSizeFileName = ".migration-size" - migrationTarDataFileName = ".migration-tardata" - containersDirName = "containers" - configFileNameLegacy = "config.json" - configFileName = "config.v2.json" - repositoriesFilePrefixLegacy = "repositories-" -) - -var ( - errUnsupported = errors.New("migration is not supported") -) - -// Migrate takes an old graph directory and transforms the metadata into the -// new format. -func Migrate(root, driverName string, ls layer.Store, is image.Store, rs reference.Store, ms metadata.Store) error { - graphDir := filepath.Join(root, graphDirName) - if _, err := os.Lstat(graphDir); os.IsNotExist(err) { - return nil - } - - mappings, err := restoreMappings(root) - if err != nil { - return err - } - - if cc, ok := ls.(checksumCalculator); ok { - CalculateLayerChecksums(root, cc, mappings) - } - - if registrar, ok := ls.(graphIDRegistrar); !ok { - return errUnsupported - } else if err := migrateImages(root, registrar, is, ms, mappings); err != nil { - return err - } - - err = saveMappings(root, mappings) - if err != nil { - return err - } - - if mounter, ok := ls.(graphIDMounter); !ok { - return errUnsupported - } else if err := migrateContainers(root, mounter, is, mappings); err != nil { - return err - } - - if err := migrateRefs(root, driverName, rs, mappings); err != nil { - return err - } - - return nil -} - -// CalculateLayerChecksums walks an old graph directory and calculates checksums -// for each layer. These checksums are later used for migration. 
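CalculateLayerChecksums, whose body follows, is a classic bounded worker pool: a buffered channel of work items, N goroutines ranging over it, and a WaitGroup to join them once the channel is closed. A runnable distillation of just that skeleton (process is a placeholder for calculateLayerChecksum):

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// process stands in for calculateLayerChecksum in this sketch.
func process(id string) { fmt.Println("checksummed", id) }

func main() {
	// Oversubscribe the CPUs (x3) because the real work is a mix of
	// hashing (CPU-bound) and reading layer tars (I/O-bound).
	workers := runtime.NumCPU() * 3
	workQueue := make(chan string, workers)

	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range workQueue { // loop ends when the channel is closed
				process(id)
			}
		}()
	}

	for _, id := range []string{"layer-a", "layer-b", "layer-c"} {
		workQueue <- id // feed work; blocks only if the buffer is full
	}
	close(workQueue) // signal "no more work" so the workers return
	wg.Wait()        // join: every queued item has been processed
}
```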
-func CalculateLayerChecksums(root string, ls checksumCalculator, mappings map[string]image.ID) { - graphDir := filepath.Join(root, graphDirName) - // spawn some extra workers also for maximum performance because the process is bounded by both cpu and io - workers := runtime.NumCPU() * 3 - workQueue := make(chan string, workers) - - wg := sync.WaitGroup{} - - for i := 0; i < workers; i++ { - wg.Add(1) - go func() { - for id := range workQueue { - start := time.Now() - if err := calculateLayerChecksum(graphDir, id, ls); err != nil { - logrus.Errorf("could not calculate checksum for %q, %q", id, err) - } - elapsed := time.Since(start) - logrus.Debugf("layer %s took %.2f seconds", id, elapsed.Seconds()) - } - wg.Done() - }() - } - - dir, err := ioutil.ReadDir(graphDir) - if err != nil { - logrus.Errorf("could not read directory %q", graphDir) - return - } - for _, v := range dir { - v1ID := v.Name() - if err := imagev1.ValidateID(v1ID); err != nil { - continue - } - if _, ok := mappings[v1ID]; ok { // support old migrations without helper files - continue - } - workQueue <- v1ID - } - close(workQueue) - wg.Wait() -} - -func calculateLayerChecksum(graphDir, id string, ls checksumCalculator) error { - diffIDFile := filepath.Join(graphDir, id, migrationDiffIDFileName) - if _, err := os.Lstat(diffIDFile); err == nil { - return nil - } else if !os.IsNotExist(err) { - return err - } - - parent, err := getParent(filepath.Join(graphDir, id)) - if err != nil { - return err - } - - diffID, size, err := ls.ChecksumForGraphID(id, parent, filepath.Join(graphDir, id, tarDataFileName), filepath.Join(graphDir, id, migrationTarDataFileName)) - if err != nil { - return err - } - - if err := ioutil.WriteFile(filepath.Join(graphDir, id, migrationSizeFileName), []byte(strconv.Itoa(int(size))), 0600); err != nil { - return err - } - - if err := ioutils.AtomicWriteFile(filepath.Join(graphDir, id, migrationDiffIDFileName), []byte(diffID), 0600); err != nil { - return err - } - - logrus.Infof("calculated checksum for layer %s: %s", id, diffID) - return nil -} - -func restoreMappings(root string) (map[string]image.ID, error) { - mappings := make(map[string]image.ID) - - mfile := filepath.Join(root, migrationFileName) - f, err := os.Open(mfile) - if err != nil && !os.IsNotExist(err) { - return nil, err - } else if err == nil { - err := json.NewDecoder(f).Decode(&mappings) - if err != nil { - f.Close() - return nil, err - } - f.Close() - } - - return mappings, nil -} - -func saveMappings(root string, mappings map[string]image.ID) error { - mfile := filepath.Join(root, migrationFileName) - f, err := os.OpenFile(mfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - defer f.Close() - if err := json.NewEncoder(f).Encode(mappings); err != nil { - return err - } - return nil -} - -func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) error { - graphDir := filepath.Join(root, graphDirName) - - dir, err := ioutil.ReadDir(graphDir) - if err != nil { - return err - } - for _, v := range dir { - v1ID := v.Name() - if err := imagev1.ValidateID(v1ID); err != nil { - continue - } - if _, exists := mappings[v1ID]; exists { - continue - } - if err := migrateImage(v1ID, root, ls, is, ms, mappings); err != nil { - continue - } - } - - return nil -} - -func migrateContainers(root string, ls graphIDMounter, is image.Store, imageMappings map[string]image.ID) error { - containersDir := filepath.Join(root, containersDirName) - dir, err := 
ioutil.ReadDir(containersDir) - if err != nil { - return err - } - for _, v := range dir { - id := v.Name() - - if _, err := os.Stat(filepath.Join(containersDir, id, configFileName)); err == nil { - continue - } - - containerJSON, err := ioutil.ReadFile(filepath.Join(containersDir, id, configFileNameLegacy)) - if err != nil { - logrus.Errorf("migrate container error: %v", err) - continue - } - - var c map[string]*json.RawMessage - if err := json.Unmarshal(containerJSON, &c); err != nil { - logrus.Errorf("migrate container error: %v", err) - continue - } - - imageStrJSON, ok := c["Image"] - if !ok { - return fmt.Errorf("invalid container configuration for %v", id) - } - - var image string - if err := json.Unmarshal([]byte(*imageStrJSON), &image); err != nil { - logrus.Errorf("migrate container error: %v", err) - continue - } - - imageID, ok := imageMappings[image] - if !ok { - logrus.Errorf("image not migrated %v", imageID) // non-fatal error - continue - } - - c["Image"] = rawJSON(imageID) - - containerJSON, err = json.Marshal(c) - if err != nil { - return err - } - - if err := ioutil.WriteFile(filepath.Join(containersDir, id, configFileName), containerJSON, 0600); err != nil { - return err - } - - img, err := is.Get(imageID) - if err != nil { - return err - } - - if err := ls.CreateRWLayerByGraphID(id, id, img.RootFS.ChainID()); err != nil { - logrus.Errorf("migrate container error: %v", err) - continue - } - - logrus.Infof("migrated container %s to point to %s", id, imageID) - - } - return nil -} - -type refAdder interface { - AddTag(ref reference.Named, id image.ID, force bool) error - AddDigest(ref reference.Canonical, id image.ID, force bool) error -} - -func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image.ID) error { - migrationFile := filepath.Join(root, migrationTagsFileName) - if _, err := os.Lstat(migrationFile); !os.IsNotExist(err) { - return err - } - - type repositories struct { - Repositories map[string]map[string]string - } - - var repos repositories - - f, err := os.Open(filepath.Join(root, repositoriesFilePrefixLegacy+driverName)) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - defer f.Close() - if err := json.NewDecoder(f).Decode(&repos); err != nil { - return err - } - - for name, repo := range repos.Repositories { - for tag, id := range repo { - if strongID, exists := mappings[id]; exists { - ref, err := reference.WithName(name) - if err != nil { - logrus.Errorf("migrate tags: invalid name %q, %q", name, err) - continue - } - if dgst, err := digest.ParseDigest(tag); err == nil { - canonical, err := reference.WithDigest(ref, dgst) - if err != nil { - logrus.Errorf("migrate tags: invalid digest %q, %q", dgst, err) - continue - } - if err := rs.AddDigest(canonical, strongID, false); err != nil { - logrus.Errorf("can't migrate digest %q for %q, err: %q", ref.String(), strongID, err) - } - } else { - tagRef, err := reference.WithTag(ref, tag) - if err != nil { - logrus.Errorf("migrate tags: invalid tag %q, %q", tag, err) - continue - } - if err := rs.AddTag(tagRef, strongID, false); err != nil { - logrus.Errorf("can't migrate tag %q for %q, err: %q", ref.String(), strongID, err) - } - } - logrus.Infof("migrated tag %s:%s to point to %s", name, tag, strongID) - } - } - } - - mf, err := os.Create(migrationFile) - if err != nil { - return err - } - mf.Close() - - return nil -} - -func getParent(confDir string) (string, error) { - jsonFile := filepath.Join(confDir, "json") - imageJSON, err := ioutil.ReadFile(jsonFile) - 
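migrateContainers above rewrites exactly one field of each container's config without touching the rest, by decoding into map[string]*json.RawMessage, swapping the "Image" key, and re-marshaling. A self-contained sketch of that round-trip (the input JSON is a trimmed, illustrative config):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// rawJSON matches the migration's helper: wrap any value as *json.RawMessage.
func rawJSON(value interface{}) *json.RawMessage {
	b, err := json.Marshal(value)
	if err != nil {
		return nil
	}
	return (*json.RawMessage)(&b)
}

func main() {
	// A trimmed, illustrative container config.
	in := []byte(`{"ID":"f780ee3f80e6","Image":"2c5ac3f849df","Name":"/determined_euclid"}`)

	// Decoding into raw messages keeps every field byte-for-byte intact
	// except the one we deliberately replace.
	var c map[string]*json.RawMessage
	if err := json.Unmarshal(in, &c); err != nil {
		panic(err)
	}
	c["Image"] = rawJSON("sha256:fcde2b2edba5") // v1 graph ID -> content-addressed ID

	out, err := json.Marshal(c) // note: map keys re-serialize in sorted order
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```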
if err != nil { - return "", err - } - var parent struct { - Parent string - ParentID digest.Digest `json:"parent_id"` - } - if err := json.Unmarshal(imageJSON, &parent); err != nil { - return "", err - } - if parent.Parent == "" && parent.ParentID != "" { // v1.9 - parent.Parent = parent.ParentID.Hex() - } - // compatibilityID for parent - parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(confDir, "parent")) - if err == nil && len(parentCompatibilityID) > 0 { - parent.Parent = string(parentCompatibilityID) - } - return parent.Parent, nil -} - -func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) { - defer func() { - if err != nil { - logrus.Errorf("migration failed for %v, err: %v", id, err) - } - }() - - parent, err := getParent(filepath.Join(root, graphDirName, id)) - if err != nil { - return err - } - - var parentID image.ID - if parent != "" { - var exists bool - if parentID, exists = mappings[parent]; !exists { - if err := migrateImage(parent, root, ls, is, ms, mappings); err != nil { - // todo: fail or allow broken chains? - return err - } - parentID = mappings[parent] - } - } - - rootFS := image.NewRootFS() - var history []image.History - - if parentID != "" { - parentImg, err := is.Get(parentID) - if err != nil { - return err - } - - rootFS = parentImg.RootFS - history = parentImg.History - } - - diffIDData, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationDiffIDFileName)) - if err != nil { - return err - } - diffID, err := digest.ParseDigest(string(diffIDData)) - if err != nil { - return err - } - - sizeStr, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationSizeFileName)) - if err != nil { - return err - } - size, err := strconv.ParseInt(string(sizeStr), 10, 64) - if err != nil { - return err - } - - layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), layer.DiffID(diffID), filepath.Join(root, graphDirName, id, migrationTarDataFileName), size) - if err != nil { - return err - } - logrus.Infof("migrated layer %s to %s", id, layer.DiffID()) - - jsonFile := filepath.Join(root, graphDirName, id, "json") - imageJSON, err := ioutil.ReadFile(jsonFile) - if err != nil { - return err - } - - h, err := imagev1.HistoryFromConfig(imageJSON, false) - if err != nil { - return err - } - history = append(history, h) - - rootFS.Append(layer.DiffID()) - - config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history) - if err != nil { - return err - } - strongID, err := is.Create(config) - if err != nil { - return err - } - logrus.Infof("migrated image %s to %s", id, strongID) - - if parentID != "" { - if err := is.SetParent(strongID, parentID); err != nil { - return err - } - } - - checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum")) - if err == nil { // best effort - dgst, err := digest.ParseDigest(string(checksum)) - if err == nil { - V2MetadataService := metadata.NewV2MetadataService(ms) - V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst}) - } - } - _, err = ls.Release(layer) - if err != nil { - return err - } - - mappings[id] = strongID - return -} - -func rawJSON(value interface{}) *json.RawMessage { - jsonval, err := json.Marshal(value) - if err != nil { - return nil - } - return (*json.RawMessage)(&jsonval) -} diff --git a/migrate/v1/migratev1_test.go b/migrate/v1/migratev1_test.go deleted file mode 100644 index 88f6bbdfa..000000000 --- a/migrate/v1/migratev1_test.go +++ /dev/null @@ -1,435 
+0,0 @@ -package v1 - -import ( - "crypto/rand" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "runtime" - "testing" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/reference" -) - -func TestMigrateRefs(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "migrate-tags") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108","sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"},"registry":{"2":"5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d","latest":"8d5547a9f329b1d3f93198cd661fb5117e5a96b721c5cf9a2c389e7dd4877128"}}}`), 0600) - - ta := &mockTagAdder{} - err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ - "5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d": image.ID("sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), - "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), - "abcdef3434c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:56434342345ae68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), - }) - if err != nil { - t.Fatal(err) - } - - expected := map[string]string{ - "busybox:latest": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", - "busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", - "registry:2": "sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", - } - - if !reflect.DeepEqual(expected, ta.refs) { - t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) - } - - // second migration is no-op - ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"`), 0600) - err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ - "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), - }) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, ta.refs) { - t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) - } -} - -func TestMigrateContainers(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - if runtime.GOARCH != "amd64" { - t.Skip("Test tailored to amd64 architecture") - } - tmpdir, err := ioutil.TempDir("", "migrate-containers") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - err = addContainer(tmpdir, 
`{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) - if err != nil { - t.Fatal(err) - } - - // container with invalid image - err = addContainer(tmpdir, 
`{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"e780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"4c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) - if err != nil { - t.Fatal(err) - } - - ls := &mockMounter{} - - ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) - if err != nil { - t.Fatal(err) - } - - is, err := image.NewImageStore(ifs, ls) - if err != nil { - t.Fatal(err) - } - - imgID, err := is.Create([]byte(`{"architecture":"amd64","config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["sh"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","history":[{"created":"2015-10-31T22:22:54.690851953Z","created_by":"/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"},{"created":"2015-10-31T22:22:55.613815829Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1","sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"]}}`)) - if err != nil { - t.Fatal(err) - } 
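The exact image IDs these tests assert are possible because post-migration IDs are content-addressed: an image ID is the SHA-256 digest of its config JSON, so a fixed fixture always yields the same ID. A short demonstration of the idea (the config literal here is illustrative, not a real image config):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// An illustrative image config; the point is that the ID is derived
	// from these bytes and nothing else.
	config := []byte(`{"architecture":"amd64","os":"linux","rootfs":{"type":"layers"}}`)
	id := fmt.Sprintf("sha256:%x", sha256.Sum256(config))
	fmt.Println(id) // stable for a fixed config, which is what the tests rely on
}
```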
- - err = migrateContainers(tmpdir, ls, is, map[string]image.ID{ - "2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093": imgID, - }) - if err != nil { - t.Fatal(err) - } - - expected := []mountInfo{{ - "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", - "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", - "sha256:c3191d32a37d7159b2e30830937d2e30268ad6c375a773a8994911a3aba9b93f", - }} - if !reflect.DeepEqual(expected, ls.mounts) { - t.Fatalf("invalid mounts: expected %q, got %q", expected, ls.mounts) - } - - if actual, expected := ls.count, 0; actual != expected { - t.Fatalf("invalid active mounts: expected %d, got %d", expected, actual) - } - - config2, err := ioutil.ReadFile(filepath.Join(tmpdir, "containers", "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", "config.v2.json")) - if err != nil { - t.Fatal(err) - } - var config struct{ Image string } - err = json.Unmarshal(config2, &config) - if err != nil { - t.Fatal(err) - } - - if actual, expected := config.Image, string(imgID); actual != expected { - t.Fatalf("invalid image pointer in migrated config: expected %q, got %q", expected, actual) - } - -} - -func TestMigrateImages(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - if runtime.GOARCH != "amd64" { - t.Skip("Test tailored to amd64 architecture") - } - tmpdir, err := ioutil.TempDir("", "migrate-images") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - // busybox from 1.9 - id1, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:54.690851953Z","docker_version":"1.8.2","layer_id":"sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57","os":"linux"}`, "", "") - if err != nil { - t.Fatal(err) - } - - id2, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD 
[\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","layer_id":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4","os":"linux","parent_id":"sha256:039b63dd2cbaa10d6015ea574392530571ed8d7b174090f032211285a71881d0"}`, id1, "") - if err != nil { - t.Fatal(err) - } - - ls := &mockRegistrar{} - - ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) - if err != nil { - t.Fatal(err) - } - - is, err := image.NewImageStore(ifs, ls) - if err != nil { - t.Fatal(err) - } - - ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution")) - if err != nil { - t.Fatal(err) - } - mappings := make(map[string]image.ID) - - err = migrateImages(tmpdir, ls, is, ms, mappings) - if err != nil { - t.Fatal(err) - } - - expected := map[string]image.ID{ - id1: image.ID("sha256:ca406eaf9c26898414ff5b7b3a023c33310759d6203be0663dbf1b3a712f432d"), - id2: image.ID("sha256:a488bec94bb96b26a968f913d25ef7d8d204d727ca328b52b4b059c7d03260b6"), - } - - if !reflect.DeepEqual(mappings, expected) { - t.Fatalf("invalid image mappings: expected %q, got %q", expected, mappings) - } - - if actual, expected := ls.count, 2; actual != expected { - t.Fatalf("invalid register count: expected %q, got %q", expected, actual) - } - ls.count = 0 - - // next images are busybox from 1.8.2 - _, err = addImage(tmpdir, `{"id":"17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2","parent":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:55.613815829Z","container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":0}`, "", "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - if err != nil { - t.Fatal(err) - } - - _, err = addImage(tmpdir, `{"id":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:54.690851953Z","container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in 
/"],"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":1108935}`, "", "sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57") - if err != nil { - t.Fatal(err) - } - - err = migrateImages(tmpdir, ls, is, ms, mappings) - if err != nil { - t.Fatal(err) - } - - expected["d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498"] = image.ID("sha256:c091bb33854e57e6902b74c08719856d30b5593c7db6143b2b48376b8a588395") - expected["17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2"] = image.ID("sha256:d963020e755ff2715b936065949472c1f8a6300144b922992a1a421999e71f07") - - if actual, expected := ls.count, 2; actual != expected { - t.Fatalf("invalid register count: expected %q, got %q", expected, actual) - } - - v2MetadataService := metadata.NewV2MetadataService(ms) - receivedMetadata, err := v2MetadataService.GetMetadata(layer.EmptyLayer.DiffID()) - if err != nil { - t.Fatal(err) - } - - expectedMetadata := []metadata.V2Metadata{ - {Digest: digest.Digest("sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")}, - {Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - } - - if !reflect.DeepEqual(expectedMetadata, receivedMetadata) { - t.Fatalf("invalid metadata: expected %q, got %q", expectedMetadata, receivedMetadata) - } - -} - -func TestMigrateUnsupported(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "migrate-empty") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - err = os.MkdirAll(filepath.Join(tmpdir, "graph"), 0700) - if err != nil { - t.Fatal(err) - } - - err = Migrate(tmpdir, "generic", nil, nil, nil, nil) - if err != errUnsupported { - t.Fatalf("expected unsupported error, got %q", err) - } -} - -func TestMigrateEmptyDir(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "migrate-empty") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - err = Migrate(tmpdir, "generic", nil, nil, nil, nil) - if err != nil { - t.Fatal(err) - } -} - -func addImage(dest, jsonConfig, parent, checksum string) (string, error) { - var config struct{ ID string } - if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { - return "", err - } - if config.ID == "" { - b := make([]byte, 32) - rand.Read(b) - config.ID = hex.EncodeToString(b) - } - contDir := filepath.Join(dest, "graph", config.ID) - if err := os.MkdirAll(contDir, 0700); err != nil { - return "", err - } - if err := ioutil.WriteFile(filepath.Join(contDir, "json"), []byte(jsonConfig), 0600); err != nil { - return "", err - } - if checksum != "" { - if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { - return "", err - } - } - if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-diffid"), []byte(layer.EmptyLayer.DiffID()), 0600); err != nil { - return "", err - } - if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-size"), []byte("0"), 0600); err != nil { - return "", err - } - if parent != 
"" { - if err := ioutil.WriteFile(filepath.Join(contDir, "parent"), []byte(parent), 0600); err != nil { - return "", err - } - } - if checksum != "" { - if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { - return "", err - } - } - return config.ID, nil -} - -func addContainer(dest, jsonConfig string) error { - var config struct{ ID string } - if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { - return err - } - contDir := filepath.Join(dest, "containers", config.ID) - if err := os.MkdirAll(contDir, 0700); err != nil { - return err - } - if err := ioutil.WriteFile(filepath.Join(contDir, "config.json"), []byte(jsonConfig), 0600); err != nil { - return err - } - return nil -} - -type mockTagAdder struct { - refs map[string]string -} - -func (t *mockTagAdder) AddTag(ref reference.Named, id image.ID, force bool) error { - if t.refs == nil { - t.refs = make(map[string]string) - } - t.refs[ref.String()] = id.String() - return nil -} -func (t *mockTagAdder) AddDigest(ref reference.Canonical, id image.ID, force bool) error { - return t.AddTag(ref, id, force) -} - -type mockRegistrar struct { - layers map[layer.ChainID]*mockLayer - count int -} - -func (r *mockRegistrar) RegisterByGraphID(graphID string, parent layer.ChainID, diffID layer.DiffID, tarDataFile string, size int64) (layer.Layer, error) { - r.count++ - l := &mockLayer{} - if parent != "" { - p, exists := r.layers[parent] - if !exists { - return nil, fmt.Errorf("invalid parent %q", parent) - } - l.parent = p - l.diffIDs = append(l.diffIDs, p.diffIDs...) - } - l.diffIDs = append(l.diffIDs, diffID) - if r.layers == nil { - r.layers = make(map[layer.ChainID]*mockLayer) - } - r.layers[l.ChainID()] = l - return l, nil -} -func (r *mockRegistrar) Release(l layer.Layer) ([]layer.Metadata, error) { - return nil, nil -} -func (r *mockRegistrar) Get(layer.ChainID) (layer.Layer, error) { - return nil, nil -} - -type mountInfo struct { - name, graphID, parent string -} -type mockMounter struct { - mounts []mountInfo - count int -} - -func (r *mockMounter) CreateRWLayerByGraphID(name string, graphID string, parent layer.ChainID) error { - r.mounts = append(r.mounts, mountInfo{name, graphID, string(parent)}) - return nil -} -func (r *mockMounter) Unmount(string) error { - r.count-- - return nil -} -func (r *mockMounter) Get(layer.ChainID) (layer.Layer, error) { - return nil, nil -} - -func (r *mockMounter) Release(layer.Layer) ([]layer.Metadata, error) { - return nil, nil -} - -type mockLayer struct { - diffIDs []layer.DiffID - parent *mockLayer -} - -func (l *mockLayer) TarStream() (io.ReadCloser, error) { - return nil, nil -} - -func (l *mockLayer) ChainID() layer.ChainID { - return layer.CreateChainID(l.diffIDs) -} - -func (l *mockLayer) DiffID() layer.DiffID { - return l.diffIDs[len(l.diffIDs)-1] -} - -func (l *mockLayer) Parent() layer.Layer { - if l.parent == nil { - return nil - } - return l.parent -} - -func (l *mockLayer) Size() (int64, error) { - return 0, nil -} - -func (l *mockLayer) DiffSize() (int64, error) { - return 0, nil -} - -func (l *mockLayer) Metadata() (map[string]string, error) { - return nil, nil -} diff --git a/oci/defaults_linux.go b/oci/defaults_linux.go deleted file mode 100644 index 796166bdb..000000000 --- a/oci/defaults_linux.go +++ /dev/null @@ -1,177 +0,0 @@ -package oci - -import ( - "os" - "runtime" - - "github.com/opencontainers/specs/specs-go" -) - -func sPtr(s string) *string { return &s } -func iPtr(i int64) *int64 { return &i } -func u32Ptr(i 
int64) *uint32 { u := uint32(i); return &u } -func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } - -// DefaultSpec returns default oci spec used by docker. -func DefaultSpec() specs.Spec { - s := specs.Spec{ - Version: specs.Version, - Platform: specs.Platform{ - OS: runtime.GOOS, - Arch: runtime.GOARCH, - }, - } - s.Mounts = []specs.Mount{ - { - Destination: "/proc", - Type: "proc", - Source: "proc", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/dev", - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nosuid", "strictatime", "mode=755"}, - }, - { - Destination: "/dev/pts", - Type: "devpts", - Source: "devpts", - Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, - }, - { - Destination: "/sys", - Type: "sysfs", - Source: "sysfs", - Options: []string{"nosuid", "noexec", "nodev", "ro"}, - }, - { - Destination: "/sys/fs/cgroup", - Type: "cgroup", - Source: "cgroup", - Options: []string{"ro", "nosuid", "noexec", "nodev"}, - }, - { - Destination: "/dev/mqueue", - Type: "mqueue", - Source: "mqueue", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - } - - s.Process.Capabilities = []string{ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE", - } - - s.Linux = specs.Linux{ - MaskedPaths: []string{ - "/proc/kcore", - "/proc/latency_stats", - "/proc/timer_stats", - "/proc/sched_debug", - }, - ReadonlyPaths: []string{ - "/proc/asound", - "/proc/bus", - "/proc/fs", - "/proc/irq", - "/proc/sys", - "/proc/sysrq-trigger", - }, - Namespaces: []specs.Namespace{ - {Type: "mount"}, - {Type: "network"}, - {Type: "uts"}, - {Type: "pid"}, - {Type: "ipc"}, - }, - // Devices implicitly contains the following devices: - // null, zero, full, random, urandom, tty, console, and ptmx. - // ptmx is a bind-mount or symlink of the container's ptmx. - // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices - Devices: []specs.Device{ - { - Type: "c", - Path: "/dev/fuse", - Major: 10, - Minor: 229, - FileMode: fmPtr(0666), - UID: u32Ptr(0), - GID: u32Ptr(0), - }, - }, - Resources: &specs.Resources{ - Devices: []specs.DeviceCgroup{ - { - Allow: false, - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(1), - Minor: iPtr(5), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(1), - Minor: iPtr(3), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(1), - Minor: iPtr(9), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(1), - Minor: iPtr(8), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(5), - Minor: iPtr(0), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(5), - Minor: iPtr(1), - Access: sPtr("rwm"), - }, - { - Allow: false, - Type: sPtr("c"), - Major: iPtr(10), - Minor: iPtr(229), - Access: sPtr("rwm"), - }, - }, - }, - } - - return s -} diff --git a/oci/defaults_solaris.go b/oci/defaults_solaris.go deleted file mode 100644 index f3ed5c9c7..000000000 --- a/oci/defaults_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package oci - -import ( - "github.com/opencontainers/specs/specs-go" -) - -// DefaultSpec returns default oci spec used by docker. 
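DefaultSpec above is built almost entirely from pointer-valued optional fields, hence the sPtr/iPtr/u32Ptr/fmPtr helpers: Go cannot take the address of a literal, so each helper copies its argument into an addressable variable and returns its address. A standalone sketch, with a simplified deviceRule struct standing in for specs.DeviceCgroup:

```go
package main

import (
	"fmt"
	"os"
)

// Go cannot take the address of a literal (&"rwm" is illegal), so the spec
// builder defines one-line helpers that copy the value into an addressable
// variable and return its address. These four are lifted from the file above.
func sPtr(s string) *string      { return &s }
func iPtr(i int64) *int64        { return &i }
func u32Ptr(i int64) *uint32     { u := uint32(i); return &u }
func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm }

// deviceRule is a simplified stand-in for specs.DeviceCgroup; nil pointer
// fields mean "not specified", which is the whole reason pointers are used.
type deviceRule struct {
	Allow  bool
	Type   *string
	Major  *int64
	Minor  *int64
	Access *string
}

func main() {
	// Deny everything by default, then whitelist /dev/null (char 1:3),
	// mirroring the shape of the device-cgroup rules above.
	rules := []deviceRule{
		{Allow: false, Access: sPtr("rwm")},
		{Allow: true, Type: sPtr("c"), Major: iPtr(1), Minor: iPtr(3), Access: sPtr("rwm")},
	}
	fmt.Println(*rules[1].Major, *rules[1].Minor, *rules[1].Access) // 1 3 rwm
	fmt.Println(*fmPtr(0666), *u32Ptr(0))                           // -rw-rw-rw- 0
}
```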
-func DefaultSpec() specs.Spec {
- s := specs.Spec{}
- return s
-}
diff --git a/oci/defaults_windows.go b/oci/defaults_windows.go
deleted file mode 100644
index 03dc942eb..000000000
--- a/oci/defaults_windows.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package oci
-
-import (
- "runtime"
-
- "github.com/docker/docker/libcontainerd/windowsoci"
-)
-
-// DefaultSpec returns the default spec used by docker.
-func DefaultSpec() windowsoci.WindowsSpec {
- s := windowsoci.Spec{
- Version: windowsoci.Version,
- Platform: windowsoci.Platform{
- OS: runtime.GOOS,
- Arch: runtime.GOARCH,
- },
- }
-
- return windowsoci.WindowsSpec{
- Spec: s,
- Windows: windowsoci.Windows{},
- }
-}
diff --git a/opts/hosts.go b/opts/hosts.go
deleted file mode 100644
index 266df1e53..000000000
--- a/opts/hosts.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package opts
-
-import (
- "fmt"
- "net"
- "net/url"
- "strconv"
- "strings"
-)
-
-var (
- // DefaultHTTPPort is the default HTTP port used if only the protocol is provided to the -H flag, e.g. docker daemon -H tcp://
- // These are the IANA registered port numbers for use with Docker
- // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
- DefaultHTTPPort = 2375 // Default HTTP Port
- // DefaultTLSHTTPPort is the default HTTP port used when TLS is enabled
- DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
- // DefaultUnixSocket is the path for the unix socket.
- // The Docker daemon by default always listens on the default unix socket
- DefaultUnixSocket = "/var/run/docker.sock"
- // DefaultTCPHost constant defines the default host string used by docker for TCP sockets
- DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
- // DefaultTLSHost constant defines the default host string used by docker for TLS sockets
- DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
- // DefaultNamedPipe defines the default named pipe used by docker on Windows
- DefaultNamedPipe = `//./pipe/docker_engine`
-)
-
-// ValidateHost validates that the specified string is a valid host and returns it.
-func ValidateHost(val string) (string, error) {
- host := strings.TrimSpace(val)
- // The empty string means default and is not handled by parseDockerDaemonHost
- if host != "" {
- _, err := parseDockerDaemonHost(host)
- if err != nil {
- return val, err
- }
- }
- // Note: unlike most flag validators, we don't return the mutated value here
- // we need to know what the user entered later (using ParseHost) to adjust for tls
- return val, nil
-}
-
-// ParseHost parses and sets defaults for a daemon host string
-func ParseHost(defaultToTLS bool, val string) (string, error) {
- host := strings.TrimSpace(val)
- if host == "" {
- if defaultToTLS {
- host = DefaultTLSHost
- } else {
- host = DefaultHost
- }
- } else {
- var err error
- host, err = parseDockerDaemonHost(host)
- if err != nil {
- return val, err
- }
- }
- return host, nil
-}
-
-// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
-// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
-func parseDockerDaemonHost(addr string) (string, error) { - addrParts := strings.SplitN(addr, "://", 2) - if len(addrParts) == 1 && addrParts[0] != "" { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return ParseTCPAddr(addrParts[1], DefaultTCPHost) - case "unix": - return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) - case "npipe": - return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -// parseSimpleProtoAddr parses and validates that the specified address is a valid -// socket address for simple protocols like unix and npipe. It returns a formatted -// socket address, either using the address parsed from addr, or the contents of -// defaultAddr if addr is a blank string. -func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, proto+"://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("%s://%s", proto, addr), nil -} - -// ParseTCPAddr parses and validates that the specified address is a valid TCP -// address. It returns a formatted TCP address, either using the address parsed -// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. -// tryAddr is expected to have already been Trim()'d -// defaultAddr must be in the full `tcp://host:port` form -func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { - if tryAddr == "" || tryAddr == "tcp://" { - return defaultAddr, nil - } - addr := strings.TrimPrefix(tryAddr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) - } - - defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") - defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) - if err != nil { - return "", err - } - // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but - // not 1.4. See https://github.com/golang/go/issues/12200 and - // https://github.com/golang/go/issues/6530. 
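parseSimpleProtoAddr above handles the unix:// and npipe:// cases: strip an optional scheme prefix, reject nested schemes, and fall back to a default path when the address is empty. The function is small enough to lift out and run on its own:

```go
package main

import (
	"fmt"
	"strings"
)

// parseSimpleProtoAddr as deleted above: strip an optional "proto://"
// prefix, reject nested schemes, and default when the address is empty.
func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
	addr = strings.TrimPrefix(addr, proto+"://")
	if strings.Contains(addr, "://") {
		return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
	}
	if addr == "" {
		addr = defaultAddr
	}
	return fmt.Sprintf("%s://%s", proto, addr), nil
}

func main() {
	fmt.Println(parseSimpleProtoAddr("unix", "unix:///run/docker.sock", "/var/run/docker.sock"))
	// -> unix:///run/docker.sock <nil>
	fmt.Println(parseSimpleProtoAddr("unix", "", "/var/run/docker.sock"))
	// -> unix:///var/run/docker.sock <nil>
	fmt.Println(parseSimpleProtoAddr("unix", "tcp://127.0.0.1", "/var/run/docker.sock"))
	// -> error: Invalid proto, expected unix: tcp://127.0.0.1
}
```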
- if strings.HasSuffix(addr, "]:") { - addr += defaultPort - } - - u, err := url.Parse("tcp://" + addr) - if err != nil { - return "", err - } - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - // try port addition once - host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) - } - if err != nil { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - if host == "" { - host = defaultHost - } - if port == "" { - port = defaultPort - } - p, err := strconv.Atoi(port) - if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil -} diff --git a/opts/hosts_test.go b/opts/hosts_test.go deleted file mode 100644 index a5bec30d4..000000000 --- a/opts/hosts_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package opts - -import ( - "fmt" - "testing" -) - -func TestParseHost(t *testing.T) { - invalid := []string{ - "something with spaces", - "://", - "unknown://", - "tcp://:port", - "tcp://invalid:port", - } - - valid := map[string]string{ - "": DefaultHost, - " ": DefaultHost, - " ": DefaultHost, - "fd://": "fd://", - "fd://something": "fd://something", - "tcp://host:": fmt.Sprintf("tcp://host:%d", DefaultHTTPPort), - "tcp://": DefaultTCPHost, - "tcp://:2375": fmt.Sprintf("tcp://%s:2375", DefaultHTTPHost), - "tcp://:2376": fmt.Sprintf("tcp://%s:2376", DefaultHTTPHost), - "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", - "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", - "tcp://192.168:8080": "tcp://192.168:8080", - "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P - " tcp://:7777/path ": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), - "tcp://docker.com:2375": "tcp://docker.com:2375", - "unix://": "unix://" + DefaultUnixSocket, - "unix://path/to/socket": "unix://path/to/socket", - "npipe://": "npipe://" + DefaultNamedPipe, - "npipe:////./pipe/foo": "npipe:////./pipe/foo", - } - - for _, value := range invalid { - if _, err := ParseHost(false, value); err == nil { - t.Errorf("Expected an error for %v, got [nil]", value) - } - } - - for value, expected := range valid { - if actual, err := ParseHost(false, value); err != nil || actual != expected { - t.Errorf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) - } - } -} - -func TestParseDockerDaemonHost(t *testing.T) { - invalids := map[string]string{ - - "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", - "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", - "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", - "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", - "tcp://unix:///run/docker.sock": "Invalid proto, expected tcp: unix:///run/docker.sock", - " tcp://:7777/path ": "Invalid bind address format: tcp://:7777/path ", - "": "Invalid bind address format: ", - } - valids := map[string]string{ - "0.0.0.1:": "tcp://0.0.0.1:2375", - "0.0.0.1:5555": "tcp://0.0.0.1:5555", - "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", - "[::1]:": "tcp://[::1]:2375", - "[::1]:5555/path": "tcp://[::1]:5555/path", - "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", - "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", - ":6666": fmt.Sprintf("tcp://%s:6666", DefaultHTTPHost), - ":6666/path": fmt.Sprintf("tcp://%s:6666/path", DefaultHTTPHost), - "tcp://": DefaultTCPHost, - "tcp://:7777": fmt.Sprintf("tcp://%s:7777", DefaultHTTPHost), - "tcp://:7777/path": 
fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), - "unix:///run/docker.sock": "unix:///run/docker.sock", - "unix://": "unix://" + DefaultUnixSocket, - "fd://": "fd://", - "fd://something": "fd://something", - "localhost:": "tcp://localhost:2375", - "localhost:5555": "tcp://localhost:5555", - "localhost:5555/path": "tcp://localhost:5555/path", - } - for invalidAddr, expectedError := range invalids { - if addr, err := parseDockerDaemonHost(invalidAddr); err == nil || err.Error() != expectedError { - t.Errorf("tcp %v address expected error %q return, got %q and addr %v", invalidAddr, expectedError, err, addr) - } - } - for validAddr, expectedAddr := range valids { - if addr, err := parseDockerDaemonHost(validAddr); err != nil || addr != expectedAddr { - t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) - } - } -} - -func TestParseTCP(t *testing.T) { - var ( - defaultHTTPHost = "tcp://127.0.0.1:2376" - ) - invalids := map[string]string{ - "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", - "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", - "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", - "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", - } - valids := map[string]string{ - "": defaultHTTPHost, - "tcp://": defaultHTTPHost, - "0.0.0.1:": "tcp://0.0.0.1:2376", - "0.0.0.1:5555": "tcp://0.0.0.1:5555", - "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", - ":6666": "tcp://127.0.0.1:6666", - ":6666/path": "tcp://127.0.0.1:6666/path", - "tcp://:7777": "tcp://127.0.0.1:7777", - "tcp://:7777/path": "tcp://127.0.0.1:7777/path", - "[::1]:": "tcp://[::1]:2376", - "[::1]:5555": "tcp://[::1]:5555", - "[::1]:5555/path": "tcp://[::1]:5555/path", - "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2376", - "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", - "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", - "localhost:": "tcp://localhost:2376", - "localhost:5555": "tcp://localhost:5555", - "localhost:5555/path": "tcp://localhost:5555/path", - } - for invalidAddr, expectedError := range invalids { - if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { - t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) - } - } - for validAddr, expectedAddr := range valids { - if addr, err := ParseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { - t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) - } - } -} - -func TestParseInvalidUnixAddrInvalid(t *testing.T) { - if _, err := parseSimpleProtoAddr("unix", "tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { - t.Fatalf("Expected an error, got %v", err) - } - if _, err := parseSimpleProtoAddr("unix", "unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { - t.Fatalf("Expected an error, got %v", err) - } - if v, err := parseSimpleProtoAddr("unix", "", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { - t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") - } -} diff --git a/opts/hosts_unix.go b/opts/hosts_unix.go deleted file mode 100644 index 611407a9d..000000000 --- a/opts/hosts_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package opts - -import "fmt" - -// DefaultHost 
constant defines the default host string used by docker on other hosts than Windows -var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/opts/hosts_windows.go b/opts/hosts_windows.go deleted file mode 100644 index 7c239e00f..000000000 --- a/opts/hosts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build windows - -package opts - -// DefaultHost constant defines the default host string used by docker on Windows -var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/opts/ip.go b/opts/ip.go deleted file mode 100644 index c7b0dc994..000000000 --- a/opts/ip.go +++ /dev/null @@ -1,42 +0,0 @@ -package opts - -import ( - "fmt" - "net" -) - -// IPOpt holds an IP. It is used to store values from CLI flags. -type IPOpt struct { - *net.IP -} - -// NewIPOpt creates a new IPOpt from a reference net.IP and a -// string representation of an IP. If the string is not a valid -// IP it will fallback to the specified reference. -func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { - o := &IPOpt{ - IP: ref, - } - o.Set(defaultVal) - return o -} - -// Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parseable as an IP address it returns an error. -func (o *IPOpt) Set(val string) error { - ip := net.ParseIP(val) - if ip == nil { - return fmt.Errorf("%s is not an ip address", val) - } - *o.IP = ip - return nil -} - -// String returns the IP address stored in the IPOpt. If stored IP is a -// nil pointer, it returns an empty string. -func (o *IPOpt) String() string { - if *o.IP == nil { - return "" - } - return o.IP.String() -} diff --git a/opts/ip_test.go b/opts/ip_test.go deleted file mode 100644 index 1027d84a0..000000000 --- a/opts/ip_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package opts - -import ( - "net" - "testing" -) - -func TestIpOptString(t *testing.T) { - addresses := []string{"", "0.0.0.0"} - var ip net.IP - - for _, address := range addresses { - stringAddress := NewIPOpt(&ip, address).String() - if stringAddress != address { - t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) - } - } -} - -func TestNewIpOptInvalidDefaultVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - defaultVal := "Not an ip" - - ipOpt := NewIPOpt(&ip, defaultVal) - - expected := "127.0.0.1" - if ipOpt.String() != expected { - t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) - } -} - -func TestNewIpOptValidDefaultVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - defaultVal := "192.168.1.1" - - ipOpt := NewIPOpt(&ip, defaultVal) - - expected := "192.168.1.1" - if ipOpt.String() != expected { - t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) - } -} - -func TestIpOptSetInvalidVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - ipOpt := &IPOpt{IP: &ip} - - invalidIP := "invalid ip" - expectedError := "invalid ip is not an ip address" - err := ipOpt.Set(invalidIP) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) - } -} diff --git a/pkg/aaparser/aaparser.go b/pkg/aaparser/aaparser.go deleted file mode 100644 index 507298f42..000000000 --- a/pkg/aaparser/aaparser.go +++ /dev/null @@ -1,92 +0,0 @@ -// Package aaparser is a convenience package interacting with `apparmor_parser`. -package aaparser - -import ( - "fmt" - "os/exec" - "path/filepath" - "strconv" - "strings" -) - -const ( - binary = "apparmor_parser" -) - -// GetVersion returns the major and minor version of apparmor_parser. 
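
[Editor's note] The IPOpt type removed above is a textbook flag.Value adapter: it lets the flag machinery parse and validate an IP while writing through to a caller-owned net.IP. A minimal standalone sketch of the same pattern against the standard flag package (the `-bind` flag name and `ipValue` type are illustrative, not from this patch):

```go
package main

import (
	"flag"
	"fmt"
	"net"
)

// ipValue mirrors the removed IPOpt: it wraps a *net.IP so flag.Var can
// parse, validate, and store IP-typed command-line flags.
type ipValue struct{ ip *net.IP }

// Set parses the flag's string and rejects anything that is not an IP.
func (v ipValue) Set(s string) error {
	parsed := net.ParseIP(s)
	if parsed == nil {
		return fmt.Errorf("%s is not an ip address", s)
	}
	*v.ip = parsed
	return nil
}

// String reports the current value; empty when no IP is stored yet.
func (v ipValue) String() string {
	if v.ip == nil || *v.ip == nil {
		return ""
	}
	return v.ip.String()
}

func main() {
	bind := net.IPv4(127, 0, 0, 1) // default used when -bind is absent
	flag.Var(ipValue{ip: &bind}, "bind", "IP address to bind to")
	flag.Parse()
	fmt.Println("binding to", bind)
}
```
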
-func GetVersion() (int, error) { - output, err := cmd("", "--version") - if err != nil { - return -1, err - } - - return parseVersion(output) -} - -// LoadProfile runs `apparmor_parser -r -W` on a specified apparmor profile to -// replace and write it to disk. -func LoadProfile(profilePath string) error { - _, err := cmd(filepath.Dir(profilePath), "-r", "-W", filepath.Base(profilePath)) - if err != nil { - return err - } - return nil -} - -// cmd runs `apparmor_parser` with the passed arguments. -func cmd(dir string, arg ...string) (string, error) { - c := exec.Command(binary, arg...) - c.Dir = dir - - output, err := c.CombinedOutput() - if err != nil { - return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), string(output), err) - } - - return string(output), nil -} - -// parseVersion takes the output from `apparmor_parser --version` and returns -// a representation of the {major, minor, patch} version as a single number of -// the form MMmmPPP {major, minor, patch}. -func parseVersion(output string) (int, error) { - // output is in the form of the following: - // AppArmor parser version 2.9.1 - // Copyright (C) 1999-2008 Novell Inc. - // Copyright 2009-2012 Canonical Ltd. - - lines := strings.SplitN(output, "\n", 2) - words := strings.Split(lines[0], " ") - version := words[len(words)-1] - - // split by major minor version - v := strings.Split(version, ".") - if len(v) == 0 || len(v) > 3 { - return -1, fmt.Errorf("parsing version failed for output: `%s`", output) - } - - // Default the versions to 0. - var majorVersion, minorVersion, patchLevel int - - majorVersion, err := strconv.Atoi(v[0]) - if err != nil { - return -1, err - } - - if len(v) > 1 { - minorVersion, err = strconv.Atoi(v[1]) - if err != nil { - return -1, err - } - } - if len(v) > 2 { - patchLevel, err = strconv.Atoi(v[2]) - if err != nil { - return -1, err - } - } - - // major*10^5 + minor*10^3 + patch*10^0 - numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel - return numericVersion, nil -} diff --git a/pkg/aaparser/aaparser_test.go b/pkg/aaparser/aaparser_test.go deleted file mode 100644 index 69bc8d2fd..000000000 --- a/pkg/aaparser/aaparser_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package aaparser - -import ( - "testing" -) - -type versionExpected struct { - output string - version int -} - -func TestParseVersion(t *testing.T) { - versions := []versionExpected{ - { - output: `AppArmor parser version 2.10 -Copyright (C) 1999-2008 Novell Inc. -Copyright 2009-2012 Canonical Ltd. - -`, - version: 210000, - }, - { - output: `AppArmor parser version 2.8 -Copyright (C) 1999-2008 Novell Inc. -Copyright 2009-2012 Canonical Ltd. - -`, - version: 208000, - }, - { - output: `AppArmor parser version 2.20 -Copyright (C) 1999-2008 Novell Inc. -Copyright 2009-2012 Canonical Ltd. - -`, - version: 220000, - }, - { - output: `AppArmor parser version 2.05 -Copyright (C) 1999-2008 Novell Inc. -Copyright 2009-2012 Canonical Ltd. - -`, - version: 205000, - }, - { - output: `AppArmor parser version 2.9.95 -Copyright (C) 1999-2008 Novell Inc. -Copyright 2009-2012 Canonical Ltd. - -`, - version: 209095, - }, - { - output: `AppArmor parser version 3.14.159 -Copyright (C) 1999-2008 Novell Inc. -Copyright 2009-2012 Canonical Ltd. 
- -`, - version: 314159, - }, - } - - for _, v := range versions { - version, err := parseVersion(v.output) - if err != nil { - t.Fatalf("expected error to be nil for %#v, got: %v", v, err) - } - if version != v.version { - t.Fatalf("expected version to be %d, was %d, for: %#v\n", v.version, version, v) - } - } -} diff --git a/pkg/authorization/api.go b/pkg/authorization/api.go deleted file mode 100644 index fc82c46b0..000000000 --- a/pkg/authorization/api.go +++ /dev/null @@ -1,54 +0,0 @@ -package authorization - -const ( - // AuthZApiRequest is the url for daemon request authorization - AuthZApiRequest = "AuthZPlugin.AuthZReq" - - // AuthZApiResponse is the url for daemon response authorization - AuthZApiResponse = "AuthZPlugin.AuthZRes" - - // AuthZApiImplements is the name of the interface all AuthZ plugins implement - AuthZApiImplements = "authz" -) - -// Request holds data required for authZ plugins -type Request struct { - // User holds the user extracted by AuthN mechanism - User string `json:"User,omitempty"` - - // UserAuthNMethod holds the mechanism used to extract user details (e.g., krb) - UserAuthNMethod string `json:"UserAuthNMethod,omitempty"` - - // RequestMethod holds the HTTP method (GET/POST/PUT) - RequestMethod string `json:"RequestMethod,omitempty"` - - // RequestUri holds the full HTTP uri (e.g., /v1.21/version) - RequestURI string `json:"RequestUri,omitempty"` - - // RequestBody stores the raw request body sent to the docker daemon - RequestBody []byte `json:"RequestBody,omitempty"` - - // RequestHeaders stores the raw request headers sent to the docker daemon - RequestHeaders map[string]string `json:"RequestHeaders,omitempty"` - - // ResponseStatusCode stores the status code returned from docker daemon - ResponseStatusCode int `json:"ResponseStatusCode,omitempty"` - - // ResponseBody stores the raw response body sent from docker daemon - ResponseBody []byte `json:"ResponseBody,omitempty"` - - // ResponseHeaders stores the response headers sent to the docker daemon - ResponseHeaders map[string]string `json:"ResponseHeaders,omitempty"` -} - -// Response represents authZ plugin response -type Response struct { - // Allow indicating whether the user is allowed or not - Allow bool `json:"Allow"` - - // Msg stores the authorization message - Msg string `json:"Msg,omitempty"` - - // Err stores a message in case there's an error - Err string `json:"Err,omitempty"` -} diff --git a/pkg/authorization/authz.go b/pkg/authorization/authz.go deleted file mode 100644 index ebbc38061..000000000 --- a/pkg/authorization/authz.go +++ /dev/null @@ -1,179 +0,0 @@ -package authorization - -import ( - "bufio" - "bytes" - "fmt" - "io" - "net/http" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/ioutils" -) - -const maxBodySize = 1048576 // 1MB - -// NewCtx creates new authZ context, it is used to store authorization information related to a specific docker -// REST http session -// A context provides two method: -// Authenticate Request: -// Call authZ plugins with current REST request and AuthN response -// Request contains full HTTP packet sent to the docker daemon -// https://docs.docker.com/reference/api/docker_remote_api/ -// -// Authenticate Response: -// Call authZ plugins with full info about current REST request, REST response and AuthN response -// The response from this method may contains content that overrides the daemon response -// This allows authZ plugins to filter privileged content -// -// If multiple authZ plugins are specified, the 
block/allow decision is based on ANDing all plugin results -// For response manipulation, the response from each plugin is piped between plugins. Plugin execution order -// is determined according to daemon parameters -func NewCtx(authZPlugins []Plugin, user, userAuthNMethod, requestMethod, requestURI string) *Ctx { - return &Ctx{ - plugins: authZPlugins, - user: user, - userAuthNMethod: userAuthNMethod, - requestMethod: requestMethod, - requestURI: requestURI, - } -} - -// Ctx stores a single request-response interaction context -type Ctx struct { - user string - userAuthNMethod string - requestMethod string - requestURI string - plugins []Plugin - // authReq stores the cached request object for the current transaction - authReq *Request -} - -// AuthZRequest authorized the request to the docker daemon using authZ plugins -func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error { - var body []byte - if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize { - var err error - body, r.Body, err = drainBody(r.Body) - if err != nil { - return err - } - } - - var h bytes.Buffer - if err := r.Header.Write(&h); err != nil { - return err - } - - ctx.authReq = &Request{ - User: ctx.user, - UserAuthNMethod: ctx.userAuthNMethod, - RequestMethod: ctx.requestMethod, - RequestURI: ctx.requestURI, - RequestBody: body, - RequestHeaders: headers(r.Header), - } - - for _, plugin := range ctx.plugins { - logrus.Debugf("AuthZ request using plugin %s", plugin.Name()) - - authRes, err := plugin.AuthZRequest(ctx.authReq) - if err != nil { - return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) - } - - if !authRes.Allow { - return newAuthorizationError(plugin.Name(), authRes.Msg) - } - } - - return nil -} - -// AuthZResponse authorized and manipulates the response from docker daemon using authZ plugins -func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error { - ctx.authReq.ResponseStatusCode = rm.StatusCode() - ctx.authReq.ResponseHeaders = headers(rm.Header()) - - if sendBody(ctx.requestURI, rm.Header()) { - ctx.authReq.ResponseBody = rm.RawBody() - } - - for _, plugin := range ctx.plugins { - logrus.Debugf("AuthZ response using plugin %s", plugin.Name()) - - authRes, err := plugin.AuthZResponse(ctx.authReq) - if err != nil { - return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) - } - - if !authRes.Allow { - return newAuthorizationError(plugin.Name(), authRes.Msg) - } - } - - rm.FlushAll() - - return nil -} - -// drainBody dump the body (if it's length is less than 1MB) without modifying the request state -func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) { - bufReader := bufio.NewReaderSize(body, maxBodySize) - newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) - - data, err := bufReader.Peek(maxBodySize) - // Body size exceeds max body size - if err == nil { - logrus.Warnf("Request body is larger than: '%d' skipping body", maxBodySize) - return nil, newBody, nil - } - // Body size is less than maximum size - if err == io.EOF { - return data, newBody, nil - } - // Unknown error - return nil, newBody, err -} - -// sendBody returns true when request/response body should be sent to AuthZPlugin -func sendBody(url string, header http.Header) bool { - // Skip body for auth endpoint - if strings.HasSuffix(url, "/auth") { - return false - } - - // body is sent only for text or json messages - return header.Get("Content-Type") == "application/json" -} - 
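
[Editor's note] drainBody above leans on bufio.Reader.Peek to inspect a request body without consuming it: peeking fills the buffered reader, so the same bytes are still delivered to whoever reads the returned body next. A self-contained sketch of that technique under an assumed 16-byte cap (standing in for the real 1MB maxBodySize; peekBody is an illustrative name):

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

const maxPeek = 16 // stand-in for maxBodySize; bufio's minimum buffer size

// peekBody mirrors drainBody: copy out up to maxPeek bytes for inspection
// while returning a reader that still yields the complete stream.
func peekBody(body io.Reader) ([]byte, io.Reader, error) {
	buf := bufio.NewReaderSize(body, maxPeek)
	data, err := buf.Peek(maxPeek)
	if err == io.EOF {
		// The whole body fit in the buffer: safe to hand out the copy.
		return data, buf, nil
	}
	if err == nil {
		// Body is at least maxPeek bytes: too large, skip the copy.
		return nil, buf, nil
	}
	return nil, buf, err
}

func main() {
	peeked, rest, err := peekBody(strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("peeked=%q\n", peeked) // peeked="hello"
	full, err := io.ReadAll(rest)
	if err != nil {
		panic(err)
	}
	fmt.Printf("full=%q\n", full) // full="hello" — nothing was consumed
}
```

Peek, unlike io.ReadAll, never allocates more than the fixed buffer, which is why the original can cap inspection at 1MB without truncating what the daemon ultimately reads.
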
-// headers returns flatten version of the http headers excluding authorization -func headers(header http.Header) map[string]string { - v := make(map[string]string, 0) - for k, values := range header { - // Skip authorization headers - if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") { - continue - } - for _, val := range values { - v[k] = val - } - } - return v -} - -// authorizationError represents an authorization deny error -type authorizationError struct { - error -} - -// HTTPErrorStatusCode returns the authorization error status code (forbidden) -func (e authorizationError) HTTPErrorStatusCode() int { - return http.StatusForbidden -} - -func newAuthorizationError(plugin, msg string) authorizationError { - return authorizationError{error: fmt.Errorf("authorization denied by plugin %s: %s", plugin, msg)} -} diff --git a/pkg/authorization/authz_unix_test.go b/pkg/authorization/authz_unix_test.go deleted file mode 100644 index e13303f7a..000000000 --- a/pkg/authorization/authz_unix_test.go +++ /dev/null @@ -1,282 +0,0 @@ -// +build !windows - -// TODO Windows: This uses a Unix socket for testing. This might be possible -// to port to Windows using a named pipe instead. - -package authorization - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "os" - "path" - "reflect" - "strings" - "testing" - - "github.com/docker/docker/pkg/plugins" - "github.com/docker/go-connections/tlsconfig" - "github.com/gorilla/mux" -) - -const ( - pluginAddress = "authz-test-plugin.sock" -) - -func TestAuthZRequestPluginError(t *testing.T) { - server := authZPluginTestServer{t: t} - server.start() - defer server.stop() - - authZPlugin := createTestPlugin(t) - - request := Request{ - User: "user", - RequestBody: []byte("sample body"), - RequestURI: "www.authz.com/auth", - RequestMethod: "GET", - RequestHeaders: map[string]string{"header": "value"}, - } - server.replayResponse = Response{ - Err: "an error", - } - - actualResponse, err := authZPlugin.AuthZRequest(&request) - if err != nil { - t.Fatalf("Failed to authorize request %v", err) - } - - if !reflect.DeepEqual(server.replayResponse, *actualResponse) { - t.Fatal("Response must be equal") - } - if !reflect.DeepEqual(request, server.recordedRequest) { - t.Fatal("Requests must be equal") - } -} - -func TestAuthZRequestPlugin(t *testing.T) { - server := authZPluginTestServer{t: t} - server.start() - defer server.stop() - - authZPlugin := createTestPlugin(t) - - request := Request{ - User: "user", - RequestBody: []byte("sample body"), - RequestURI: "www.authz.com/auth", - RequestMethod: "GET", - RequestHeaders: map[string]string{"header": "value"}, - } - server.replayResponse = Response{ - Allow: true, - Msg: "Sample message", - } - - actualResponse, err := authZPlugin.AuthZRequest(&request) - if err != nil { - t.Fatalf("Failed to authorize request %v", err) - } - - if !reflect.DeepEqual(server.replayResponse, *actualResponse) { - t.Fatal("Response must be equal") - } - if !reflect.DeepEqual(request, server.recordedRequest) { - t.Fatal("Requests must be equal") - } -} - -func TestAuthZResponsePlugin(t *testing.T) { - server := authZPluginTestServer{t: t} - server.start() - defer server.stop() - - authZPlugin := createTestPlugin(t) - - request := Request{ - User: "user", - RequestURI: "someting.com/auth", - RequestBody: []byte("sample body"), - } - server.replayResponse = Response{ - Allow: true, - Msg: "Sample message", - } - - 
actualResponse, err := authZPlugin.AuthZResponse(&request) - if err != nil { - t.Fatalf("Failed to authorize request %v", err) - } - - if !reflect.DeepEqual(server.replayResponse, *actualResponse) { - t.Fatal("Response must be equal") - } - if !reflect.DeepEqual(request, server.recordedRequest) { - t.Fatal("Requests must be equal") - } -} - -func TestResponseModifier(t *testing.T) { - r := httptest.NewRecorder() - m := NewResponseModifier(r) - m.Header().Set("h1", "v1") - m.Write([]byte("body")) - m.WriteHeader(500) - - m.FlushAll() - if r.Header().Get("h1") != "v1" { - t.Fatalf("Header value must exists %s", r.Header().Get("h1")) - } - if !reflect.DeepEqual(r.Body.Bytes(), []byte("body")) { - t.Fatalf("Body value must exists %s", r.Body.Bytes()) - } - if r.Code != 500 { - t.Fatalf("Status code must be correct %d", r.Code) - } -} - -func TestDrainBody(t *testing.T) { - tests := []struct { - length int // length is the message length send to drainBody - expectedBodyLength int // expectedBodyLength is the expected body length after drainBody is called - }{ - {10, 10}, // Small message size - {maxBodySize - 1, maxBodySize - 1}, // Max message size - {maxBodySize * 2, 0}, // Large message size (skip copying body) - - } - - for _, test := range tests { - msg := strings.Repeat("a", test.length) - body, closer, err := drainBody(ioutil.NopCloser(bytes.NewReader([]byte(msg)))) - if err != nil { - t.Fatal(err) - } - if len(body) != test.expectedBodyLength { - t.Fatalf("Body must be copied, actual length: '%d'", len(body)) - } - if closer == nil { - t.Fatal("Closer must not be nil") - } - modified, err := ioutil.ReadAll(closer) - if err != nil { - t.Fatalf("Error must not be nil: '%v'", err) - } - if len(modified) != len(msg) { - t.Fatalf("Result should not be truncated. 
Original length: '%d', new length: '%d'", len(msg), len(modified)) - } - } -} - -func TestResponseModifierOverride(t *testing.T) { - r := httptest.NewRecorder() - m := NewResponseModifier(r) - m.Header().Set("h1", "v1") - m.Write([]byte("body")) - m.WriteHeader(500) - - overrideHeader := make(http.Header) - overrideHeader.Add("h1", "v2") - overrideHeaderBytes, err := json.Marshal(overrideHeader) - if err != nil { - t.Fatalf("override header failed %v", err) - } - - m.OverrideHeader(overrideHeaderBytes) - m.OverrideBody([]byte("override body")) - m.OverrideStatusCode(404) - m.FlushAll() - if r.Header().Get("h1") != "v2" { - t.Fatalf("Header value must exists %s", r.Header().Get("h1")) - } - if !reflect.DeepEqual(r.Body.Bytes(), []byte("override body")) { - t.Fatalf("Body value must exists %s", r.Body.Bytes()) - } - if r.Code != 404 { - t.Fatalf("Status code must be correct %d", r.Code) - } -} - -// createTestPlugin creates a new sample authorization plugin -func createTestPlugin(t *testing.T) *authorizationPlugin { - pwd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - client, err := plugins.NewClient("unix:///"+path.Join(pwd, pluginAddress), &tlsconfig.Options{InsecureSkipVerify: true}) - if err != nil { - t.Fatalf("Failed to create client %v", err) - } - - return &authorizationPlugin{name: "plugin", plugin: client} -} - -// AuthZPluginTestServer is a simple server that implements the authZ plugin interface -type authZPluginTestServer struct { - listener net.Listener - t *testing.T - // request stores the request sent from the daemon to the plugin - recordedRequest Request - // response stores the response sent from the plugin to the daemon - replayResponse Response - server *httptest.Server -} - -// start starts the test server that implements the plugin -func (t *authZPluginTestServer) start() { - r := mux.NewRouter() - l, err := net.Listen("unix", pluginAddress) - if err != nil { - t.t.Fatal(err) - } - t.listener = l - r.HandleFunc("/Plugin.Activate", t.activate) - r.HandleFunc("/"+AuthZApiRequest, t.auth) - r.HandleFunc("/"+AuthZApiResponse, t.auth) - t.server = &httptest.Server{ - Listener: l, - Config: &http.Server{ - Handler: r, - Addr: pluginAddress, - }, - } - t.server.Start() -} - -// stop stops the test server that implements the plugin -func (t *authZPluginTestServer) stop() { - t.server.Close() - os.Remove(pluginAddress) - if t.listener != nil { - t.listener.Close() - } -} - -// auth is a used to record/replay the authentication api messages -func (t *authZPluginTestServer) auth(w http.ResponseWriter, r *http.Request) { - t.recordedRequest = Request{} - body, err := ioutil.ReadAll(r.Body) - if err != nil { - t.t.Fatal(err) - } - r.Body.Close() - json.Unmarshal(body, &t.recordedRequest) - b, err := json.Marshal(t.replayResponse) - if err != nil { - t.t.Fatal(err) - } - w.Write(b) -} - -func (t *authZPluginTestServer) activate(w http.ResponseWriter, r *http.Request) { - b, err := json.Marshal(plugins.Manifest{Implements: []string{AuthZApiImplements}}) - if err != nil { - t.t.Fatal(err) - } - w.Write(b) -} diff --git a/pkg/authorization/middleware.go b/pkg/authorization/middleware.go deleted file mode 100644 index 73511a814..000000000 --- a/pkg/authorization/middleware.go +++ /dev/null @@ -1,60 +0,0 @@ -package authorization - -import ( - "net/http" - - "github.com/Sirupsen/logrus" - "golang.org/x/net/context" -) - -// Middleware uses a list of plugins to -// handle authorization in the API requests. 
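
[Editor's note] The Middleware type that follows wraps every API handler with a pre-request authorization pass and a post-response one, short-circuiting on the first denial. A generic sketch of that wrapping shape (the handler signature is simplified — no context or vars — and `wrap`, `allowGET` are illustrative names; the real post pass inspects the buffered response via ResponseModifier):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type handlerFunc func(w http.ResponseWriter, r *http.Request) error

// wrap mirrors the WrapHandler pattern below: run a pre-check, call the
// inner handler, then run a post-check, returning the first error seen.
func wrap(pre, post func(*http.Request) error, inner handlerFunc) handlerFunc {
	return func(w http.ResponseWriter, r *http.Request) error {
		if err := pre(r); err != nil {
			return err // denied before the daemon ever handles it
		}
		if err := inner(w, r); err != nil {
			return err
		}
		return post(r) // response-side check before anything is flushed
	}
}

func main() {
	allowGET := func(r *http.Request) error {
		if r.Method != http.MethodGet {
			return fmt.Errorf("method %s denied", r.Method)
		}
		return nil
	}
	noop := func(*http.Request) error { return nil }

	h := wrap(allowGET, noop, func(w http.ResponseWriter, r *http.Request) error {
		_, err := w.Write([]byte("ok"))
		return err
	})

	rec := httptest.NewRecorder()
	err := h(rec, httptest.NewRequest(http.MethodGet, "/ping", nil))
	fmt.Println(err, rec.Body.String()) // <nil> ok

	err = h(httptest.NewRecorder(), httptest.NewRequest(http.MethodPost, "/ping", nil))
	fmt.Println(err) // method POST denied
}
```
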
-type Middleware struct { - plugins []Plugin -} - -// NewMiddleware creates a new Middleware -// with a slice of plugins. -func NewMiddleware(p []Plugin) Middleware { - return Middleware{ - plugins: p, - } -} - -// WrapHandler returns a new handler function wrapping the previous one in the request chain. -func (m Middleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - - user := "" - userAuthNMethod := "" - - // Default authorization using existing TLS connection credentials - // FIXME: Non trivial authorization mechanisms (such as advanced certificate validations, kerberos support - // and ldap) will be extracted using AuthN feature, which is tracked under: - // https://github.com/docker/docker/pull/20883 - if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 { - user = r.TLS.PeerCertificates[0].Subject.CommonName - userAuthNMethod = "TLS" - } - - authCtx := NewCtx(m.plugins, user, userAuthNMethod, r.Method, r.RequestURI) - - if err := authCtx.AuthZRequest(w, r); err != nil { - logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) - return err - } - - rw := NewResponseModifier(w) - - if err := handler(ctx, rw, r, vars); err != nil { - logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, err) - return err - } - - if err := authCtx.AuthZResponse(rw, r); err != nil { - logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) - return err - } - return nil - } -} diff --git a/pkg/authorization/plugin.go b/pkg/authorization/plugin.go deleted file mode 100644 index fc5c7efb4..000000000 --- a/pkg/authorization/plugin.go +++ /dev/null @@ -1,92 +0,0 @@ -package authorization - -import ( - "sync" - - "github.com/docker/docker/pkg/plugins" -) - -// Plugin allows third party plugins to authorize requests and responses -// in the context of docker API -type Plugin interface { - // Name returns the registered plugin name - Name() string - - // AuthZRequest authorizes the request from the client to the daemon - AuthZRequest(*Request) (*Response, error) - - // AuthZResponse authorizes the response from the daemon to the client - AuthZResponse(*Request) (*Response, error) -} - -// NewPlugins constructs and initializes the authorization plugins based on plugin names -func NewPlugins(names []string) []Plugin { - plugins := []Plugin{} - pluginsMap := make(map[string]struct{}) - for _, name := range names { - if _, ok := pluginsMap[name]; ok { - continue - } - pluginsMap[name] = struct{}{} - plugins = append(plugins, newAuthorizationPlugin(name)) - } - return plugins -} - -// authorizationPlugin is an internal adapter to docker plugin system -type authorizationPlugin struct { - plugin *plugins.Client - name string - once sync.Once -} - -func newAuthorizationPlugin(name string) Plugin { - return &authorizationPlugin{name: name} -} - -func (a *authorizationPlugin) Name() string { - return a.name -} - -func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) { - if err := a.initPlugin(); err != nil { - return nil, err - } - - authRes := &Response{} - if err := a.plugin.Call(AuthZApiRequest, authReq, authRes); err != nil { - return nil, err - } - - return authRes, nil -} - -func (a *authorizationPlugin) AuthZResponse(authReq 
*Request) (*Response, error) { - if err := a.initPlugin(); err != nil { - return nil, err - } - - authRes := &Response{} - if err := a.plugin.Call(AuthZApiResponse, authReq, authRes); err != nil { - return nil, err - } - - return authRes, nil -} - -// initPlugin initializes the authorization plugin if needed -func (a *authorizationPlugin) initPlugin() error { - // Lazy loading of plugins - var err error - a.once.Do(func() { - if a.plugin == nil { - plugin, e := plugins.Get(a.name, AuthZApiImplements) - if e != nil { - err = e - return - } - a.plugin = plugin.Client() - } - }) - return err -} diff --git a/pkg/authorization/response.go b/pkg/authorization/response.go deleted file mode 100644 index 9eec82d42..000000000 --- a/pkg/authorization/response.go +++ /dev/null @@ -1,201 +0,0 @@ -package authorization - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "net" - "net/http" - - "github.com/Sirupsen/logrus" -) - -// ResponseModifier allows authorization plugins to read and modify the content of the http.response -type ResponseModifier interface { - http.ResponseWriter - http.Flusher - http.CloseNotifier - - // RawBody returns the current http content - RawBody() []byte - - // RawHeaders returns the current content of the http headers - RawHeaders() ([]byte, error) - - // StatusCode returns the current status code - StatusCode() int - - // OverrideBody replaces the body of the HTTP reply - OverrideBody(b []byte) - - // OverrideHeader replaces the headers of the HTTP reply - OverrideHeader(b []byte) error - - // OverrideStatusCode replaces the status code of the HTTP reply - OverrideStatusCode(statusCode int) - - // Flush flushes all data to the HTTP response - FlushAll() error - - // Hijacked indicates the response has been hijacked by the Docker daemon - Hijacked() bool -} - -// NewResponseModifier creates a wrapper to an http.ResponseWriter to allow inspecting and modifying the content -func NewResponseModifier(rw http.ResponseWriter) ResponseModifier { - return &responseModifier{rw: rw, header: make(http.Header)} -} - -// responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore -// the http request/response from docker daemon -type responseModifier struct { - // The original response writer - rw http.ResponseWriter - // body holds the response body - body []byte - // header holds the response header - header http.Header - // statusCode holds the response status code - statusCode int - // hijacked indicates the request has been hijacked - hijacked bool -} - -func (rm *responseModifier) Hijacked() bool { - return rm.hijacked -} - -// WriterHeader stores the http status code -func (rm *responseModifier) WriteHeader(s int) { - - // Use original request if hijacked - if rm.hijacked { - rm.rw.WriteHeader(s) - return - } - - rm.statusCode = s -} - -// Header returns the internal http header -func (rm *responseModifier) Header() http.Header { - - // Use original header if hijacked - if rm.hijacked { - return rm.rw.Header() - } - - return rm.header -} - -// StatusCode returns the http status code -func (rm *responseModifier) StatusCode() int { - return rm.statusCode -} - -// OverrideBody replaces the body of the HTTP response -func (rm *responseModifier) OverrideBody(b []byte) { - rm.body = b -} - -// OverrideStatusCode replaces the status code of the HTTP response -func (rm *responseModifier) OverrideStatusCode(statusCode int) { - rm.statusCode = statusCode -} - -// OverrideHeader replaces the headers of the HTTP response -func (rm 
*responseModifier) OverrideHeader(b []byte) error { - header := http.Header{} - if err := json.Unmarshal(b, &header); err != nil { - return err - } - rm.header = header - return nil -} - -// Write stores the byte array inside content -func (rm *responseModifier) Write(b []byte) (int, error) { - - if rm.hijacked { - return rm.rw.Write(b) - } - - rm.body = append(rm.body, b...) - return len(b), nil -} - -// Body returns the response body -func (rm *responseModifier) RawBody() []byte { - return rm.body -} - -func (rm *responseModifier) RawHeaders() ([]byte, error) { - var b bytes.Buffer - if err := rm.header.Write(&b); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -// Hijack returns the internal connection of the wrapped http.ResponseWriter -func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) { - - rm.hijacked = true - rm.FlushAll() - - hijacker, ok := rm.rw.(http.Hijacker) - if !ok { - return nil, nil, fmt.Errorf("Internal response writer doesn't support the Hijacker interface") - } - return hijacker.Hijack() -} - -// CloseNotify uses the internal close notify API of the wrapped http.ResponseWriter -func (rm *responseModifier) CloseNotify() <-chan bool { - closeNotifier, ok := rm.rw.(http.CloseNotifier) - if !ok { - logrus.Error("Internal response writer doesn't support the CloseNotifier interface") - return nil - } - return closeNotifier.CloseNotify() -} - -// Flush uses the internal flush API of the wrapped http.ResponseWriter -func (rm *responseModifier) Flush() { - flusher, ok := rm.rw.(http.Flusher) - if !ok { - logrus.Error("Internal response writer doesn't support the Flusher interface") - return - } - - rm.FlushAll() - flusher.Flush() -} - -// FlushAll flushes all data to the HTTP response -func (rm *responseModifier) FlushAll() error { - // Copy the status code - if rm.statusCode > 0 { - rm.rw.WriteHeader(rm.statusCode) - } - - // Copy the header - for k, vv := range rm.header { - for _, v := range vv { - rm.rw.Header().Add(k, v) - } - } - - var err error - if len(rm.body) > 0 { - // Write body - _, err = rm.rw.Write(rm.body) - } - - // Clean previous data - rm.body = nil - rm.statusCode = 0 - rm.header = http.Header{} - return err -} diff --git a/pkg/broadcaster/unbuffered.go b/pkg/broadcaster/unbuffered.go deleted file mode 100644 index 784d65d6f..000000000 --- a/pkg/broadcaster/unbuffered.go +++ /dev/null @@ -1,49 +0,0 @@ -package broadcaster - -import ( - "io" - "sync" -) - -// Unbuffered accumulates multiple io.WriteCloser by stream. -type Unbuffered struct { - mu sync.Mutex - writers []io.WriteCloser -} - -// Add adds new io.WriteCloser. -func (w *Unbuffered) Add(writer io.WriteCloser) { - w.mu.Lock() - w.writers = append(w.writers, writer) - w.mu.Unlock() -} - -// Write writes bytes to all writers. Failed writers will be evicted during -// this call. -func (w *Unbuffered) Write(p []byte) (n int, err error) { - w.mu.Lock() - var evict []int - for i, sw := range w.writers { - if n, err := sw.Write(p); err != nil || n != len(p) { - // On error, evict the writer - evict = append(evict, i) - } - } - for n, i := range evict { - w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...) - } - w.mu.Unlock() - return len(p), nil -} - -// Clean closes and removes all writers. Last non-eol-terminated part of data -// will be saved. 
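
[Editor's note] The eviction loop in Unbuffered.Write above removes several indices from a slice in a single pass, and the `i-n` offset is easy to misread: it compensates for the elements already removed on earlier iterations, and only works because the indices are collected in ascending order. A standalone sketch of the same trick (`removeIndices` is an illustrative name):

```go
package main

import "fmt"

// removeIndices mirrors the eviction loop in Unbuffered.Write: evict holds
// indices into the original slice in ascending order; on the n-th removal
// everything after the cut has shifted left n times, hence i-n.
func removeIndices(s []string, evict []int) []string {
	for n, i := range evict {
		s = append(s[:i-n], s[i-n+1:]...)
	}
	return s
}

func main() {
	writers := []string{"w0", "w1", "w2", "w3", "w4"}
	// Suppose the writers at indices 1 and 3 failed during a broadcast.
	fmt.Println(removeIndices(writers, []int{1, 3})) // [w0 w2 w4]
}
```
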
-func (w *Unbuffered) Clean() error { - w.mu.Lock() - for _, sw := range w.writers { - sw.Close() - } - w.writers = nil - w.mu.Unlock() - return nil -} diff --git a/pkg/broadcaster/unbuffered_test.go b/pkg/broadcaster/unbuffered_test.go deleted file mode 100644 index 9f8e72bc0..000000000 --- a/pkg/broadcaster/unbuffered_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package broadcaster - -import ( - "bytes" - "errors" - "strings" - - "testing" -) - -type dummyWriter struct { - buffer bytes.Buffer - failOnWrite bool -} - -func (dw *dummyWriter) Write(p []byte) (n int, err error) { - if dw.failOnWrite { - return 0, errors.New("Fake fail") - } - return dw.buffer.Write(p) -} - -func (dw *dummyWriter) String() string { - return dw.buffer.String() -} - -func (dw *dummyWriter) Close() error { - return nil -} - -func TestUnbuffered(t *testing.T) { - writer := new(Unbuffered) - - // Test 1: Both bufferA and bufferB should contain "foo" - bufferA := &dummyWriter{} - writer.Add(bufferA) - bufferB := &dummyWriter{} - writer.Add(bufferB) - writer.Write([]byte("foo")) - - if bufferA.String() != "foo" { - t.Errorf("Buffer contains %v", bufferA.String()) - } - - if bufferB.String() != "foo" { - t.Errorf("Buffer contains %v", bufferB.String()) - } - - // Test2: bufferA and bufferB should contain "foobar", - // while bufferC should only contain "bar" - bufferC := &dummyWriter{} - writer.Add(bufferC) - writer.Write([]byte("bar")) - - if bufferA.String() != "foobar" { - t.Errorf("Buffer contains %v", bufferA.String()) - } - - if bufferB.String() != "foobar" { - t.Errorf("Buffer contains %v", bufferB.String()) - } - - if bufferC.String() != "bar" { - t.Errorf("Buffer contains %v", bufferC.String()) - } - - // Test3: Test eviction on failure - bufferA.failOnWrite = true - writer.Write([]byte("fail")) - if bufferA.String() != "foobar" { - t.Errorf("Buffer contains %v", bufferA.String()) - } - if bufferC.String() != "barfail" { - t.Errorf("Buffer contains %v", bufferC.String()) - } - // Even though we reset the flag, no more writes should go in there - bufferA.failOnWrite = false - writer.Write([]byte("test")) - if bufferA.String() != "foobar" { - t.Errorf("Buffer contains %v", bufferA.String()) - } - if bufferC.String() != "barfailtest" { - t.Errorf("Buffer contains %v", bufferC.String()) - } - - // Test4: Test eviction on multiple simultaneous failures - bufferB.failOnWrite = true - bufferC.failOnWrite = true - bufferD := &dummyWriter{} - writer.Add(bufferD) - writer.Write([]byte("yo")) - writer.Write([]byte("ink")) - if strings.Contains(bufferB.String(), "yoink") { - t.Errorf("bufferB received write. contents: %q", bufferB) - } - if strings.Contains(bufferC.String(), "yoink") { - t.Errorf("bufferC received write. contents: %q", bufferC) - } - if g, w := bufferD.String(), "yoink"; g != w { - t.Errorf("bufferD = %q, want %q", g, w) - } - - writer.Clean() -} - -type devNullCloser int - -func (d devNullCloser) Close() error { - return nil -} - -func (d devNullCloser) Write(buf []byte) (int, error) { - return len(buf), nil -} - -// This test checks for races. It is only useful when run with the race detector. 
-func TestRaceUnbuffered(t *testing.T) { - writer := new(Unbuffered) - c := make(chan bool) - go func() { - writer.Add(devNullCloser(0)) - c <- true - }() - writer.Write([]byte("hello")) - <-c -} - -func BenchmarkUnbuffered(b *testing.B) { - writer := new(Unbuffered) - setUpWriter := func() { - for i := 0; i < 100; i++ { - writer.Add(devNullCloser(0)) - writer.Add(devNullCloser(0)) - writer.Add(devNullCloser(0)) - } - } - testLine := "Line that thinks that it is log line from docker" - var buf bytes.Buffer - for i := 0; i < 100; i++ { - buf.Write([]byte(testLine + "\n")) - } - // line without eol - buf.Write([]byte(testLine)) - testText := buf.Bytes() - b.SetBytes(int64(5 * len(testText))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - b.StopTimer() - setUpWriter() - b.StartTimer() - - for j := 0; j < 5; j++ { - if _, err := writer.Write(testText); err != nil { - b.Fatal(err) - } - } - - b.StopTimer() - writer.Clean() - b.StartTimer() - } -} diff --git a/pkg/discovery/README.md b/pkg/discovery/README.md deleted file mode 100644 index 39777c217..000000000 --- a/pkg/discovery/README.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -page_title: Docker discovery -page_description: discovery -page_keywords: docker, clustering, discovery ---- - -# Discovery - -Docker comes with multiple Discovery backends. - -## Backends - -### Using etcd - -Point your Docker Engine instances to a common etcd instance. You can specify -the address Docker uses to advertise the node using the `--cluster-advertise` -flag. - -```bash -$ docker daemon -H= --cluster-advertise= --cluster-store etcd://,/ -``` - -### Using consul - -Point your Docker Engine instances to a common Consul instance. You can specify -the address Docker uses to advertise the node using the `--cluster-advertise` -flag. - -```bash -$ docker daemon -H= --cluster-advertise= --cluster-store consul:/// -``` - -### Using zookeeper - -Point your Docker Engine instances to a common Zookeeper instance. You can specify -the address Docker uses to advertise the node using the `--cluster-advertise` -flag. - -```bash -$ docker daemon -H= --cluster-advertise= --cluster-store zk://,/ -``` diff --git a/pkg/discovery/backends.go b/pkg/discovery/backends.go deleted file mode 100644 index 65364c9ae..000000000 --- a/pkg/discovery/backends.go +++ /dev/null @@ -1,107 +0,0 @@ -package discovery - -import ( - "fmt" - "net" - "strings" - "time" - - log "github.com/Sirupsen/logrus" -) - -var ( - // Backends is a global map of discovery backends indexed by their - // associated scheme. - backends = make(map[string]Backend) -) - -// Register makes a discovery backend available by the provided scheme. -// If Register is called twice with the same scheme an error is returned. 
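
[Editor's note] Register, whose body follows, implements a scheme-keyed backend registry, and the companion parse helper defaults a bare host list to the static "nodes" backend. A compact sketch of that registry-plus-scheme-parsing pattern (Backend is stubbed to a one-method interface here; the real interface appears later in this diff):

```go
package main

import (
	"fmt"
	"strings"
)

// Backend is a stand-in for the discovery.Backend interface.
type Backend interface{ Name() string }

// backends is a scheme-keyed registry, like the package-level map above.
var backends = map[string]Backend{}

// register refuses duplicate schemes, mirroring Register below.
func register(scheme string, b Backend) error {
	if _, exists := backends[scheme]; exists {
		return fmt.Errorf("scheme already registered %s", scheme)
	}
	backends[scheme] = b
	return nil
}

// parse splits "etcd://host:port" into ("etcd", "host:port") and defaults
// scheme-less input to the static "nodes" backend, as parse does below.
func parse(rawurl string) (string, string) {
	parts := strings.SplitN(rawurl, "://", 2)
	if len(parts) == 1 {
		return "nodes", parts[0]
	}
	return parts[0], parts[1]
}

type fake string

func (f fake) Name() string { return string(f) }

func main() {
	fmt.Println(register("etcd", fake("etcd")))  // <nil>
	fmt.Println(register("etcd", fake("again"))) // scheme already registered etcd
	scheme, uri := parse("127.0.0.1:2375")
	fmt.Println(scheme, uri) // nodes 127.0.0.1:2375
}
```
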
-func Register(scheme string, d Backend) error { - if _, exists := backends[scheme]; exists { - return fmt.Errorf("scheme already registered %s", scheme) - } - log.WithField("name", scheme).Debug("Registering discovery service") - backends[scheme] = d - return nil -} - -func parse(rawurl string) (string, string) { - parts := strings.SplitN(rawurl, "://", 2) - - // nodes:port,node2:port => nodes://node1:port,node2:port - if len(parts) == 1 { - return "nodes", parts[0] - } - return parts[0], parts[1] -} - -// ParseAdvertise parses the --cluster-advertise daemon config which accepts -// : or : -func ParseAdvertise(advertise string) (string, error) { - var ( - iface *net.Interface - addrs []net.Addr - err error - ) - - addr, port, err := net.SplitHostPort(advertise) - - if err != nil { - return "", fmt.Errorf("invalid --cluster-advertise configuration: %s: %v", advertise, err) - } - - ip := net.ParseIP(addr) - // If it is a valid ip-address, use it as is - if ip != nil { - return advertise, nil - } - - // If advertise is a valid interface name, get the valid ipv4 address and use it to advertise - ifaceName := addr - iface, err = net.InterfaceByName(ifaceName) - if err != nil { - return "", fmt.Errorf("invalid cluster advertise IP address or interface name (%s) : %v", advertise, err) - } - - addrs, err = iface.Addrs() - if err != nil { - return "", fmt.Errorf("unable to get advertise IP address from interface (%s) : %v", advertise, err) - } - - if addrs == nil || len(addrs) == 0 { - return "", fmt.Errorf("no available advertise IP address in interface (%s)", advertise) - } - - addr = "" - for _, a := range addrs { - ip, _, err := net.ParseCIDR(a.String()) - if err != nil { - return "", fmt.Errorf("error deriving advertise ip-address in interface (%s) : %v", advertise, err) - } - if ip.To4() == nil || ip.IsLoopback() { - continue - } - addr = ip.String() - break - } - if addr == "" { - return "", fmt.Errorf("couldnt find a valid ip-address in interface %s", advertise) - } - - addr = net.JoinHostPort(addr, port) - return addr, nil -} - -// New returns a new Discovery given a URL, heartbeat and ttl settings. -// Returns an error if the URL scheme is not supported. -func New(rawurl string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) (Backend, error) { - scheme, uri := parse(rawurl) - if backend, exists := backends[scheme]; exists { - log.WithFields(log.Fields{"name": scheme, "uri": uri}).Debug("Initializing discovery service") - err := backend.Initialize(uri, heartbeat, ttl, clusterOpts) - return backend, err - } - - return nil, ErrNotSupported -} diff --git a/pkg/discovery/discovery.go b/pkg/discovery/discovery.go deleted file mode 100644 index ca7f58745..000000000 --- a/pkg/discovery/discovery.go +++ /dev/null @@ -1,35 +0,0 @@ -package discovery - -import ( - "errors" - "time" -) - -var ( - // ErrNotSupported is returned when a discovery service is not supported. - ErrNotSupported = errors.New("discovery service not supported") - - // ErrNotImplemented is returned when discovery feature is not implemented - // by discovery backend. - ErrNotImplemented = errors.New("not implemented in this discovery service") -) - -// Watcher provides watching over a cluster for nodes joining and leaving. -type Watcher interface { - // Watch the discovery for entry changes. - // Returns a channel that will receive changes or an error. - // Providing a non-nil stopCh can be used to stop watching. 
- Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error) -} - -// Backend is implemented by discovery backends which manage cluster entries. -type Backend interface { - // Watcher must be provided by every backend. - Watcher - - // Initialize the discovery with URIs, a heartbeat, a ttl and optional settings. - Initialize(string, time.Duration, time.Duration, map[string]string) error - - // Register to the discovery. - Register(string) error -} diff --git a/pkg/discovery/discovery_test.go b/pkg/discovery/discovery_test.go deleted file mode 100644 index 6084f3ef0..000000000 --- a/pkg/discovery/discovery_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package discovery - -import ( - "testing" - - "github.com/go-check/check" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -type DiscoverySuite struct{} - -var _ = check.Suite(&DiscoverySuite{}) - -func (s *DiscoverySuite) TestNewEntry(c *check.C) { - entry, err := NewEntry("127.0.0.1:2375") - c.Assert(err, check.IsNil) - c.Assert(entry.Equals(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) - c.Assert(entry.String(), check.Equals, "127.0.0.1:2375") - - entry, err = NewEntry("[2001:db8:0:f101::2]:2375") - c.Assert(err, check.IsNil) - c.Assert(entry.Equals(&Entry{Host: "2001:db8:0:f101::2", Port: "2375"}), check.Equals, true) - c.Assert(entry.String(), check.Equals, "[2001:db8:0:f101::2]:2375") - - _, err = NewEntry("127.0.0.1") - c.Assert(err, check.NotNil) -} - -func (s *DiscoverySuite) TestParse(c *check.C) { - scheme, uri := parse("127.0.0.1:2375") - c.Assert(scheme, check.Equals, "nodes") - c.Assert(uri, check.Equals, "127.0.0.1:2375") - - scheme, uri = parse("localhost:2375") - c.Assert(scheme, check.Equals, "nodes") - c.Assert(uri, check.Equals, "localhost:2375") - - scheme, uri = parse("scheme://127.0.0.1:2375") - c.Assert(scheme, check.Equals, "scheme") - c.Assert(uri, check.Equals, "127.0.0.1:2375") - - scheme, uri = parse("scheme://localhost:2375") - c.Assert(scheme, check.Equals, "scheme") - c.Assert(uri, check.Equals, "localhost:2375") - - scheme, uri = parse("") - c.Assert(scheme, check.Equals, "nodes") - c.Assert(uri, check.Equals, "") -} - -func (s *DiscoverySuite) TestCreateEntries(c *check.C) { - entries, err := CreateEntries(nil) - c.Assert(entries, check.DeepEquals, Entries{}) - c.Assert(err, check.IsNil) - - entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", "[2001:db8:0:f101::2]:2375", ""}) - c.Assert(err, check.IsNil) - expected := Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.2", Port: "2375"}, - &Entry{Host: "2001:db8:0:f101::2", Port: "2375"}, - } - c.Assert(entries.Equals(expected), check.Equals, true) - - _, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"}) - c.Assert(err, check.NotNil) -} - -func (s *DiscoverySuite) TestContainsEntry(c *check.C) { - entries, err := CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""}) - c.Assert(err, check.IsNil) - c.Assert(entries.Contains(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) - c.Assert(entries.Contains(&Entry{Host: "127.0.0.3", Port: "2375"}), check.Equals, false) -} - -func (s *DiscoverySuite) TestEntriesEquality(c *check.C) { - entries := Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.2", Port: "2375"}, - } - - // Same - c.Assert(entries.Equals(Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.2", Port: "2375"}, - }), check. 
- Equals, true) - - // Different size - c.Assert(entries.Equals(Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.2", Port: "2375"}, - &Entry{Host: "127.0.0.3", Port: "2375"}, - }), check. - Equals, false) - - // Different content - c.Assert(entries.Equals(Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.42", Port: "2375"}, - }), check. - Equals, false) - -} - -func (s *DiscoverySuite) TestEntriesDiff(c *check.C) { - entry1 := &Entry{Host: "1.1.1.1", Port: "1111"} - entry2 := &Entry{Host: "2.2.2.2", Port: "2222"} - entry3 := &Entry{Host: "3.3.3.3", Port: "3333"} - entries := Entries{entry1, entry2} - - // No diff - added, removed := entries.Diff(Entries{entry2, entry1}) - c.Assert(added, check.HasLen, 0) - c.Assert(removed, check.HasLen, 0) - - // Add - added, removed = entries.Diff(Entries{entry2, entry3, entry1}) - c.Assert(added, check.HasLen, 1) - c.Assert(added.Contains(entry3), check.Equals, true) - c.Assert(removed, check.HasLen, 0) - - // Remove - added, removed = entries.Diff(Entries{entry2}) - c.Assert(added, check.HasLen, 0) - c.Assert(removed, check.HasLen, 1) - c.Assert(removed.Contains(entry1), check.Equals, true) - - // Add and remove - added, removed = entries.Diff(Entries{entry1, entry3}) - c.Assert(added, check.HasLen, 1) - c.Assert(added.Contains(entry3), check.Equals, true) - c.Assert(removed, check.HasLen, 1) - c.Assert(removed.Contains(entry2), check.Equals, true) -} diff --git a/pkg/discovery/entry.go b/pkg/discovery/entry.go deleted file mode 100644 index ce23bbf89..000000000 --- a/pkg/discovery/entry.go +++ /dev/null @@ -1,94 +0,0 @@ -package discovery - -import "net" - -// NewEntry creates a new entry. -func NewEntry(url string) (*Entry, error) { - host, port, err := net.SplitHostPort(url) - if err != nil { - return nil, err - } - return &Entry{host, port}, nil -} - -// An Entry represents a host. -type Entry struct { - Host string - Port string -} - -// Equals returns true if cmp contains the same data. -func (e *Entry) Equals(cmp *Entry) bool { - return e.Host == cmp.Host && e.Port == cmp.Port -} - -// String returns the string form of an entry. -func (e *Entry) String() string { - return net.JoinHostPort(e.Host, e.Port) -} - -// Entries is a list of *Entry with some helpers. -type Entries []*Entry - -// Equals returns true if cmp contains the same data. -func (e Entries) Equals(cmp Entries) bool { - // Check if the file has really changed. - if len(e) != len(cmp) { - return false - } - for i := range e { - if !e[i].Equals(cmp[i]) { - return false - } - } - return true -} - -// Contains returns true if the Entries contain a given Entry. -func (e Entries) Contains(entry *Entry) bool { - for _, curr := range e { - if curr.Equals(entry) { - return true - } - } - return false -} - -// Diff compares two entries and returns the added and removed entries. -func (e Entries) Diff(cmp Entries) (Entries, Entries) { - added := Entries{} - for _, entry := range cmp { - if !e.Contains(entry) { - added = append(added, entry) - } - } - - removed := Entries{} - for _, entry := range e { - if !cmp.Contains(entry) { - removed = append(removed, entry) - } - } - - return added, removed -} - -// CreateEntries returns an array of entries based on the given addresses. 
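
[Editor's note] Entries.Diff above is what watchers use to turn two membership snapshots into "joined" and "left" sets, with order-insensitive containment checks. A trimmed sketch of the same computation (`entry`, `contains`, and `diff` are simplified stand-ins for Entry, Entries.Contains, and Entries.Diff):

```go
package main

import (
	"fmt"
	"net"
)

type entry struct{ host, port string }

func (e entry) String() string { return net.JoinHostPort(e.host, e.port) }

// contains mirrors Entries.Contains: linear membership by value equality.
func contains(list []entry, e entry) bool {
	for _, c := range list {
		if c == e {
			return true
		}
	}
	return false
}

// diff mirrors Entries.Diff: anything in cur but not prev joined, anything
// in prev but not cur left.
func diff(prev, cur []entry) (added, removed []entry) {
	for _, e := range cur {
		if !contains(prev, e) {
			added = append(added, e)
		}
	}
	for _, e := range prev {
		if !contains(cur, e) {
			removed = append(removed, e)
		}
	}
	return added, removed
}

func main() {
	prev := []entry{{"1.1.1.1", "2375"}, {"2.2.2.2", "2375"}}
	cur := []entry{{"2.2.2.2", "2375"}, {"3.3.3.3", "2375"}}
	added, removed := diff(prev, cur)
	fmt.Println("added:", added, "removed:", removed)
	// added: [3.3.3.3:2375] removed: [1.1.1.1:2375]
}
```
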
-func CreateEntries(addrs []string) (Entries, error) { - entries := Entries{} - if addrs == nil { - return entries, nil - } - - for _, addr := range addrs { - if len(addr) == 0 { - continue - } - entry, err := NewEntry(addr) - if err != nil { - return nil, err - } - entries = append(entries, entry) - } - return entries, nil -} diff --git a/pkg/discovery/file/file.go b/pkg/discovery/file/file.go deleted file mode 100644 index b4f870b86..000000000 --- a/pkg/discovery/file/file.go +++ /dev/null @@ -1,109 +0,0 @@ -package file - -import ( - "fmt" - "io/ioutil" - "strings" - "time" - - "github.com/docker/docker/pkg/discovery" -) - -// Discovery is exported -type Discovery struct { - heartbeat time.Duration - path string -} - -func init() { - Init() -} - -// Init is exported -func Init() { - discovery.Register("file", &Discovery{}) -} - -// Initialize is exported -func (s *Discovery) Initialize(path string, heartbeat time.Duration, ttl time.Duration, _ map[string]string) error { - s.path = path - s.heartbeat = heartbeat - return nil -} - -func parseFileContent(content []byte) []string { - var result []string - for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") { - line = strings.TrimSpace(line) - // Ignoring line starts with # - if strings.HasPrefix(line, "#") { - continue - } - // Inlined # comment also ignored. - if strings.Contains(line, "#") { - line = line[0:strings.Index(line, "#")] - // Trim additional spaces caused by above stripping. - line = strings.TrimSpace(line) - } - for _, ip := range discovery.Generate(line) { - result = append(result, ip) - } - } - return result -} - -func (s *Discovery) fetch() (discovery.Entries, error) { - fileContent, err := ioutil.ReadFile(s.path) - if err != nil { - return nil, fmt.Errorf("failed to read '%s': %v", s.path, err) - } - return discovery.CreateEntries(parseFileContent(fileContent)) -} - -// Watch is exported -func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - ch := make(chan discovery.Entries) - errCh := make(chan error) - ticker := time.NewTicker(s.heartbeat) - - go func() { - defer close(errCh) - defer close(ch) - - // Send the initial entries if available. - currentEntries, err := s.fetch() - if err != nil { - errCh <- err - } else { - ch <- currentEntries - } - - // Periodically send updates. - for { - select { - case <-ticker.C: - newEntries, err := s.fetch() - if err != nil { - errCh <- err - continue - } - - // Check if the file has really changed. - if !newEntries.Equals(currentEntries) { - ch <- newEntries - } - currentEntries = newEntries - case <-stopCh: - ticker.Stop() - return - } - } - }() - - return ch, errCh -} - -// Register is exported -func (s *Discovery) Register(addr string) error { - return discovery.ErrNotImplemented -} diff --git a/pkg/discovery/file/file_test.go b/pkg/discovery/file/file_test.go deleted file mode 100644 index 667f00ba0..000000000 --- a/pkg/discovery/file/file_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package file - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/docker/docker/pkg/discovery" - - "github.com/go-check/check" -) - -// Hook up gocheck into the "go test" runner. 
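
[Editor's note] parseFileContent above fixes the nodes-file format: one address per line, `#` starts a comment (whole-line or inline), blank lines are skipped, and each surviving line goes through Generate for range expansion. A sketch of the comment-stripping part only, with range expansion omitted (`parseNodesFile` is an illustrative name):

```go
package main

import (
	"fmt"
	"strings"
)

// parseNodesFile mirrors parseFileContent: strip '#' comments, trim
// whitespace, drop empty lines. The original additionally expands ranges
// such as 1.1.1.[1:2]:1111 via discovery.Generate.
func parseNodesFile(content string) []string {
	var result []string
	for _, line := range strings.Split(strings.TrimSpace(content), "\n") {
		if i := strings.Index(line, "#"); i >= 0 {
			line = line[:i] // drop whole-line and inline comments alike
		}
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}
		result = append(result, line)
	}
	return result
}

func main() {
	data := `
### cluster nodes ###
1.1.1.1:1111   # primary
# 2.2.2.2:2222 disabled
3.3.3.3:3333
`
	fmt.Println(parseNodesFile(data)) // [1.1.1.1:1111 3.3.3.3:3333]
}
```
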
-func Test(t *testing.T) { check.TestingT(t) } - -type DiscoverySuite struct{} - -var _ = check.Suite(&DiscoverySuite{}) - -func (s *DiscoverySuite) TestInitialize(c *check.C) { - d := &Discovery{} - d.Initialize("/path/to/file", 1000, 0, nil) - c.Assert(d.path, check.Equals, "/path/to/file") -} - -func (s *DiscoverySuite) TestNew(c *check.C) { - d, err := discovery.New("file:///path/to/file", 0, 0, nil) - c.Assert(err, check.IsNil) - c.Assert(d.(*Discovery).path, check.Equals, "/path/to/file") -} - -func (s *DiscoverySuite) TestContent(c *check.C) { - data := ` -1.1.1.[1:2]:1111 -2.2.2.[2:4]:2222 -` - ips := parseFileContent([]byte(data)) - c.Assert(ips, check.HasLen, 5) - c.Assert(ips[0], check.Equals, "1.1.1.1:1111") - c.Assert(ips[1], check.Equals, "1.1.1.2:1111") - c.Assert(ips[2], check.Equals, "2.2.2.2:2222") - c.Assert(ips[3], check.Equals, "2.2.2.3:2222") - c.Assert(ips[4], check.Equals, "2.2.2.4:2222") -} - -func (s *DiscoverySuite) TestRegister(c *check.C) { - discovery := &Discovery{path: "/path/to/file"} - c.Assert(discovery.Register("0.0.0.0"), check.NotNil) -} - -func (s *DiscoverySuite) TestParsingContentsWithComments(c *check.C) { - data := ` -### test ### -1.1.1.1:1111 # inline comment -# 2.2.2.2:2222 - ### empty line with comment - 3.3.3.3:3333 -### test ### -` - ips := parseFileContent([]byte(data)) - c.Assert(ips, check.HasLen, 2) - c.Assert("1.1.1.1:1111", check.Equals, ips[0]) - c.Assert("3.3.3.3:3333", check.Equals, ips[1]) -} - -func (s *DiscoverySuite) TestWatch(c *check.C) { - data := ` -1.1.1.1:1111 -2.2.2.2:2222 -` - expected := discovery.Entries{ - &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, - &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, - } - - // Create a temporary file and remove it. - tmp, err := ioutil.TempFile(os.TempDir(), "discovery-file-test") - c.Assert(err, check.IsNil) - c.Assert(tmp.Close(), check.IsNil) - c.Assert(os.Remove(tmp.Name()), check.IsNil) - - // Set up file discovery. - d := &Discovery{} - d.Initialize(tmp.Name(), 1000, 0, nil) - stopCh := make(chan struct{}) - ch, errCh := d.Watch(stopCh) - - // Make sure it fires errors since the file doesn't exist. - c.Assert(<-errCh, check.NotNil) - // We have to drain the error channel otherwise Watch will get stuck. - go func() { - for range errCh { - } - }() - - // Write the file and make sure we get the expected value back. - c.Assert(ioutil.WriteFile(tmp.Name(), []byte(data), 0600), check.IsNil) - c.Assert(<-ch, check.DeepEquals, expected) - - // Add a new entry and look it up. - expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) - f, err := os.OpenFile(tmp.Name(), os.O_APPEND|os.O_WRONLY, 0600) - c.Assert(err, check.IsNil) - c.Assert(f, check.NotNil) - _, err = f.WriteString("\n3.3.3.3:3333\n") - c.Assert(err, check.IsNil) - f.Close() - c.Assert(<-ch, check.DeepEquals, expected) - - // Stop and make sure it closes all channels. 
- close(stopCh) - c.Assert(<-ch, check.IsNil) - c.Assert(<-errCh, check.IsNil) -} diff --git a/pkg/discovery/generator.go b/pkg/discovery/generator.go deleted file mode 100644 index d22298298..000000000 --- a/pkg/discovery/generator.go +++ /dev/null @@ -1,35 +0,0 @@ -package discovery - -import ( - "fmt" - "regexp" - "strconv" -) - -// Generate takes care of IP generation -func Generate(pattern string) []string { - re, _ := regexp.Compile(`\[(.+):(.+)\]`) - submatch := re.FindStringSubmatch(pattern) - if submatch == nil { - return []string{pattern} - } - - from, err := strconv.Atoi(submatch[1]) - if err != nil { - return []string{pattern} - } - to, err := strconv.Atoi(submatch[2]) - if err != nil { - return []string{pattern} - } - - template := re.ReplaceAllString(pattern, "%d") - - var result []string - for val := from; val <= to; val++ { - entry := fmt.Sprintf(template, val) - result = append(result, entry) - } - - return result -} diff --git a/pkg/discovery/generator_test.go b/pkg/discovery/generator_test.go deleted file mode 100644 index 6281c4666..000000000 --- a/pkg/discovery/generator_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package discovery - -import ( - "github.com/go-check/check" -) - -func (s *DiscoverySuite) TestGeneratorNotGenerate(c *check.C) { - ips := Generate("127.0.0.1") - c.Assert(len(ips), check.Equals, 1) - c.Assert(ips[0], check.Equals, "127.0.0.1") -} - -func (s *DiscoverySuite) TestGeneratorWithPortNotGenerate(c *check.C) { - ips := Generate("127.0.0.1:8080") - c.Assert(len(ips), check.Equals, 1) - c.Assert(ips[0], check.Equals, "127.0.0.1:8080") -} - -func (s *DiscoverySuite) TestGeneratorMatchFailedNotGenerate(c *check.C) { - ips := Generate("127.0.0.[1]") - c.Assert(len(ips), check.Equals, 1) - c.Assert(ips[0], check.Equals, "127.0.0.[1]") -} - -func (s *DiscoverySuite) TestGeneratorWithPort(c *check.C) { - ips := Generate("127.0.0.[1:11]:2375") - c.Assert(len(ips), check.Equals, 11) - c.Assert(ips[0], check.Equals, "127.0.0.1:2375") - c.Assert(ips[1], check.Equals, "127.0.0.2:2375") - c.Assert(ips[2], check.Equals, "127.0.0.3:2375") - c.Assert(ips[3], check.Equals, "127.0.0.4:2375") - c.Assert(ips[4], check.Equals, "127.0.0.5:2375") - c.Assert(ips[5], check.Equals, "127.0.0.6:2375") - c.Assert(ips[6], check.Equals, "127.0.0.7:2375") - c.Assert(ips[7], check.Equals, "127.0.0.8:2375") - c.Assert(ips[8], check.Equals, "127.0.0.9:2375") - c.Assert(ips[9], check.Equals, "127.0.0.10:2375") - c.Assert(ips[10], check.Equals, "127.0.0.11:2375") -} - -func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeStart(c *check.C) { - malformedInput := "127.0.0.[x:11]:2375" - ips := Generate(malformedInput) - c.Assert(len(ips), check.Equals, 1) - c.Assert(ips[0], check.Equals, malformedInput) -} - -func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeEnd(c *check.C) { - malformedInput := "127.0.0.[1:x]:2375" - ips := Generate(malformedInput) - c.Assert(len(ips), check.Equals, 1) - c.Assert(ips[0], check.Equals, malformedInput) -} diff --git a/pkg/discovery/kv/kv.go b/pkg/discovery/kv/kv.go deleted file mode 100644 index f371c0cba..000000000 --- a/pkg/discovery/kv/kv.go +++ /dev/null @@ -1,192 +0,0 @@ -package kv - -import ( - "fmt" - "path" - "strings" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/discovery" - "github.com/docker/go-connections/tlsconfig" - "github.com/docker/libkv" - "github.com/docker/libkv/store" - "github.com/docker/libkv/store/consul" - "github.com/docker/libkv/store/etcd" - 
"github.com/docker/libkv/store/zookeeper" -) - -const ( - defaultDiscoveryPath = "docker/nodes" -) - -// Discovery is exported -type Discovery struct { - backend store.Backend - store store.Store - heartbeat time.Duration - ttl time.Duration - prefix string - path string -} - -func init() { - Init() -} - -// Init is exported -func Init() { - // Register to libkv - zookeeper.Register() - consul.Register() - etcd.Register() - - // Register to internal discovery service - discovery.Register("zk", &Discovery{backend: store.ZK}) - discovery.Register("consul", &Discovery{backend: store.CONSUL}) - discovery.Register("etcd", &Discovery{backend: store.ETCD}) -} - -// Initialize is exported -func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) error { - var ( - parts = strings.SplitN(uris, "/", 2) - addrs = strings.Split(parts[0], ",") - err error - ) - - // A custom prefix to the path can be optionally used. - if len(parts) == 2 { - s.prefix = parts[1] - } - - s.heartbeat = heartbeat - s.ttl = ttl - - // Use a custom path if specified in discovery options - dpath := defaultDiscoveryPath - if clusterOpts["kv.path"] != "" { - dpath = clusterOpts["kv.path"] - } - - s.path = path.Join(s.prefix, dpath) - - var config *store.Config - if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" { - log.Info("Initializing discovery with TLS") - tlsConfig, err := tlsconfig.Client(tlsconfig.Options{ - CAFile: clusterOpts["kv.cacertfile"], - CertFile: clusterOpts["kv.certfile"], - KeyFile: clusterOpts["kv.keyfile"], - }) - if err != nil { - return err - } - config = &store.Config{ - // Set ClientTLS to trigger https (bug in libkv/etcd) - ClientTLS: &store.ClientTLSConfig{ - CACertFile: clusterOpts["kv.cacertfile"], - CertFile: clusterOpts["kv.certfile"], - KeyFile: clusterOpts["kv.keyfile"], - }, - // The actual TLS config that will be used - TLS: tlsConfig, - } - } else { - log.Info("Initializing discovery without TLS") - } - - // Creates a new store, will ignore options given - // if not supported by the chosen store - s.store, err = libkv.NewStore(s.backend, addrs, config) - return err -} - -// Watch the store until either there's a store error or we receive a stop request. -// Returns false if we shouldn't attempt watching the store anymore (stop request received). -func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool { - for { - select { - case pairs := <-watchCh: - if pairs == nil { - return true - } - - log.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs)) - - // Convert `KVPair` into `discovery.Entry`. - addrs := make([]string, len(pairs)) - for _, pair := range pairs { - addrs = append(addrs, string(pair.Value)) - } - - entries, err := discovery.CreateEntries(addrs) - if err != nil { - errCh <- err - } else { - discoveryCh <- entries - } - case <-stopCh: - // We were requested to stop watching. - return false - } - } -} - -// Watch is exported -func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - ch := make(chan discovery.Entries) - errCh := make(chan error) - - go func() { - defer close(ch) - defer close(errCh) - - // Forever: Create a store watch, watch until we get an error and then try again. - // Will only stop if we receive a stopCh request. 
- for { - // Create the path to watch if it does not exist yet - exists, err := s.store.Exists(s.path) - if err != nil { - errCh <- err - } - if !exists { - if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil { - errCh <- err - } - } - - // Set up a watch. - watchCh, err := s.store.WatchTree(s.path, stopCh) - if err != nil { - errCh <- err - } else { - if !s.watchOnce(stopCh, watchCh, ch, errCh) { - return - } - } - - // If we get here it means the store watch channel was closed. This - // is unexpected so let's retry later. - errCh <- fmt.Errorf("Unexpected watch error") - time.Sleep(s.heartbeat) - } - }() - return ch, errCh -} - -// Register is exported -func (s *Discovery) Register(addr string) error { - opts := &store.WriteOptions{TTL: s.ttl} - return s.store.Put(path.Join(s.path, addr), []byte(addr), opts) -} - -// Store returns the underlying store used by KV discovery. -func (s *Discovery) Store() store.Store { - return s.store -} - -// Prefix returns the store prefix -func (s *Discovery) Prefix() string { - return s.prefix -} diff --git a/pkg/discovery/kv/kv_test.go b/pkg/discovery/kv/kv_test.go deleted file mode 100644 index dab3939dd..000000000 --- a/pkg/discovery/kv/kv_test.go +++ /dev/null @@ -1,324 +0,0 @@ -package kv - -import ( - "errors" - "io/ioutil" - "os" - "path" - "testing" - "time" - - "github.com/docker/docker/pkg/discovery" - "github.com/docker/libkv" - "github.com/docker/libkv/store" - - "github.com/go-check/check" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -type DiscoverySuite struct{} - -var _ = check.Suite(&DiscoverySuite{}) - -func (ds *DiscoverySuite) TestInitialize(c *check.C) { - storeMock := &FakeStore{ - Endpoints: []string{"127.0.0.1"}, - } - d := &Discovery{backend: store.CONSUL} - d.Initialize("127.0.0.1", 0, 0, nil) - d.store = storeMock - - s := d.store.(*FakeStore) - c.Assert(s.Endpoints, check.HasLen, 1) - c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1") - c.Assert(d.path, check.Equals, defaultDiscoveryPath) - - storeMock = &FakeStore{ - Endpoints: []string{"127.0.0.1:1234"}, - } - d = &Discovery{backend: store.CONSUL} - d.Initialize("127.0.0.1:1234/path", 0, 0, nil) - d.store = storeMock - - s = d.store.(*FakeStore) - c.Assert(s.Endpoints, check.HasLen, 1) - c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") - c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) - - storeMock = &FakeStore{ - Endpoints: []string{"127.0.0.1:1234", "127.0.0.2:1234", "127.0.0.3:1234"}, - } - d = &Discovery{backend: store.CONSUL} - d.Initialize("127.0.0.1:1234,127.0.0.2:1234,127.0.0.3:1234/path", 0, 0, nil) - d.store = storeMock - - s = d.store.(*FakeStore) - c.Assert(s.Endpoints, check.HasLen, 3) - c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") - c.Assert(s.Endpoints[1], check.Equals, "127.0.0.2:1234") - c.Assert(s.Endpoints[2], check.Equals, "127.0.0.3:1234") - - c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) -} - -// Extremely limited mock store so we can test initialization -type Mock struct { - // Endpoints passed to InitializeMock - Endpoints []string - - // Options passed to InitializeMock - Options *store.Config -} - -func NewMock(endpoints []string, options *store.Config) (store.Store, error) { - s := &Mock{} - s.Endpoints = endpoints - s.Options = options - return s, nil -} -func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error { - return errors.New("Put not supported") -} -func (s *Mock) Get(key string) 
(*store.KVPair, error) { - return nil, errors.New("Get not supported") -} -func (s *Mock) Delete(key string) error { - return errors.New("Delete not supported") -} - -// Exists mock -func (s *Mock) Exists(key string) (bool, error) { - return false, errors.New("Exists not supported") -} - -// Watch mock -func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { - return nil, errors.New("Watch not supported") -} - -// WatchTree mock -func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { - return nil, errors.New("WatchTree not supported") -} - -// NewLock mock -func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) { - return nil, errors.New("NewLock not supported") -} - -// List mock -func (s *Mock) List(prefix string) ([]*store.KVPair, error) { - return nil, errors.New("List not supported") -} - -// DeleteTree mock -func (s *Mock) DeleteTree(prefix string) error { - return errors.New("DeleteTree not supported") -} - -// AtomicPut mock -func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { - return false, nil, errors.New("AtomicPut not supported") -} - -// AtomicDelete mock -func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) { - return false, errors.New("AtomicDelete not supported") -} - -// Close mock -func (s *Mock) Close() { - return -} - -func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) { - cert := `-----BEGIN CERTIFICATE----- -MIIDCDCCAfKgAwIBAgIICifG7YeiQOEwCwYJKoZIhvcNAQELMBIxEDAOBgNVBAMT -B1Rlc3QgQ0EwHhcNMTUxMDAxMjMwMDAwWhcNMjAwOTI5MjMwMDAwWjASMRAwDgYD -VQQDEwdUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1wRC -O+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4+zE9h80aC4hz+6caRpds -+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhRSoSi3nY+B7F2E8cuz14q -V2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZrpXUyXxAvzXfpFXo1RhSb -UywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUerVYrCPq8vqfn//01qz55 -Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHojxOpXTBepUCIJLbtNnWFT -V44t9gh5IqIWtoBReQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/ -BAgwBgEB/wIBAjAdBgNVHQ4EFgQUZKUI8IIjIww7X/6hvwggQK4bD24wHwYDVR0j -BBgwFoAUZKUI8IIjIww7X/6hvwggQK4bD24wCwYJKoZIhvcNAQELA4IBAQDES2cz -7sCQfDCxCIWH7X8kpi/JWExzUyQEJ0rBzN1m3/x8ySRxtXyGekimBqQwQdFqlwMI -xzAQKkh3ue8tNSzRbwqMSyH14N1KrSxYS9e9szJHfUasoTpQGPmDmGIoRJuq1h6M -ej5x1SCJ7GWCR6xEXKUIE9OftXm9TdFzWa7Ja3OHz/mXteii8VXDuZ5ACq6EE5bY -8sP4gcICfJ5fTrpTlk9FIqEWWQrCGa5wk95PGEj+GJpNogjXQ97wVoo/Y3p1brEn -t5zjN9PAq4H1fuCMdNNA+p1DHNwd+ELTxcMAnb2ajwHvV6lKPXutrTFc4umJToBX -FpTxDmJHEV4bzUzh ------END CERTIFICATE----- -` - key := `-----BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEA1wRCO+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4 -+zE9h80aC4hz+6caRpds+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhR -SoSi3nY+B7F2E8cuz14qV2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZr -pXUyXxAvzXfpFXo1RhSbUywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUe -rVYrCPq8vqfn//01qz55Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHoj -xOpXTBepUCIJLbtNnWFTV44t9gh5IqIWtoBReQIDAQABAoIBAHSWipORGp/uKFXj -i/mut776x8ofsAxhnLBARQr93ID+i49W8H7EJGkOfaDjTICYC1dbpGrri61qk8sx -qX7p3v/5NzKwOIfEpirgwVIqSNYe/ncbxnhxkx6tXtUtFKmEx40JskvSpSYAhmmO -1XSx0E/PWaEN/nLgX/f1eWJIlxlQkk3QeqL+FGbCXI48DEtlJ9+MzMu4pAwZTpj5 -5qtXo5JJ0jRGfJVPAOznRsYqv864AhMdMIWguzk6EGnbaCWwPcfcn+h9a5LMdony -MDHfBS7bb5tkF3+AfnVY3IBMVx7YlsD9eAyajlgiKu4zLbwTRHjXgShy+4Oussz0 -ugNGnkECgYEA/hi+McrZC8C4gg6XqK8+9joD8tnyDZDz88BQB7CZqABUSwvjDqlP 
-L8hcwo/lzvjBNYGkqaFPUICGWKjeCtd8pPS2DCVXxDQX4aHF1vUur0uYNncJiV3N -XQz4Iemsa6wnKf6M67b5vMXICw7dw0HZCdIHD1hnhdtDz0uVpeevLZ8CgYEA2KCT -Y43lorjrbCgMqtlefkr3GJA9dey+hTzCiWEOOqn9RqGoEGUday0sKhiLofOgmN2B -LEukpKIey8s+Q/cb6lReajDVPDsMweX8i7hz3Wa4Ugp4Xa5BpHqu8qIAE2JUZ7bU -t88aQAYE58pUF+/Lq1QzAQdrjjzQBx6SrBxieecCgYEAvukoPZEC8mmiN1VvbTX+ -QFHmlZha3QaDxChB+QUe7bMRojEUL/fVnzkTOLuVFqSfxevaI/km9n0ac5KtAchV -xjp2bTnBb5EUQFqjopYktWA+xO07JRJtMfSEmjZPbbay1kKC7rdTfBm961EIHaRj -xZUf6M+rOE8964oGrdgdLlECgYEA046GQmx6fh7/82FtdZDRQp9tj3SWQUtSiQZc -qhO59Lq8mjUXz+MgBuJXxkiwXRpzlbaFB0Bca1fUoYw8o915SrDYf/Zu2OKGQ/qa -V81sgiVmDuEgycR7YOlbX6OsVUHrUlpwhY3hgfMe6UtkMvhBvHF/WhroBEIJm1pV -PXZ/CbMCgYEApNWVktFBjOaYfY6SNn4iSts1jgsQbbpglg3kT7PLKjCAhI6lNsbk -dyT7ut01PL6RaW4SeQWtrJIVQaM6vF3pprMKqlc5XihOGAmVqH7rQx9rtQB5TicL -BFrwkQE4HQtQBV60hYQUzzlSk44VFDz+jxIEtacRHaomDRh2FtOTz+I= ------END RSA PRIVATE KEY----- -` - certFile, err := ioutil.TempFile("", "cert") - c.Assert(err, check.IsNil) - defer os.Remove(certFile.Name()) - certFile.Write([]byte(cert)) - certFile.Close() - keyFile, err := ioutil.TempFile("", "key") - c.Assert(err, check.IsNil) - defer os.Remove(keyFile.Name()) - keyFile.Write([]byte(key)) - keyFile.Close() - - libkv.AddStore("mock", NewMock) - d := &Discovery{backend: "mock"} - err = d.Initialize("127.0.0.3:1234", 0, 0, map[string]string{ - "kv.cacertfile": certFile.Name(), - "kv.certfile": certFile.Name(), - "kv.keyfile": keyFile.Name(), - }) - c.Assert(err, check.IsNil) - s := d.store.(*Mock) - c.Assert(s.Options.TLS, check.NotNil) - c.Assert(s.Options.TLS.RootCAs, check.NotNil) - c.Assert(s.Options.TLS.Certificates, check.HasLen, 1) -} - -func (ds *DiscoverySuite) TestWatch(c *check.C) { - mockCh := make(chan []*store.KVPair) - - storeMock := &FakeStore{ - Endpoints: []string{"127.0.0.1:1234"}, - mockKVChan: mockCh, - } - - d := &Discovery{backend: store.CONSUL} - d.Initialize("127.0.0.1:1234/path", 0, 0, nil) - d.store = storeMock - - expected := discovery.Entries{ - &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, - &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, - } - kvs := []*store.KVPair{ - {Key: path.Join("path", defaultDiscoveryPath, "1.1.1.1"), Value: []byte("1.1.1.1:1111")}, - {Key: path.Join("path", defaultDiscoveryPath, "2.2.2.2"), Value: []byte("2.2.2.2:2222")}, - } - - stopCh := make(chan struct{}) - ch, errCh := d.Watch(stopCh) - - // It should fire an error since the first WatchTree call failed. - c.Assert(<-errCh, check.ErrorMatches, "test error") - // We have to drain the error channel otherwise Watch will get stuck. - go func() { - for range errCh { - } - }() - - // Push the entries into the store channel and make sure discovery emits. - mockCh <- kvs - c.Assert(<-ch, check.DeepEquals, expected) - - // Add a new entry. - expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) - kvs = append(kvs, &store.KVPair{Key: path.Join("path", defaultDiscoveryPath, "3.3.3.3"), Value: []byte("3.3.3.3:3333")}) - mockCh <- kvs - c.Assert(<-ch, check.DeepEquals, expected) - - close(mockCh) - // Give it enough time to call WatchTree. - time.Sleep(3 * time.Second) - - // Stop and make sure it closes all channels. - close(stopCh) - c.Assert(<-ch, check.IsNil) - c.Assert(<-errCh, check.IsNil) -} - -// FakeStore implements store.Store methods. It mocks all store -// function in a simple, naive way. 
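Test stores get wired in one of two ways: assigned directly to d.store after Initialize (the FakeStore below), or registered with libkv under a custom backend name (the NewMock factory above, which is also how TestInitializeWithCerts drives the TLS path). A condensed sketch of the registered variant, with hypothetical file paths and arbitrary durations:

	libkv.AddStore("mock", NewMock) // register a custom store constructor
	d := &Discovery{backend: "mock"}
	err := d.Initialize("127.0.0.1:1234/prefix", 10*time.Second, 30*time.Second, map[string]string{
		"kv.cacertfile": "/path/to/ca.pem",   // hypothetical paths; all three keys
		"kv.certfile":   "/path/to/cert.pem", // must be set before Initialize
		"kv.keyfile":    "/path/to/key.pem",  // enables TLS
	})

With this input the discovery path becomes "prefix/docker/nodes" unless "kv.path" overrides the default.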
-type FakeStore struct {
-	Endpoints  []string
-	Options    *store.Config
-	mockKVChan <-chan []*store.KVPair
-
-	watchTreeCallCount int
-}
-
-func (s *FakeStore) Put(key string, value []byte, options *store.WriteOptions) error {
-	return nil
-}
-
-func (s *FakeStore) Get(key string) (*store.KVPair, error) {
-	return nil, nil
-}
-
-func (s *FakeStore) Delete(key string) error {
-	return nil
-}
-
-func (s *FakeStore) Exists(key string) (bool, error) {
-	return true, nil
-}
-
-func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
-	return nil, nil
-}
-
-// WatchTree fails the first time and returns mockKVChan on subsequent calls.
-// This is the behavior the tests need; extend this mock if more is required.
-func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
-	if s.watchTreeCallCount == 0 {
-		s.watchTreeCallCount = 1
-		return nil, errors.New("test error")
-	}
-	// Subsequent calls succeed and hand back the mock channel.
-	return s.mockKVChan, nil
-}
-
-func (s *FakeStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
-	return nil, nil
-}
-
-func (s *FakeStore) List(directory string) ([]*store.KVPair, error) {
-	return []*store.KVPair{}, nil
-}
-
-func (s *FakeStore) DeleteTree(directory string) error {
-	return nil
-}
-
-func (s *FakeStore) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
-	return true, nil, nil
-}
-
-func (s *FakeStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
-	return true, nil
-}
-
-func (s *FakeStore) Close() {
-}
diff --git a/pkg/discovery/memory/memory.go b/pkg/discovery/memory/memory.go
deleted file mode 100644
index ba8b1f55f..000000000
--- a/pkg/discovery/memory/memory.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package memory
-
-import (
-	"sync"
-	"time"
-
-	"github.com/docker/docker/pkg/discovery"
-)
-
-// Discovery implements a discovery backend that keeps
-// data in memory.
-type Discovery struct {
-	heartbeat time.Duration
-	values    []string
-	mu        sync.Mutex
-}
-
-func init() {
-	Init()
-}
-
-// Init registers the memory backend on demand.
-func Init() {
-	discovery.Register("memory", &Discovery{})
-}
-
-// Initialize sets the heartbeat for the memory backend.
-func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error {
-	s.heartbeat = heartbeat
-	s.values = make([]string, 0)
-	return nil
-}
-
-// Watch sends periodic discovery updates to a channel.
-func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
-	ch := make(chan discovery.Entries)
-	errCh := make(chan error)
-	ticker := time.NewTicker(s.heartbeat)
-
-	go func() {
-		defer close(errCh)
-		defer close(ch)
-
-		// Send the initial entries if available.
-		var currentEntries discovery.Entries
-		var err error
-
-		s.mu.Lock()
-		if len(s.values) > 0 {
-			currentEntries, err = discovery.CreateEntries(s.values)
-		}
-		s.mu.Unlock()
-
-		if err != nil {
-			errCh <- err
-		} else if currentEntries != nil {
-			ch <- currentEntries
-		}
-
-		// Periodically send updates.
-		for {
-			select {
-			case <-ticker.C:
-				s.mu.Lock()
-				newEntries, err := discovery.CreateEntries(s.values)
-				s.mu.Unlock()
-				if err != nil {
-					errCh <- err
-					continue
-				}
-
-				// Check if the entries have really changed.
- if !newEntries.Equals(currentEntries) { - ch <- newEntries - } - currentEntries = newEntries - case <-stopCh: - ticker.Stop() - return - } - } - }() - - return ch, errCh -} - -// Register adds a new address to the discovery. -func (s *Discovery) Register(addr string) error { - s.mu.Lock() - s.values = append(s.values, addr) - s.mu.Unlock() - return nil -} diff --git a/pkg/discovery/memory/memory_test.go b/pkg/discovery/memory/memory_test.go deleted file mode 100644 index c2da0a068..000000000 --- a/pkg/discovery/memory/memory_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package memory - -import ( - "testing" - - "github.com/docker/docker/pkg/discovery" - "github.com/go-check/check" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -type discoverySuite struct{} - -var _ = check.Suite(&discoverySuite{}) - -func (s *discoverySuite) TestWatch(c *check.C) { - d := &Discovery{} - d.Initialize("foo", 1000, 0, nil) - stopCh := make(chan struct{}) - ch, errCh := d.Watch(stopCh) - - // We have to drain the error channel otherwise Watch will get stuck. - go func() { - for range errCh { - } - }() - - expected := discovery.Entries{ - &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, - } - - c.Assert(d.Register("1.1.1.1:1111"), check.IsNil) - c.Assert(<-ch, check.DeepEquals, expected) - - expected = discovery.Entries{ - &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, - &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, - } - - c.Assert(d.Register("2.2.2.2:2222"), check.IsNil) - c.Assert(<-ch, check.DeepEquals, expected) - - // Stop and make sure it closes all channels. - close(stopCh) - c.Assert(<-ch, check.IsNil) - c.Assert(<-errCh, check.IsNil) -} diff --git a/pkg/discovery/nodes/nodes.go b/pkg/discovery/nodes/nodes.go deleted file mode 100644 index c0e3c07b2..000000000 --- a/pkg/discovery/nodes/nodes.go +++ /dev/null @@ -1,54 +0,0 @@ -package nodes - -import ( - "fmt" - "strings" - "time" - - "github.com/docker/docker/pkg/discovery" -) - -// Discovery is exported -type Discovery struct { - entries discovery.Entries -} - -func init() { - Init() -} - -// Init is exported -func Init() { - discovery.Register("nodes", &Discovery{}) -} - -// Initialize is exported -func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error { - for _, input := range strings.Split(uris, ",") { - for _, ip := range discovery.Generate(input) { - entry, err := discovery.NewEntry(ip) - if err != nil { - return fmt.Errorf("%s, please check you are using the correct discovery (missing token:// ?)", err.Error()) - } - s.entries = append(s.entries, entry) - } - } - - return nil -} - -// Watch is exported -func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - ch := make(chan discovery.Entries) - go func() { - defer close(ch) - ch <- s.entries - <-stopCh - }() - return ch, nil -} - -// Register is exported -func (s *Discovery) Register(addr string) error { - return discovery.ErrNotImplemented -} diff --git a/pkg/discovery/nodes/nodes_test.go b/pkg/discovery/nodes/nodes_test.go deleted file mode 100644 index e26568cf5..000000000 --- a/pkg/discovery/nodes/nodes_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package nodes - -import ( - "testing" - - "github.com/docker/docker/pkg/discovery" - - "github.com/go-check/check" -) - -// Hook up gocheck into the "go test" runner. 
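Whatever the backend, consumers follow the same contract: entries arrive on one channel, errors on a second, and the error channel must be drained or the watcher goroutine blocks on send (hence the drain goroutines in the tests above). A backend-agnostic sketch, assuming discovery.New routes the URI scheme to a registered backend the way the file backend's "file://" test does:

	package main

	import (
		"log"
		"time"

		"github.com/docker/docker/pkg/discovery"
		_ "github.com/docker/docker/pkg/discovery/memory" // register the backend
	)

	func main() {
		d, err := discovery.New("memory://local", 10*time.Second, 0, nil)
		if err != nil {
			log.Fatal(err)
		}
		stopCh := make(chan struct{})
		entriesCh, errCh := d.Watch(stopCh)
		go func() {
			for err := range errCh { // drain, or the watcher blocks
				log.Println("discovery error:", err)
			}
		}()
		for entries := range entriesCh {
			log.Printf("cluster has %d nodes", len(entries))
		}
	}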
-func Test(t *testing.T) { check.TestingT(t) } - -type DiscoverySuite struct{} - -var _ = check.Suite(&DiscoverySuite{}) - -func (s *DiscoverySuite) TestInitialize(c *check.C) { - d := &Discovery{} - d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) - c.Assert(len(d.entries), check.Equals, 2) - c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") - c.Assert(d.entries[1].String(), check.Equals, "2.2.2.2:2222") -} - -func (s *DiscoverySuite) TestInitializeWithPattern(c *check.C) { - d := &Discovery{} - d.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0, 0, nil) - c.Assert(len(d.entries), check.Equals, 5) - c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") - c.Assert(d.entries[1].String(), check.Equals, "1.1.1.2:1111") - c.Assert(d.entries[2].String(), check.Equals, "2.2.2.2:2222") - c.Assert(d.entries[3].String(), check.Equals, "2.2.2.3:2222") - c.Assert(d.entries[4].String(), check.Equals, "2.2.2.4:2222") -} - -func (s *DiscoverySuite) TestWatch(c *check.C) { - d := &Discovery{} - d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) - expected := discovery.Entries{ - &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, - &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, - } - ch, _ := d.Watch(nil) - c.Assert(expected.Equals(<-ch), check.Equals, true) -} - -func (s *DiscoverySuite) TestRegister(c *check.C) { - d := &Discovery{} - c.Assert(d.Register("0.0.0.0"), check.NotNil) -} diff --git a/pkg/filenotify/filenotify.go b/pkg/filenotify/filenotify.go deleted file mode 100644 index 23befae67..000000000 --- a/pkg/filenotify/filenotify.go +++ /dev/null @@ -1,40 +0,0 @@ -// Package filenotify provides a mechanism for watching file(s) for changes. -// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. -// These are wrapped up in a common interface so that either can be used interchangeably in your code. 
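A minimal sketch of the intended usage of the package described above, falling back transparently from fs events to polling (the watched path is hypothetical):

	package main

	import (
		"fmt"
		"log"

		"github.com/docker/docker/pkg/filenotify"
	)

	func main() {
		w, err := filenotify.New() // fs-event watcher when available, else the poller
		if err != nil {
			log.Fatal(err)
		}
		defer w.Close()
		if err := w.Add("/var/log/app.log"); err != nil {
			log.Fatal(err)
		}
		for {
			select {
			case e := <-w.Events():
				fmt.Println("event:", e.Op, e.Name)
			case err := <-w.Errors():
				fmt.Println("watch error:", err)
			}
		}
	}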
-package filenotify
-
-import "gopkg.in/fsnotify.v1"
-
-// FileWatcher is an interface for implementing file notification watchers
-type FileWatcher interface {
-	Events() <-chan fsnotify.Event
-	Errors() <-chan error
-	Add(name string) error
-	Remove(name string) error
-	Close() error
-}
-
-// New tries to use an fs-event watcher, and falls back to the poller if there is an error
-func New() (FileWatcher, error) {
-	if watcher, err := NewEventWatcher(); err == nil {
-		return watcher, nil
-	}
-	return NewPollingWatcher(), nil
-}
-
-// NewPollingWatcher returns a poll-based file watcher
-func NewPollingWatcher() FileWatcher {
-	return &filePoller{
-		events: make(chan fsnotify.Event),
-		errors: make(chan error),
-	}
-}
-
-// NewEventWatcher returns an fs-event based file watcher
-func NewEventWatcher() (FileWatcher, error) {
-	watcher, err := fsnotify.NewWatcher()
-	if err != nil {
-		return nil, err
-	}
-	return &fsNotifyWatcher{watcher}, nil
-}
diff --git a/pkg/filenotify/fsnotify.go b/pkg/filenotify/fsnotify.go
deleted file mode 100644
index 420388358..000000000
--- a/pkg/filenotify/fsnotify.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package filenotify
-
-import "gopkg.in/fsnotify.v1"
-
-// fsNotifyWatcher wraps the fsnotify package to satisfy the FileWatcher interface
-type fsNotifyWatcher struct {
-	*fsnotify.Watcher
-}
-
-// Events returns the fsnotify event channel receiver
-func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event {
-	return w.Watcher.Events
-}
-
-// Errors returns the fsnotify error channel receiver
-func (w *fsNotifyWatcher) Errors() <-chan error {
-	return w.Watcher.Errors
-}
diff --git a/pkg/filenotify/poller.go b/pkg/filenotify/poller.go
deleted file mode 100644
index 526108534..000000000
--- a/pkg/filenotify/poller.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package filenotify
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"sync"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-
-	"gopkg.in/fsnotify.v1"
-)
-
-var (
-	// errPollerClosed is returned when the poller is closed
-	errPollerClosed = errors.New("poller is closed")
-	// errNoSuchWatch is returned when trying to remove a watch that doesn't exist
-	errNoSuchWatch = errors.New("watch does not exist")
-)
-
-// watchWaitTime is the time to wait between file poll loops
-const watchWaitTime = 200 * time.Millisecond
-
-// filePoller is used to poll files for changes, especially in cases where fsnotify
-// can't be run (e.g. when inotify handles are exhausted)
-// filePoller satisfies the FileWatcher interface
-type filePoller struct {
-	// watches is the list of files currently being polled; close the associated channel to stop the watch
-	watches map[string]chan struct{}
-	// events is the channel to listen to for watch events
-	events chan fsnotify.Event
-	// errors is the channel to listen to for watch errors
-	errors chan error
-	// mu locks the poller for modification
-	mu sync.Mutex
-	// closed is used to specify when the poller has already closed
-	closed bool
-}
-
-// Add adds a filename to the list of watches
-// once added the file is polled for changes in a separate goroutine
-func (w *filePoller) Add(name string) error {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-
-	if w.closed {
-		return errPollerClosed
-	}
-
-	f, err := os.Open(name)
-	if err != nil {
-		return err
-	}
-	fi, err := os.Stat(name)
-	if err != nil {
-		return err
-	}
-
-	if w.watches == nil {
-		w.watches = make(map[string]chan struct{})
-	}
-	if _, exists := w.watches[name]; exists {
-		return fmt.Errorf("watch exists")
-	}
-	chClose := make(chan struct{})
-	w.watches[name] = chClose
-
-	go w.watch(f, fi, chClose)
-	return nil
-}
-
-// Remove stops and removes the watch with the specified name
-func (w *filePoller) Remove(name string) error {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	return w.remove(name)
-}
-
-func (w *filePoller) remove(name string) error {
-	if w.closed {
-		return errPollerClosed
-	}
-
-	chClose, exists := w.watches[name]
-	if !exists {
-		return errNoSuchWatch
-	}
-	close(chClose)
-	delete(w.watches, name)
-	return nil
-}
-
-// Events returns the event channel
-// This is used for notifications on events about watched files
-func (w *filePoller) Events() <-chan fsnotify.Event {
-	return w.events
-}
-
-// Errors returns the errors channel
-// This is used for notifications about errors on watched files
-func (w *filePoller) Errors() <-chan error {
-	return w.errors
-}
-
-// Close closes the poller
-// All watches are stopped, removed, and the poller cannot be added to
-func (w *filePoller) Close() error {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-
-	if w.closed {
-		return nil
-	}
-
-	w.closed = true
-	for name := range w.watches {
-		w.remove(name)
-		delete(w.watches, name)
-	}
-	return nil
-}
-
-// sendEvent publishes the specified event to the events channel
-func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error {
-	select {
-	case w.events <- e:
-	case <-chClose:
-		return fmt.Errorf("closed")
-	}
-	return nil
-}
-
-// sendErr publishes the specified error to the errors channel
-func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error {
-	select {
-	case w.errors <- e:
-	case <-chClose:
-		return fmt.Errorf("closed")
-	}
-	return nil
-}
-
-// watch is responsible for polling the specified file for changes
-// upon finding changes to a file or errors, sendEvent/sendErr is called
-func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) {
-	defer f.Close()
-	for {
-		time.Sleep(watchWaitTime)
-		select {
-		case <-chClose:
-			logrus.Debugf("watch for %s closed", f.Name())
-			return
-		default:
-		}
-
-		fi, err := os.Stat(f.Name())
-		if err != nil {
-			// If we got an error here and lastFi is not set, we can presume that nothing has changed.
-			// This should be safe since a stat is performed before watch() is started; if that stat
-			// errors, watch() is never called.
-			if lastFi == nil {
-				continue
-			}
-			// If it doesn't exist at this point, it must have been removed;
-			// no need to send the error here since this is a valid operation
-			if os.IsNotExist(err) {
-				if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil {
-					return
-				}
-				lastFi = nil
-				continue
-			}
-			// at this point, send the error
-			if err := w.sendErr(err, chClose); err != nil {
-				return
-			}
-			continue
-		}
-
-		if lastFi == nil {
-			if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, chClose); err != nil {
-				return
-			}
-			lastFi = fi
-			continue
-		}
-
-		if fi.Mode() != lastFi.Mode() {
-			if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil {
-				return
-			}
-			lastFi = fi
-			continue
-		}
-
-		if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() {
-			if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil {
-				return
-			}
-			lastFi = fi
-			continue
-		}
-	}
-}
diff --git a/pkg/filenotify/poller_test.go b/pkg/filenotify/poller_test.go
deleted file mode 100644
index 4f5026237..000000000
--- a/pkg/filenotify/poller_test.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package filenotify
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-	"runtime"
-	"testing"
-	"time"
-
-	"gopkg.in/fsnotify.v1"
-)
-
-func TestPollerAddRemove(t *testing.T) {
-	w := NewPollingWatcher()
-
-	if err := w.Add("no-such-file"); err == nil {
-		t.Fatal("should have gotten error when adding a non-existent file")
-	}
-	if err := w.Remove("no-such-file"); err == nil {
-		t.Fatal("should have gotten error when removing non-existent watch")
-	}
-
-	f, err := ioutil.TempFile("", "asdf")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(f.Name())
-
-	if err := w.Add(f.Name()); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := w.Remove(f.Name()); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestPollerEvent(t *testing.T) {
-	if runtime.GOOS == "windows" {
-		t.Skip("No chmod on Windows")
-	}
-	w := NewPollingWatcher()
-
-	f, err := ioutil.TempFile("", "test-poller")
-	if err != nil {
-		t.Fatal("error creating temp file")
-	}
-	defer os.RemoveAll(f.Name())
-	f.Close()
-
-	if err := w.Add(f.Name()); err != nil {
-		t.Fatal(err)
-	}
-
-	select {
-	case <-w.Events():
-		t.Fatal("got event before anything happened")
-	case <-w.Errors():
-		t.Fatal("got error before anything happened")
-	default:
-	}
-
-	if err := ioutil.WriteFile(f.Name(), []byte("hello"), 0644); err != nil {
-		t.Fatal(err)
-	}
-	if err := assertEvent(w, fsnotify.Write); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := os.Chmod(f.Name(), 0600); err != nil {
-		t.Fatal(err)
-	}
-	if err := assertEvent(w, fsnotify.Chmod); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := os.Remove(f.Name()); err != nil {
-		t.Fatal(err)
-	}
-	if err := assertEvent(w, fsnotify.Remove); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestPollerClose(t *testing.T) {
-	w := NewPollingWatcher()
-	if err := w.Close(); err != nil {
-		t.Fatal(err)
-	}
-	// test double-close
-	if err := w.Close(); err != nil {
-		t.Fatal(err)
-	}
-
-	f, err := ioutil.TempFile("", "asdf")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(f.Name())
-	if err := w.Add(f.Name()); err == nil {
-		t.Fatal("should have gotten error adding watch for closed watcher")
-	}
-}
-
-func assertEvent(w FileWatcher, eType fsnotify.Op) error {
-	var err error
-	select {
-	case e := <-w.Events():
-		if e.Op != eType {
-			err = fmt.Errorf("got wrong event type, expected %q: %v", eType, e)
-		}
-	case e := <-w.Errors():
-		err = fmt.Errorf("got unexpected error waiting for events %v: %v", eType, e)
-	case <-time.After(watchWaitTime *
3): - err = fmt.Errorf("timeout waiting for event %v", eType) - } - return err -} diff --git a/pkg/gitutils/gitutils.go b/pkg/gitutils/gitutils.go deleted file mode 100644 index ded091f2a..000000000 --- a/pkg/gitutils/gitutils.go +++ /dev/null @@ -1,100 +0,0 @@ -package gitutils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/urlutil" -) - -// Clone clones a repository into a newly created directory which -// will be under "docker-build-git" -func Clone(remoteURL string) (string, error) { - if !urlutil.IsGitTransport(remoteURL) { - remoteURL = "https://" + remoteURL - } - root, err := ioutil.TempDir("", "docker-build-git") - if err != nil { - return "", err - } - - u, err := url.Parse(remoteURL) - if err != nil { - return "", err - } - - fragment := u.Fragment - clone := cloneArgs(u, root) - - if output, err := git(clone...); err != nil { - return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - - return checkoutGit(fragment, root) -} - -func cloneArgs(remoteURL *url.URL, root string) []string { - args := []string{"clone", "--recursive"} - shallow := len(remoteURL.Fragment) == 0 - - if shallow && strings.HasPrefix(remoteURL.Scheme, "http") { - res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) - if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { - shallow = false - } - } - - if shallow { - args = append(args, "--depth", "1") - } - - if remoteURL.Fragment != "" { - remoteURL.Fragment = "" - } - - return append(args, remoteURL.String(), root) -} - -func checkoutGit(fragment, root string) (string, error) { - refAndDir := strings.SplitN(fragment, ":", 2) - - if len(refAndDir[0]) != 0 { - if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err != nil { - return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - } - - if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { - newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, refAndDir[1]), root) - if err != nil { - return "", fmt.Errorf("Error setting git context, %q not within git root: %s", refAndDir[1], err) - } - - fi, err := os.Stat(newCtx) - if err != nil { - return "", err - } - if !fi.IsDir() { - return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx) - } - root = newCtx - } - - return root, nil -} - -func gitWithinDir(dir string, args ...string) ([]byte, error) { - a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} - return git(append(a, args...)...) 
-} - -func git(args ...string) ([]byte, error) { - return exec.Command("git", args...).CombinedOutput() -} diff --git a/pkg/gitutils/gitutils_test.go b/pkg/gitutils/gitutils_test.go deleted file mode 100644 index d197058d2..000000000 --- a/pkg/gitutils/gitutils_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package gitutils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" - "testing" -) - -func TestCloneArgsSmartHttp(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - serverURL, _ := url.Parse(server.URL) - - serverURL.Path = "/repo.git" - gitURL := serverURL.String() - - mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { - q := r.URL.Query().Get("service") - w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) - }) - - args := cloneArgs(serverURL, "/tmp") - exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsDumbHttp(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - serverURL, _ := url.Parse(server.URL) - - serverURL.Path = "/repo.git" - gitURL := serverURL.String() - - mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - }) - - args := cloneArgs(serverURL, "/tmp") - exp := []string{"clone", "--recursive", gitURL, "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsGit(t *testing.T) { - u, _ := url.Parse("git://github.com/docker/docker") - args := cloneArgs(u, "/tmp") - exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsStripFragment(t *testing.T) { - u, _ := url.Parse("git://github.com/docker/docker#test") - args := cloneArgs(u, "/tmp") - exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func gitGetConfig(name string) string { - b, err := git([]string{"config", "--get", name}...) - if err != nil { - // since we are interested in empty or non empty string, - // we can safely ignore the err here. 
- return "" - } - return strings.TrimSpace(string(b)) -} - -func TestCheckoutGit(t *testing.T) { - root, err := ioutil.TempDir("", "docker-build-git-checkout") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) - - autocrlf := gitGetConfig("core.autocrlf") - if !(autocrlf == "true" || autocrlf == "false" || - autocrlf == "input" || autocrlf == "") { - t.Logf("unknown core.autocrlf value: \"%s\"", autocrlf) - } - eol := "\n" - if autocrlf == "true" { - eol = "\r\n" - } - - gitDir := filepath.Join(root, "repo") - _, err = git("init", gitDir) - if err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test"); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil { - t.Fatal(err) - } - - subDir := filepath.Join(gitDir, "subdir") - if err = os.Mkdir(subDir, 0755); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil { - t.Fatal(err) - } - - if runtime.GOOS != "windows" { - if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { - t.Fatal(err) - } - - if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil { - t.Fatal(err) - } - } - - if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil { - t.Fatal(err) - } - - type singleCase struct { - frag string - exp string - fail bool - } - - cases := []singleCase{ - {"", "FROM scratch", false}, - {"master", "FROM scratch", false}, - {":subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, - {":nosubdir", "", true}, // missing directory error - {":Dockerfile", "", true}, // not a directory error - {"master:nosubdir", "", true}, - {"master:subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, - {"master:../subdir", "", true}, - {"test", "FROM scratch" + eol + "EXPOSE 3000", false}, - {"test:", "FROM scratch" + eol + "EXPOSE 3000", false}, - {"test:subdir", "FROM busybox" + eol + "EXPOSE 5000", false}, - } - - if runtime.GOOS != "windows" { - // Windows GIT (2.7.1 x64) does not support parentlink/absolutelink. 
Sample output below - // git --work-tree .\repo --git-dir .\repo\.git add -A - // error: readlink("absolutelink"): Function not implemented - // error: unable to index file absolutelink - // fatal: adding files failed - cases = append(cases, singleCase{frag: "master:absolutelink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) - cases = append(cases, singleCase{frag: "master:parentlink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) - } - - for _, c := range cases { - r, err := checkoutGit(c.frag, gitDir) - - fail := err != nil - if fail != c.fail { - t.Fatalf("Expected %v failure, error was %v\n", c.fail, err) - } - if c.fail { - continue - } - - b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) - if err != nil { - t.Fatal(err) - } - - if string(b) != c.exp { - t.Fatalf("Expected %v, was %v\n", c.exp, string(b)) - } - } -} diff --git a/pkg/graphdb/conn_sqlite3.go b/pkg/graphdb/conn_sqlite3.go deleted file mode 100644 index dbcf44c25..000000000 --- a/pkg/graphdb/conn_sqlite3.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build cgo - -package graphdb - -import "database/sql" - -// NewSqliteConn opens a connection to a sqlite -// database. -func NewSqliteConn(root string) (*Database, error) { - conn, err := sql.Open("sqlite3", root) - if err != nil { - return nil, err - } - return NewDatabase(conn) -} diff --git a/pkg/graphdb/conn_sqlite3_unix.go b/pkg/graphdb/conn_sqlite3_unix.go deleted file mode 100644 index f932fff28..000000000 --- a/pkg/graphdb/conn_sqlite3_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build cgo,!windows - -package graphdb - -import ( - _ "github.com/mattn/go-sqlite3" // registers sqlite -) diff --git a/pkg/graphdb/conn_sqlite3_windows.go b/pkg/graphdb/conn_sqlite3_windows.go deleted file mode 100644 index 52590303d..000000000 --- a/pkg/graphdb/conn_sqlite3_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build cgo,windows - -package graphdb - -import ( - _ "github.com/mattn/go-sqlite3" // registers sqlite -) diff --git a/pkg/graphdb/conn_unsupported.go b/pkg/graphdb/conn_unsupported.go deleted file mode 100644 index cf977050d..000000000 --- a/pkg/graphdb/conn_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !cgo - -package graphdb - -// NewSqliteConn return a new sqlite connection. -func NewSqliteConn(root string) (*Database, error) { - panic("Not implemented") -} diff --git a/pkg/graphdb/graphdb.go b/pkg/graphdb/graphdb.go deleted file mode 100644 index eca433fa8..000000000 --- a/pkg/graphdb/graphdb.go +++ /dev/null @@ -1,551 +0,0 @@ -package graphdb - -import ( - "database/sql" - "fmt" - "path" - "strings" - "sync" -) - -const ( - createEntityTable = ` - CREATE TABLE IF NOT EXISTS entity ( - id text NOT NULL PRIMARY KEY - );` - - createEdgeTable = ` - CREATE TABLE IF NOT EXISTS edge ( - "entity_id" text NOT NULL, - "parent_id" text NULL, - "name" text NOT NULL, - CONSTRAINT "parent_fk" FOREIGN KEY ("parent_id") REFERENCES "entity" ("id"), - CONSTRAINT "entity_fk" FOREIGN KEY ("entity_id") REFERENCES "entity" ("id") - ); - ` - - createEdgeIndices = ` - CREATE UNIQUE INDEX IF NOT EXISTS "name_parent_ix" ON "edge" (parent_id, name); - ` -) - -// Entity with a unique id. -type Entity struct { - id string -} - -// An Edge connects two entities together. -type Edge struct { - EntityID string - Name string - ParentID string -} - -// Entities stores the list of entities. -type Entities map[string]*Entity - -// Edges stores the relationships between entities. -type Edges []*Edge - -// WalkFunc is a function invoked to process an individual entity. 
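Taken together, the pieces below form a small path-addressed graph over two sqlite tables. A sketch of typical use, assuming the cgo sqlite driver is available (path and id are illustrative):

	package main

	import (
		"fmt"
		"log"

		"github.com/docker/docker/pkg/graphdb"
	)

	func main() {
		db, err := graphdb.NewSqliteConn("/tmp/graph.db") // creates tables and the "/" root entity
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		if _, err := db.Set("/containers/web", "abc123"); err != nil {
			log.Fatal(err)
		}
		err = db.Walk("/", func(p string, e *graphdb.Entity) error {
			fmt.Println(p, "->", e.ID())
			return nil
		}, -1)
		if err != nil {
			log.Fatal(err)
		}
	}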
-type WalkFunc func(fullPath string, entity *Entity) error - -// Database is a graph database for storing entities and their relationships. -type Database struct { - conn *sql.DB - mux sync.RWMutex -} - -// IsNonUniqueNameError processes the error to check if it's caused by -// a constraint violation. -// This is necessary because the error isn't the same across various -// sqlite versions. -func IsNonUniqueNameError(err error) bool { - str := err.Error() - // sqlite 3.7.17-1ubuntu1 returns: - // Set failure: Abort due to constraint violation: columns parent_id, name are not unique - if strings.HasSuffix(str, "name are not unique") { - return true - } - // sqlite-3.8.3-1.fc20 returns: - // Set failure: Abort due to constraint violation: UNIQUE constraint failed: edge.parent_id, edge.name - if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") { - return true - } - // sqlite-3.6.20-1.el6 returns: - // Set failure: Abort due to constraint violation: constraint failed - if strings.HasSuffix(str, "constraint failed") { - return true - } - return false -} - -// NewDatabase creates a new graph database initialized with a root entity. -func NewDatabase(conn *sql.DB) (*Database, error) { - if conn == nil { - return nil, fmt.Errorf("Database connection cannot be nil") - } - db := &Database{conn: conn} - - // Create root entities - tx, err := conn.Begin() - if err != nil { - return nil, err - } - - if _, err := tx.Exec(createEntityTable); err != nil { - return nil, err - } - if _, err := tx.Exec(createEdgeTable); err != nil { - return nil, err - } - if _, err := tx.Exec(createEdgeIndices); err != nil { - return nil, err - } - - if _, err := tx.Exec("DELETE FROM entity where id = ?", "0"); err != nil { - tx.Rollback() - return nil, err - } - - if _, err := tx.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { - tx.Rollback() - return nil, err - } - - if _, err := tx.Exec("DELETE FROM edge where entity_id=? and name=?", "0", "/"); err != nil { - tx.Rollback() - return nil, err - } - - if _, err := tx.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { - tx.Rollback() - return nil, err - } - - if err := tx.Commit(); err != nil { - return nil, err - } - - return db, nil -} - -// Close the underlying connection to the database. -func (db *Database) Close() error { - return db.conn.Close() -} - -// Set the entity id for a given path. -func (db *Database) Set(fullPath, id string) (*Entity, error) { - db.mux.Lock() - defer db.mux.Unlock() - - tx, err := db.conn.Begin() - if err != nil { - return nil, err - } - - var entityID string - if err := tx.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityID); err != nil { - if err == sql.ErrNoRows { - if _, err := tx.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { - tx.Rollback() - return nil, err - } - } else { - tx.Rollback() - return nil, err - } - } - e := &Entity{id} - - parentPath, name := splitPath(fullPath) - if err := db.setEdge(parentPath, name, e, tx); err != nil { - tx.Rollback() - return nil, err - } - - if err := tx.Commit(); err != nil { - return nil, err - } - return e, nil -} - -// Exists returns true if a name already exists in the database. 
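Because the edge table enforces a unique (parent_id, name) pair, a second Set at an existing path fails, and IsNonUniqueNameError lets callers tell that constraint violation apart from other SQL errors. Continuing the sketch above:

	if _, err := db.Set("/containers/web", "def456"); err != nil {
		if graphdb.IsNonUniqueNameError(err) {
			fmt.Println("path already taken")
		} else {
			log.Fatal(err)
		}
	}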
-func (db *Database) Exists(name string) bool { - db.mux.RLock() - defer db.mux.RUnlock() - - e, err := db.get(name) - if err != nil { - return false - } - return e != nil -} - -func (db *Database) setEdge(parentPath, name string, e *Entity, tx *sql.Tx) error { - parent, err := db.get(parentPath) - if err != nil { - return err - } - if parent.id == e.id { - return fmt.Errorf("Cannot set self as child") - } - - if _, err := tx.Exec("INSERT INTO edge (parent_id, name, entity_id) VALUES (?,?,?);", parent.id, name, e.id); err != nil { - return err - } - return nil -} - -// RootEntity returns the root "/" entity for the database. -func (db *Database) RootEntity() *Entity { - return &Entity{ - id: "0", - } -} - -// Get returns the entity for a given path. -func (db *Database) Get(name string) *Entity { - db.mux.RLock() - defer db.mux.RUnlock() - - e, err := db.get(name) - if err != nil { - return nil - } - return e -} - -func (db *Database) get(name string) (*Entity, error) { - e := db.RootEntity() - // We always know the root name so return it if - // it is requested - if name == "/" { - return e, nil - } - - parts := split(name) - for i := 1; i < len(parts); i++ { - p := parts[i] - if p == "" { - continue - } - - next := db.child(e, p) - if next == nil { - return nil, fmt.Errorf("Cannot find child for %s", name) - } - e = next - } - return e, nil - -} - -// List all entities by from the name. -// The key will be the full path of the entity. -func (db *Database) List(name string, depth int) Entities { - db.mux.RLock() - defer db.mux.RUnlock() - - out := Entities{} - e, err := db.get(name) - if err != nil { - return out - } - - children, err := db.children(e, name, depth, nil) - if err != nil { - return out - } - - for _, c := range children { - out[c.FullPath] = c.Entity - } - return out -} - -// Walk through the child graph of an entity, calling walkFunc for each child entity. -// It is safe for walkFunc to call graph functions. -func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error { - children, err := db.Children(name, depth) - if err != nil { - return err - } - - // Note: the database lock must not be held while calling walkFunc - for _, c := range children { - if err := walkFunc(c.FullPath, c.Entity); err != nil { - return err - } - } - return nil -} - -// Children returns the children of the specified entity. -func (db *Database) Children(name string, depth int) ([]WalkMeta, error) { - db.mux.RLock() - defer db.mux.RUnlock() - - e, err := db.get(name) - if err != nil { - return nil, err - } - - return db.children(e, name, depth, nil) -} - -// Parents returns the parents of a specified entity. -func (db *Database) Parents(name string) ([]string, error) { - db.mux.RLock() - defer db.mux.RUnlock() - - e, err := db.get(name) - if err != nil { - return nil, err - } - return db.parents(e) -} - -// Refs returns the reference count for a specified id. -func (db *Database) Refs(id string) int { - db.mux.RLock() - defer db.mux.RUnlock() - - var count int - if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil { - return 0 - } - return count -} - -// RefPaths returns all the id's path references. 
-func (db *Database) RefPaths(id string) Edges { - db.mux.RLock() - defer db.mux.RUnlock() - - refs := Edges{} - - rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id) - if err != nil { - return refs - } - defer rows.Close() - - for rows.Next() { - var name string - var parentID string - if err := rows.Scan(&name, &parentID); err != nil { - return refs - } - refs = append(refs, &Edge{ - EntityID: id, - Name: name, - ParentID: parentID, - }) - } - return refs -} - -// Delete the reference to an entity at a given path. -func (db *Database) Delete(name string) error { - db.mux.Lock() - defer db.mux.Unlock() - - if name == "/" { - return fmt.Errorf("Cannot delete root entity") - } - - parentPath, n := splitPath(name) - parent, err := db.get(parentPath) - if err != nil { - return err - } - - if _, err := db.conn.Exec("DELETE FROM edge WHERE parent_id = ? AND name = ?;", parent.id, n); err != nil { - return err - } - return nil -} - -// Purge removes the entity with the specified id -// Walk the graph to make sure all references to the entity -// are removed and return the number of references removed -func (db *Database) Purge(id string) (int, error) { - db.mux.Lock() - defer db.mux.Unlock() - - tx, err := db.conn.Begin() - if err != nil { - return -1, err - } - - // Delete all edges - rows, err := tx.Exec("DELETE FROM edge WHERE entity_id = ?;", id) - if err != nil { - tx.Rollback() - return -1, err - } - changes, err := rows.RowsAffected() - if err != nil { - return -1, err - } - - // Clear who's using this id as parent - refs, err := tx.Exec("DELETE FROM edge WHERE parent_id = ?;", id) - if err != nil { - tx.Rollback() - return -1, err - } - refsCount, err := refs.RowsAffected() - if err != nil { - return -1, err - } - - // Delete entity - if _, err := tx.Exec("DELETE FROM entity where id = ?;", id); err != nil { - tx.Rollback() - return -1, err - } - - if err := tx.Commit(); err != nil { - return -1, err - } - - return int(changes + refsCount), nil -} - -// Rename an edge for a given path -func (db *Database) Rename(currentName, newName string) error { - db.mux.Lock() - defer db.mux.Unlock() - - parentPath, name := splitPath(currentName) - newParentPath, newEdgeName := splitPath(newName) - - if parentPath != newParentPath { - return fmt.Errorf("Cannot rename when root paths do not match %s != %s", parentPath, newParentPath) - } - - parent, err := db.get(parentPath) - if err != nil { - return err - } - - rows, err := db.conn.Exec("UPDATE edge SET name = ? WHERE parent_id = ? AND name = ?;", newEdgeName, parent.id, name) - if err != nil { - return err - } - i, err := rows.RowsAffected() - if err != nil { - return err - } - if i == 0 { - return fmt.Errorf("Cannot locate edge for %s %s", parent.id, name) - } - return nil -} - -// WalkMeta stores the walk metadata. 
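Delete drops a single edge, Purge removes every reference to an id and reports how many were removed, and Rename moves an edge to a new name under the same parent. Continuing the sketch, with illustrative paths and ids:

	_ = db.Delete("/containers/web") // one edge; the entity may keep other refs
	n, _ := db.Purge("abc123")       // the entity plus every edge touching it
	fmt.Println("removed", n, "references")
	_ = db.Rename("/containers/a", "/containers/b") // parent paths must match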
-type WalkMeta struct { - Parent *Entity - Entity *Entity - FullPath string - Edge *Edge -} - -func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) { - if e == nil { - return entities, nil - } - - rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id) - if err != nil { - return nil, err - } - defer rows.Close() - - for rows.Next() { - var entityID, entityName string - if err := rows.Scan(&entityID, &entityName); err != nil { - return nil, err - } - child := &Entity{entityID} - edge := &Edge{ - ParentID: e.id, - Name: entityName, - EntityID: child.id, - } - - meta := WalkMeta{ - Parent: e, - Entity: child, - FullPath: path.Join(name, edge.Name), - Edge: edge, - } - - entities = append(entities, meta) - - if depth != 0 { - nDepth := depth - if depth != -1 { - nDepth-- - } - entities, err = db.children(child, meta.FullPath, nDepth, entities) - if err != nil { - return nil, err - } - } - } - - return entities, nil -} - -func (db *Database) parents(e *Entity) (parents []string, err error) { - if e == nil { - return parents, nil - } - - rows, err := db.conn.Query("SELECT parent_id FROM edge where entity_id = ?;", e.id) - if err != nil { - return nil, err - } - defer rows.Close() - - for rows.Next() { - var parentID string - if err := rows.Scan(&parentID); err != nil { - return nil, err - } - parents = append(parents, parentID) - } - - return parents, nil -} - -// Return the entity based on the parent path and name. -func (db *Database) child(parent *Entity, name string) *Entity { - var id string - if err := db.conn.QueryRow("SELECT entity_id FROM edge WHERE parent_id = ? AND name = ?;", parent.id, name).Scan(&id); err != nil { - return nil - } - return &Entity{id} -} - -// ID returns the id used to reference this entity. -func (e *Entity) ID() string { - return e.id -} - -// Paths returns the paths sorted by depth. 
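List flattens a subtree into Entities keyed by full path, and Paths (below) returns those keys sorted by depth, giving a stable traversal order. Continuing the sketch:

	for _, p := range db.List("/", -1).Paths() {
		fmt.Println(p)
	}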
-func (e Entities) Paths() []string { - out := make([]string, len(e)) - var i int - for k := range e { - out[i] = k - i++ - } - sortByDepth(out) - - return out -} diff --git a/pkg/graphdb/graphdb_test.go b/pkg/graphdb/graphdb_test.go deleted file mode 100644 index f0fb074b4..000000000 --- a/pkg/graphdb/graphdb_test.go +++ /dev/null @@ -1,721 +0,0 @@ -package graphdb - -import ( - "database/sql" - "fmt" - "os" - "path" - "runtime" - "strconv" - "testing" - - _ "github.com/mattn/go-sqlite3" -) - -func newTestDb(t *testing.T) (*Database, string) { - p := path.Join(os.TempDir(), "sqlite.db") - conn, err := sql.Open("sqlite3", p) - db, err := NewDatabase(conn) - if err != nil { - t.Fatal(err) - } - return db, p -} - -func destroyTestDb(dbPath string) { - os.Remove(dbPath) -} - -func TestNewDatabase(t *testing.T) { - db, dbpath := newTestDb(t) - if db == nil { - t.Fatal("Database should not be nil") - } - db.Close() - defer destroyTestDb(dbpath) -} - -func TestCreateRootEntity(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - root := db.RootEntity() - if root == nil { - t.Fatal("Root entity should not be nil") - } -} - -func TestGetRootEntity(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - e := db.Get("/") - if e == nil { - t.Fatal("Entity should not be nil") - } - if e.ID() != "0" { - t.Fatalf("Entity id should be 0, got %s", e.ID()) - } -} - -func TestSetEntityWithDifferentName(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/test", "1") - if _, err := db.Set("/other", "1"); err != nil { - t.Fatal(err) - } -} - -func TestSetDuplicateEntity(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - if _, err := db.Set("/foo", "42"); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/foo", "43"); err == nil { - t.Fatalf("Creating an entry with a duplicate path did not cause an error") - } -} - -func TestCreateChild(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - child, err := db.Set("/db", "1") - if err != nil { - t.Fatal(err) - } - if child == nil { - t.Fatal("Child should not be nil") - } - if child.ID() != "1" { - t.Fail() - } -} - -func TestParents(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - for i := 1; i < 6; i++ { - a := strconv.Itoa(i) - if _, err := db.Set("/"+a, a); err != nil { - t.Fatal(err) - } - } - - for i := 6; i < 11; i++ { - a := strconv.Itoa(i) - p := strconv.Itoa(i - 5) - - key := fmt.Sprintf("/%s/%s", p, a) - - if _, err := db.Set(key, a); err != nil { - t.Fatal(err) - } - - parents, err := db.Parents(key) - if err != nil { - t.Fatal(err) - } - - if len(parents) != 1 { - t.Fatalf("Expected 1 entry for %s got %d", key, len(parents)) - } - - if parents[0] != p { - t.Fatalf("ID %s received, %s expected", parents[0], p) - } - } -} - -func TestChildren(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - str := "/" - for i := 1; i < 6; i++ { - a := strconv.Itoa(i) - if _, err := db.Set(str+a, a); err != nil { - t.Fatal(err) - } - - str = str + a + "/" - } - - str = "/" - for i := 10; i < 30; i++ { // 20 entities - a := strconv.Itoa(i) - if _, err := db.Set(str+a, a); err != nil { - t.Fatal(err) - } - - str = str + a + "/" - } - entries, err := db.Children("/", 5) - if err != nil { - t.Fatal(err) - } - - if len(entries) != 11 { - t.Fatalf("Expect 11 entries for / 
got %d", len(entries)) - } - - entries, err = db.Children("/", 20) - if err != nil { - t.Fatal(err) - } - - if len(entries) != 25 { - t.Fatalf("Expect 25 entries for / got %d", len(entries)) - } -} - -func TestListAllRootChildren(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - for i := 1; i < 6; i++ { - a := strconv.Itoa(i) - if _, err := db.Set("/"+a, a); err != nil { - t.Fatal(err) - } - } - entries := db.List("/", -1) - if len(entries) != 5 { - t.Fatalf("Expect 5 entries for / got %d", len(entries)) - } -} - -func TestListAllSubChildren(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/db/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - entries := db.List("/webapp", 1) - if len(entries) != 3 { - t.Fatalf("Expect 3 entries for / got %d", len(entries)) - } - - entries = db.List("/webapp", 0) - if len(entries) != 2 { - t.Fatalf("Expect 2 entries for / got %d", len(entries)) - } -} - -func TestAddSelfAsChild(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - child, err := db.Set("/test", "1") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/test/other", child.ID()); err == nil { - t.Fatal("Error should not be nil") - } -} - -func TestAddChildToNonExistentRoot(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - if _, err := db.Set("/myapp", "1"); err != nil { - t.Fatal(err) - } - - if _, err := db.Set("/myapp/proxy/db", "2"); err == nil { - t.Fatal("Error should not be nil") - } -} - -func TestWalkAll(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/db/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - child5, err := db.Set("/gograph", "5") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { - t.Fatal(err) - } - - if err := db.Walk("/", func(p string, e *Entity) error { - t.Logf("Path: %s Entity: %s", p, e.ID()) - return nil - }, -1); err != nil { - t.Fatal(err) - } -} - -func TestGetEntityByPath(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == 
"windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/db/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - child5, err := db.Set("/gograph", "5") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { - t.Fatal(err) - } - - entity := db.Get("/webapp/db/logs") - if entity == nil { - t.Fatal("Entity should not be nil") - } - if entity.ID() != "4" { - t.Fatalf("Expected to get entity with id 4, got %s", entity.ID()) - } -} - -func TestEnitiesPaths(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/db/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - child5, err := db.Set("/gograph", "5") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { - t.Fatal(err) - } - - out := db.List("/", -1) - for _, p := range out.Paths() { - t.Log(p) - } -} - -func TestDeleteRootEntity(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - if err := db.Delete("/"); err == nil { - t.Fatal("Error should not be nil") - } -} - -func TestDeleteEntity(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/db/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - child5, err := db.Set("/gograph", "5") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { - t.Fatal(err) - } - - if err := db.Delete("/webapp/sentry"); err != nil { - t.Fatal(err) - } - entity := db.Get("/webapp/sentry") - if entity != nil { - t.Fatal("Entity /webapp/sentry should be nil") - } -} - -func TestCountRefs(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer 
destroyTestDb(dbpath) - - db.Set("/webapp", "1") - - if db.Refs("1") != 1 { - t.Fatal("Expect reference count to be 1") - } - - db.Set("/db", "2") - db.Set("/webapp/db", "2") - if db.Refs("2") != 2 { - t.Fatal("Expect reference count to be 2") - } -} - -func TestPurgeId(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/webapp", "1") - - if c := db.Refs("1"); c != 1 { - t.Fatalf("Expect reference count to be 1, got %d", c) - } - - db.Set("/db", "2") - db.Set("/webapp/db", "2") - - count, err := db.Purge("2") - if err != nil { - t.Fatal(err) - } - if count != 2 { - t.Fatalf("Expected 2 references to be removed, got %d", count) - } -} - -// Regression test https://github.com/docker/docker/issues/12334 -func TestPurgeIdRefPaths(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/webapp", "1") - db.Set("/db", "2") - - db.Set("/db/webapp", "1") - - if c := db.Refs("1"); c != 2 { - t.Fatalf("Expected 2 references for webapp, got %d", c) - } - if c := db.Refs("2"); c != 1 { - t.Fatalf("Expected 1 reference for db, got %d", c) - } - - if rp := db.RefPaths("2"); len(rp) != 1 { - t.Fatalf("Expected 1 reference path for db, got %d", len(rp)) - } - - count, err := db.Purge("2") - if err != nil { - t.Fatal(err) - } - - if count != 2 { - t.Fatalf("Expected 2 rows to be removed, got %d", count) - } - - if c := db.Refs("2"); c != 0 { - t.Fatalf("Expected 0 references for db, got %d", c) - } - if c := db.Refs("1"); c != 1 { - t.Fatalf("Expected 1 reference for webapp, got %d", c) - } -} - -func TestRename(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/webapp", "1") - - if db.Refs("1") != 1 { - t.Fatal("Expect reference count to be 1") - } - - db.Set("/db", "2") - db.Set("/webapp/db", "2") - - if db.Get("/webapp/db") == nil { - t.Fatal("Cannot find entity at path /webapp/db") - } - - if err := db.Rename("/webapp/db", "/webapp/newdb"); err != nil { - t.Fatal(err) - } - if db.Get("/webapp/db") != nil { - t.Fatal("Entity should not exist at /webapp/db") - } - if db.Get("/webapp/newdb") == nil { - t.Fatal("Cannot find entity at path /webapp/newdb") - } - -} - -func TestCreateMultipleNames(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/db", "1") - if _, err := db.Set("/myapp", "1"); err != nil { - t.Fatal(err) - } - - db.Walk("/", func(p string, e *Entity) error { - t.Logf("%s\n", p) - return nil - }, -1) -} - -func TestRefPaths(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/webapp", "1") - - db.Set("/db", "2") - db.Set("/webapp/db", "2") - - refs := db.RefPaths("2") - if len(refs) != 2 { - t.Fatalf("Expected reference count to be 2, got %d", len(refs)) - } -} - -func TestExistsTrue(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/testing", "1") - - if !db.Exists("/testing") { - t.Fatalf("/testing should exist") - } -} - -func TestExistsFalse(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to 
Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/toerhe", "1") - - if db.Exists("/testing") { - t.Fatalf("/tesing should not exist") - } - -} - -func TestGetNameWithTrailingSlash(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/todo", "1") - - e := db.Get("/todo/") - if e == nil { - t.Fatalf("Entity should not be nil") - } -} - -func TestConcurrentWrites(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - errs := make(chan error, 2) - - save := func(name string, id string) { - if _, err := db.Set(fmt.Sprintf("/%s", name), id); err != nil { - errs <- err - } - errs <- nil - } - purge := func(id string) { - if _, err := db.Purge(id); err != nil { - errs <- err - } - errs <- nil - } - - save("/1", "1") - - go purge("1") - go save("/2", "2") - - any := false - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - any = true - t.Log(err) - } - } - if any { - t.Fail() - } -} diff --git a/pkg/graphdb/sort.go b/pkg/graphdb/sort.go deleted file mode 100644 index c07df077d..000000000 --- a/pkg/graphdb/sort.go +++ /dev/null @@ -1,27 +0,0 @@ -package graphdb - -import "sort" - -type pathSorter struct { - paths []string - by func(i, j string) bool -} - -func sortByDepth(paths []string) { - s := &pathSorter{paths, func(i, j string) bool { - return PathDepth(i) > PathDepth(j) - }} - sort.Sort(s) -} - -func (s *pathSorter) Len() int { - return len(s.paths) -} - -func (s *pathSorter) Swap(i, j int) { - s.paths[i], s.paths[j] = s.paths[j], s.paths[i] -} - -func (s *pathSorter) Less(i, j int) bool { - return s.by(s.paths[i], s.paths[j]) -} diff --git a/pkg/graphdb/sort_test.go b/pkg/graphdb/sort_test.go deleted file mode 100644 index ddf2266f6..000000000 --- a/pkg/graphdb/sort_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package graphdb - -import ( - "testing" -) - -func TestSort(t *testing.T) { - paths := []string{ - "/", - "/myreallylongname", - "/app/db", - } - - sortByDepth(paths) - - if len(paths) != 3 { - t.Fatalf("Expected 3 parts got %d", len(paths)) - } - - if paths[0] != "/app/db" { - t.Fatalf("Expected /app/db got %s", paths[0]) - } - if paths[1] != "/myreallylongname" { - t.Fatalf("Expected /myreallylongname got %s", paths[1]) - } - if paths[2] != "/" { - t.Fatalf("Expected / got %s", paths[2]) - } -} diff --git a/pkg/graphdb/utils.go b/pkg/graphdb/utils.go deleted file mode 100644 index 9edd79c35..000000000 --- a/pkg/graphdb/utils.go +++ /dev/null @@ -1,32 +0,0 @@ -package graphdb - -import ( - "path" - "strings" -) - -// Split p on / -func split(p string) []string { - return strings.Split(p, "/") -} - -// PathDepth returns the depth or number of / in a given path -func PathDepth(p string) int { - parts := split(p) - if len(parts) == 2 && parts[1] == "" { - return 1 - } - return len(parts) -} - -func splitPath(p string) (parent, name string) { - if p[0] != '/' { - p = "/" + p - } - parent, name = path.Split(p) - l := len(parent) - if parent[l-1] == '/' { - parent = parent[:l-1] - } - return -} diff --git a/pkg/httputils/httputils.go b/pkg/httputils/httputils.go deleted file mode 100644 index d7dc43877..000000000 --- a/pkg/httputils/httputils.go +++ /dev/null @@ -1,56 +0,0 @@ -package httputils - -import ( - "errors" - "fmt" - "net/http" - "regexp" - "strings" - - "github.com/docker/docker/pkg/jsonmessage" -) - -var ( - headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) - 
errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") -) - -// Download requests a given URL and returns an *http.Response. -func Download(url string) (resp *http.Response, err error) { - if resp, err = http.Get(url); err != nil { - return nil, err - } - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) - } - return resp, nil -} - -// NewHTTPRequestError returns a JSON response error. -func NewHTTPRequestError(msg string, res *http.Response) error { - return &jsonmessage.JSONError{ - Message: msg, - Code: res.StatusCode, - } -} - -// ServerHeader contains the server information. -type ServerHeader struct { - App string // docker - Ver string // 1.8.0-dev - OS string // windows or linux -} - -// ParseServerHeader extracts pieces from an HTTP server header -// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows). -func ParseServerHeader(hdr string) (*ServerHeader, error) { - matches := headerRegexp.FindStringSubmatch(hdr) - if len(matches) != 4 { - return nil, errInvalidHeader - } - return &ServerHeader{ - App: strings.TrimSpace(matches[1]), - Ver: strings.TrimSpace(matches[2]), - OS: strings.TrimSpace(matches[3]), - }, nil -} diff --git a/pkg/httputils/httputils_test.go b/pkg/httputils/httputils_test.go deleted file mode 100644 index d35d08215..000000000 --- a/pkg/httputils/httputils_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package httputils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestDownload(t *testing.T) { - expected := "Hello, docker !" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, expected) - })) - defer ts.Close() - response, err := Download(ts.URL) - if err != nil { - t.Fatal(err) - } - - actual, err := ioutil.ReadAll(response.Body) - response.Body.Close() - - if err != nil || string(actual) != expected { - t.Fatalf("Expected the response %q, got err:%v, response:%v, actual:%s", expected, err, response, string(actual)) - } -} - -func TestDownload400Errors(t *testing.T) { - expectedError := "Got HTTP status code >= 400: 403 Forbidden" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // 403 - http.Error(w, "something failed (forbidden)", http.StatusForbidden) - })) - defer ts.Close() - // Expected status code = 403 - if _, err := Download(ts.URL); err == nil || err.Error() != expectedError { - t.Fatalf("Expected the error %q, got %v", expectedError, err) - } -} - -func TestDownloadOtherErrors(t *testing.T) { - if _, err := Download("I'm not an url.."); err == nil || !strings.Contains(err.Error(), "unsupported protocol scheme") { - t.Fatalf("Expected an error with 'unsupported protocol scheme', got %v", err) - } -} - -func TestNewHTTPRequestError(t *testing.T) { - errorMessage := "Some error message" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // 403 - http.Error(w, errorMessage, http.StatusForbidden) - })) - defer ts.Close() - httpResponse, err := http.Get(ts.URL) - if err != nil { - t.Fatal(err) - } - if err := NewHTTPRequestError(errorMessage, httpResponse); err.Error() != errorMessage { - t.Fatalf("Expected err to be %q, got %v", errorMessage, err) - } -} - -func TestParseServerHeader(t *testing.T) { - inputs := map[string][]string{ - "bad header": {"error"}, - "(bad header)": {"error"}, - "(without/spaces)": {"error"}, - "(header/with spaces)": {"error"}, - 
"foo/bar (baz)": {"foo", "bar", "baz"}, - "foo/bar": {"error"}, - "foo": {"error"}, - "foo/bar (baz space)": {"foo", "bar", "baz space"}, - " f f / b b ( b s ) ": {"f f", "b b", "b s"}, - "foo/bar (baz) ignore": {"foo", "bar", "baz"}, - "foo/bar ()": {"error"}, - "foo/bar()": {"error"}, - "foo/bar(baz)": {"foo", "bar", "baz"}, - "foo/bar/zzz(baz)": {"foo/bar", "zzz", "baz"}, - "foo/bar(baz/abc)": {"foo", "bar", "baz/abc"}, - "foo/bar(baz (abc))": {"foo", "bar", "baz (abc)"}, - } - - for header, values := range inputs { - serverHeader, err := ParseServerHeader(header) - if err != nil { - if err != errInvalidHeader { - t.Fatalf("Failed to parse %q, and got some unexpected error: %q", header, err) - } - if values[0] == "error" { - continue - } - t.Fatalf("Header %q failed to parse when it shouldn't have", header) - } - if values[0] == "error" { - t.Fatalf("Header %q parsed ok when it should have failed(%q).", header, serverHeader) - } - - if serverHeader.App != values[0] { - t.Fatalf("Expected serverHeader.App for %q to equal %q, got %q", header, values[0], serverHeader.App) - } - - if serverHeader.Ver != values[1] { - t.Fatalf("Expected serverHeader.Ver for %q to equal %q, got %q", header, values[1], serverHeader.Ver) - } - - if serverHeader.OS != values[2] { - t.Fatalf("Expected serverHeader.OS for %q to equal %q, got %q", header, values[2], serverHeader.OS) - } - - } - -} diff --git a/pkg/httputils/mimetype.go b/pkg/httputils/mimetype.go deleted file mode 100644 index d5cf34e4f..000000000 --- a/pkg/httputils/mimetype.go +++ /dev/null @@ -1,30 +0,0 @@ -package httputils - -import ( - "mime" - "net/http" -) - -// MimeTypes stores the MIME content type. -var MimeTypes = struct { - TextPlain string - Tar string - OctetStream string -}{"text/plain", "application/tar", "application/octet-stream"} - -// DetectContentType returns a best guess representation of the MIME -// content type for the bytes at c. The value detected by -// http.DetectContentType is guaranteed not be nil, defaulting to -// application/octet-stream when a better guess cannot be made. The -// result of this detection is then run through mime.ParseMediaType() -// which separates the actual MIME string from any parameters. 
-func DetectContentType(c []byte) (string, map[string]string, error) { - - ct := http.DetectContentType(c) - contentType, args, err := mime.ParseMediaType(ct) - if err != nil { - return "", nil, err - } - - return contentType, args, nil -} diff --git a/pkg/httputils/mimetype_test.go b/pkg/httputils/mimetype_test.go deleted file mode 100644 index 9de433ee8..000000000 --- a/pkg/httputils/mimetype_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package httputils - -import ( - "testing" -) - -func TestDetectContentType(t *testing.T) { - input := []byte("That is just a plain text") - - if contentType, _, err := DetectContentType(input); err != nil || contentType != "text/plain" { - t.Errorf("TestDetectContentType failed") - } -} diff --git a/pkg/httputils/resumablerequestreader.go b/pkg/httputils/resumablerequestreader.go deleted file mode 100644 index bebc8608c..000000000 --- a/pkg/httputils/resumablerequestreader.go +++ /dev/null @@ -1,95 +0,0 @@ -package httputils - -import ( - "fmt" - "io" - "net/http" - "time" - - "github.com/Sirupsen/logrus" -) - -type resumableRequestReader struct { - client *http.Client - request *http.Request - lastRange int64 - totalSize int64 - currentResponse *http.Response - failures uint32 - maxFailures uint32 -} - -// ResumableRequestReader makes it possible to resume reading a request's body transparently -// maxfail is the number of times the request is retried (resumes are not counted) -// totalsize is the total length of the body; it is auto-detected if not provided -func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} -} - -// ResumableRequestReaderWithInitialResponse makes it possible to resume -// reading the body of an already initiated request. 
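A sketch of how a caller drives the resumable reader, assuming the pkg/httputils import path of this tree; the URL is a placeholder. Passing 0 as the total size lets the reader take it from the first response's Content-Length:

```go
package main

import (
	"io/ioutil"
	"log"
	"net/http"

	"github.com/docker/docker/pkg/httputils"
)

func main() {
	// Placeholder URL; the point is the call shape, not the endpoint.
	req, err := http.NewRequest("GET", "http://example.com/some-blob", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Retry up to 5 times; on a transient failure the reader re-issues
	// the request with a Range header starting at the last byte it
	// handed out, which is why the server must support byte ranges.
	body := httputils.ResumableRequestReader(http.DefaultClient, req, 5, 0)
	defer body.Close()

	data, err := ioutil.ReadAll(body)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes", len(data))
}
```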
-func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} -} - -func (r *resumableRequestReader) Read(p []byte) (n int, err error) { - if r.client == nil || r.request == nil { - return 0, fmt.Errorf("client and request can't be nil\n") - } - isFreshRequest := false - if r.lastRange != 0 && r.currentResponse == nil { - readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) - r.request.Header.Set("Range", readRange) - time.Sleep(5 * time.Second) - } - if r.currentResponse == nil { - r.currentResponse, err = r.client.Do(r.request) - isFreshRequest = true - } - if err != nil && r.failures+1 != r.maxFailures { - r.cleanUpResponse() - r.failures++ - time.Sleep(5 * time.Duration(r.failures) * time.Second) - return 0, nil - } else if err != nil { - r.cleanUpResponse() - return 0, err - } - if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { - r.cleanUpResponse() - return 0, io.EOF - } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { - r.cleanUpResponse() - return 0, fmt.Errorf("the server doesn't support byte ranges") - } - if r.totalSize == 0 { - r.totalSize = r.currentResponse.ContentLength - } else if r.totalSize <= 0 { - r.cleanUpResponse() - return 0, fmt.Errorf("failed to auto detect content length") - } - n, err = r.currentResponse.Body.Read(p) - r.lastRange += int64(n) - if err != nil { - r.cleanUpResponse() - } - if err != nil && err != io.EOF { - logrus.Infof("encountered error during pull and clearing it before resume: %s", err) - err = nil - } - return n, err -} - -func (r *resumableRequestReader) Close() error { - r.cleanUpResponse() - r.client = nil - r.request = nil - return nil -} - -func (r *resumableRequestReader) cleanUpResponse() { - if r.currentResponse != nil { - r.currentResponse.Body.Close() - r.currentResponse = nil - } -} diff --git a/pkg/httputils/resumablerequestreader_test.go b/pkg/httputils/resumablerequestreader_test.go deleted file mode 100644 index 5a2906db7..000000000 --- a/pkg/httputils/resumablerequestreader_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package httputils - -import ( - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestResumableRequestHeaderSimpleErrors(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "Hello, world !") - })) - defer ts.Close() - - client := &http.Client{} - - var req *http.Request - req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } - - expectedError := "client and request can't be nil\n" - resreq := &resumableRequestReader{} - _, err = resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) - } - - resreq = &resumableRequestReader{ - client: client, - request: req, - totalSize: -1, - } - expectedError = "failed to auto detect content length" - _, err = resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) - } - -} - -// Not too many failures; bails out after some wait -func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { - client := &http.Client{} - - var 
badReq *http.Request - badReq, err := http.NewRequest("GET", "I'm not an url", nil) - if err != nil { - t.Fatal(err) - } - - resreq := &resumableRequestReader{ - client: client, - request: badReq, - failures: 0, - maxFailures: 2, - } - read, err := resreq.Read([]byte{}) - if err != nil || read != 0 { - t.Fatalf("Expected no error and no byte read, got err:%v, read:%v.", err, read) - } -} - -// Too many failures; returns the error -func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { - client := &http.Client{} - - var badReq *http.Request - badReq, err := http.NewRequest("GET", "I'm not an url", nil) - if err != nil { - t.Fatal(err) - } - - resreq := &resumableRequestReader{ - client: client, - request: badReq, - failures: 0, - maxFailures: 1, - } - defer resreq.Close() - - expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""` - read, err := resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError || read != 0 { - t.Fatalf("Expected the error '%s', got err:%v, read:%v.", expectedError, err, read) - } -} - -type errorReaderCloser struct{} - -func (errorReaderCloser) Close() error { return nil } - -func (errorReaderCloser) Read(p []byte) (n int, err error) { - return 0, fmt.Errorf("An error occurred") -} - -// If an unknown error is encountered, return 0, nil and log it -func TestResumableRequestReaderWithReadError(t *testing.T) { - var req *http.Request - req, err := http.NewRequest("GET", "", nil) - if err != nil { - t.Fatal(err) - } - - client := &http.Client{} - - response := &http.Response{ - Status: "500 Internal Server", - StatusCode: 500, - ContentLength: 0, - Close: true, - Body: errorReaderCloser{}, - } - - resreq := &resumableRequestReader{ - client: client, - request: req, - currentResponse: response, - lastRange: 1, - totalSize: 1, - } - defer resreq.Close() - - buf := make([]byte, 1) - read, err := resreq.Read(buf) - if err != nil { - t.Fatal(err) - } - - if read != 0 { - t.Fatalf("Expected to have read nothing, but read %v", read) - } -} - -func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { - var req *http.Request - req, err := http.NewRequest("GET", "", nil) - if err != nil { - t.Fatal(err) - } - - client := &http.Client{} - - response := &http.Response{ - Status: "416 Requested Range Not Satisfiable", - StatusCode: 416, - ContentLength: 0, - Close: true, - Body: ioutil.NopCloser(strings.NewReader("")), - } - - resreq := &resumableRequestReader{ - client: client, - request: req, - currentResponse: response, - lastRange: 1, - totalSize: 1, - } - defer resreq.Close() - - buf := make([]byte, 1) - _, err = resreq.Read(buf) - if err == nil || err != io.EOF { - t.Fatalf("Expected an io.EOF error, got %v", err) - } -} - -func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Header.Get("Range") == "" { - t.Fatalf("Expected a Range HTTP header, got nothing") - } - })) - defer ts.Close() - - var req *http.Request - req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } - - client := &http.Client{} - - resreq := &resumableRequestReader{ - client: client, - request: req, - lastRange: 1, - } - defer resreq.Close() - - buf := make([]byte, 2) - _, err = resreq.Read(buf) - if err == nil || err.Error() != "the server doesn't support byte ranges" { - t.Fatalf("Expected an error 'the server doesn't support byte ranges', got %v", err) - } -} - -func 
TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { - - srvtxt := "some response text data" - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, srvtxt) - })) - defer ts.Close() - - var req *http.Request - req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } - - client := &http.Client{} - retries := uint32(5) - - resreq := ResumableRequestReader(client, req, retries, 0) - defer resreq.Close() - - data, err := ioutil.ReadAll(resreq) - if err != nil { - t.Fatal(err) - } - - resstr := strings.TrimSuffix(string(data), "\n") - - if resstr != srvtxt { - t.Errorf("resstr != srvtxt") - } -} - -func TestResumableRequestReader(t *testing.T) { - - srvtxt := "some response text data" - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, srvtxt) - })) - defer ts.Close() - - var req *http.Request - req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } - - client := &http.Client{} - retries := uint32(5) - imgSize := int64(len(srvtxt)) - - resreq := ResumableRequestReader(client, req, retries, imgSize) - defer resreq.Close() - - data, err := ioutil.ReadAll(resreq) - if err != nil { - t.Fatal(err) - } - - resstr := strings.TrimSuffix(string(data), "\n") - - if resstr != srvtxt { - t.Errorf("resstr != srvtxt") - } -} - -func TestResumableRequestReaderWithInitialResponse(t *testing.T) { - - srvtxt := "some response text data" - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, srvtxt) - })) - defer ts.Close() - - var req *http.Request - req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } - - client := &http.Client{} - retries := uint32(5) - imgSize := int64(len(srvtxt)) - - res, err := client.Do(req) - if err != nil { - t.Fatal(err) - } - - resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res) - defer resreq.Close() - - data, err := ioutil.ReadAll(resreq) - if err != nil { - t.Fatal(err) - } - - resstr := strings.TrimSuffix(string(data), "\n") - - if resstr != srvtxt { - t.Errorf("resstr != srvtxt") - } -} diff --git a/pkg/jsonlog/jsonlog.go b/pkg/jsonlog/jsonlog.go deleted file mode 100644 index 4734c3111..000000000 --- a/pkg/jsonlog/jsonlog.go +++ /dev/null @@ -1,42 +0,0 @@ -package jsonlog - -import ( - "encoding/json" - "fmt" - "time" -) - -// JSONLog represents a log message, typically a single entry from a given log stream. -// JSONLogs can be easily serialized to and from JSON and support custom formatting. -type JSONLog struct { - // Log is the log message - Log string `json:"log,omitempty"` - // Stream is the log source - Stream string `json:"stream,omitempty"` - // Created is the created timestamp of log - Created time.Time `json:"time"` - // Attrs is the list of extra attributes provided by the user - Attrs map[string]string `json:"attrs,omitempty"` -} - -// Format returns the log formatted according to format - // If format is empty, returns the log message - // If format is json, returns the log marshaled in json format - // By default, returns the log with the log time formatted according to format. -func (jl *JSONLog) Format(format string) (string, error) { - if format == "" { - return jl.Log, nil - } - if format == "json" { - m, err := json.Marshal(jl) - return string(m), err - } - return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil -} - -// Reset resets the log to nil. 
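The three branches of Format above are worth seeing side by side; a small sketch, assuming the pkg/jsonlog import path of this tree:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/jsonlog"
)

func main() {
	jl := &jsonlog.JSONLog{
		Log:     "hello world\n",
		Stream:  "stdout",
		Created: time.Date(2016, 7, 7, 15, 1, 33, 0, time.UTC),
	}
	// "" returns the raw message, "json" the marshalled struct, and any
	// other value is used as a time layout for a timestamp prefix.
	for _, format := range []string{"", "json", time.RFC3339} {
		line, err := jl.Format(format)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q: %s\n", format, line)
	}
}
```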
-func (jl *JSONLog) Reset() { - jl.Log = "" - jl.Stream = "" - jl.Created = time.Time{} -} diff --git a/pkg/jsonlog/jsonlog_marshalling.go b/pkg/jsonlog/jsonlog_marshalling.go deleted file mode 100644 index 83ce684a8..000000000 --- a/pkg/jsonlog/jsonlog_marshalling.go +++ /dev/null @@ -1,178 +0,0 @@ -// This code was initially generated by ffjson -// This code was generated via the following steps: -// $ go get -u github.com/pquerna/ffjson -// $ make BIND_DIR=. shell -// $ ffjson pkg/jsonlog/jsonlog.go -// $ mv pkg/jsonlog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go -// -// It has been modified to improve the performance of time marshalling to JSON -// and to clean it up. -// Should this code need to be regenerated when the JSONLog struct is changed, -// the relevant changes which have been made are: -// import ( -// "bytes" -//- -// "unicode/utf8" -// ) -// -// func (mj *JSONLog) MarshalJSON() ([]byte, error) { -//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { -// } -// return buf.Bytes(), nil -// } -//+ -// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -//- var err error -//- var obj []byte -//- var first bool = true -//- _ = obj -//- _ = err -//- _ = first -//+ var ( -//+ err error -//+ timestamp string -//+ first bool = true -//+ ) -// buf.WriteString(`{`) -// if len(mj.Log) != 0 { -// if first == true { -//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// buf.WriteString(`,`) -// } -// buf.WriteString(`"time":`) -//- obj, err = mj.Created.MarshalJSON() -//+ timestamp, err = FastTimeMarshalJSON(mj.Created) -// if err != nil { -// return err -// } -//- buf.Write(obj) -//+ buf.WriteString(timestamp) -// buf.WriteString(`}`) -// return nil -// } -// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// if len(mj.Log) != 0 { -// - if first == true { -// - first = false -// - } else { -// - buf.WriteString(`,`) -// - } -// + first = false -// buf.WriteString(`"log":`) -// ffjsonWriteJSONString(buf, mj.Log) -// } - -package jsonlog - -import ( - "bytes" - "unicode/utf8" -) - -// MarshalJSON marshals the JSONLog. -func (mj *JSONLog) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - buf.Grow(1024) - if err := mj.MarshalJSONBuf(&buf); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. 
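The point of the buffer-taking variant defined below is allocation reuse: a log driver can keep one bytes.Buffer per stream and reset it between entries instead of paying for a fresh slice on every MarshalJSON call. A sketch of that pattern, assuming the pkg/jsonlog import path of this tree:

```go
package main

import (
	"bytes"
	"os"
	"time"

	"github.com/docker/docker/pkg/jsonlog"
)

func main() {
	entries := []jsonlog.JSONLog{
		{Log: "starting\n", Stream: "stdout", Created: time.Now()},
		{Log: "done\n", Stream: "stdout", Created: time.Now()},
	}

	// One buffer for the whole stream: Reset keeps the backing array,
	// so steady-state marshalling allocates nothing.
	var buf bytes.Buffer
	for i := range entries {
		buf.Reset()
		if err := entries[i].MarshalJSONBuf(&buf); err != nil {
			panic(err)
		}
		buf.WriteByte('\n')
		os.Stdout.Write(buf.Bytes())
	}
}
```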
-func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { - var ( - err error - timestamp string - first = true - ) - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if !first { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - timestamp, err = FastTimeMarshalJSON(mj.Created) - if err != nil { - return err - } - buf.WriteString(timestamp) - buf.WriteString(`}`) - return nil -} - -func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.WriteString(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.WriteString(s[start:]) - } - buf.WriteByte('"') -} diff --git a/pkg/jsonlog/jsonlog_marshalling_test.go b/pkg/jsonlog/jsonlog_marshalling_test.go deleted file mode 100644 index 3edb27141..000000000 --- a/pkg/jsonlog/jsonlog_marshalling_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package jsonlog - -import ( - "regexp" - "testing" -) - -func TestJSONLogMarshalJSON(t *testing.T) { - logs := map[*JSONLog]string{ - &JSONLog{Log: `"A log line with \\"`}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line"}: `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line with \r"}: `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line with & < >"}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`, - &JSONLog{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`, - &JSONLog{}: `^{\"time\":\".{20,}\"}$`, - // These ones are a little weird - &JSONLog{Log: "\u2028 \u2029"}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`, - } - for jsonLog, expression := range logs { - data, err := jsonLog.MarshalJSON() - if err != nil { - t.Fatal(err) - } - res := string(data) - t.Logf("Result of WriteLog: %q", res) - logRe := regexp.MustCompile(expression) - if !logRe.MatchString(res) { - t.Fatalf("Log line not in expected format [%v]: %q", expression, res) - } - } -} diff --git a/pkg/jsonlog/jsonlogbytes.go b/pkg/jsonlog/jsonlogbytes.go deleted file mode 100644 index df522c0d6..000000000 --- 
a/pkg/jsonlog/jsonlogbytes.go +++ /dev/null @@ -1,122 +0,0 @@ -package jsonlog - -import ( - "bytes" - "encoding/json" - "unicode/utf8" -) - -// JSONLogs is based on JSONLog. -// It allows marshalling JSONLog from Log as []byte -// and an already marshalled Created timestamp. -type JSONLogs struct { - Log []byte `json:"log,omitempty"` - Stream string `json:"stream,omitempty"` - Created string `json:"time"` - - // json-encoded bytes - RawAttrs json.RawMessage `json:"attrs,omitempty"` -} - -// MarshalJSONBuf is based on the same method from JSONLog -// It has been modified to take into account the necessary changes. -func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { - var first = true - - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONBytesAsString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first == true { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if len(mj.RawAttrs) > 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"attrs":`) - buf.Write(mj.RawAttrs) - } - if !first { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - buf.WriteString(mj.Created) - buf.WriteString(`}`) - return nil -} - -// This is based on ffjsonWriteJSONBytesAsString. It has been changed -// to accept a string passed as a slice of bytes. -func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.Write(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.Write(s[start:]) - } - buf.WriteByte('"') -} diff --git a/pkg/jsonlog/jsonlogbytes_test.go b/pkg/jsonlog/jsonlogbytes_test.go deleted file mode 100644 index 6d6ad2158..000000000 --- a/pkg/jsonlog/jsonlogbytes_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package jsonlog - -import ( - "bytes" - "regexp" - "testing" -) - -func TestJSONLogsMarshalJSONBuf(t *testing.T) { - logs := map[*JSONLogs]string{ - &JSONLogs{Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`, - &JSONLogs{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":}$`, - &JSONLogs{Stream: "stdout", Log: []byte("A log 
line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`, - &JSONLogs{Created: "time"}: `^{\"time\":time}$`, - &JSONLogs{}: `^{\"time\":}$`, - // These ones are a little weird - &JSONLogs{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`, - &JSONLogs{Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":}$`, - &JSONLogs{Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":}$`, - // with raw attributes - &JSONLogs{Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`, - } - for jsonLog, expression := range logs { - var buf bytes.Buffer - if err := jsonLog.MarshalJSONBuf(&buf); err != nil { - t.Fatal(err) - } - res := buf.String() - t.Logf("Result of WriteLog: %q", res) - logRe := regexp.MustCompile(expression) - if !logRe.MatchString(res) { - t.Fatalf("Log line not in expected format [%v]: %q", expression, res) - } - } -} diff --git a/pkg/jsonlog/time_marshalling.go b/pkg/jsonlog/time_marshalling.go deleted file mode 100644 index 211733814..000000000 --- a/pkg/jsonlog/time_marshalling.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. -package jsonlog - -import ( - "errors" - "time" -) - -const ( - // RFC3339NanoFixed is our own version of RFC339Nano because we want one - // that pads the nano seconds part with zeros to ensure - // the timestamps are aligned in the logs. - RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - // JSONFormat is the format used by FastMarshalJSON - JSONFormat = `"` + time.RFC3339Nano + `"` -) - -// FastTimeMarshalJSON avoids one of the extra allocations that -// time.MarshalJSON is making. -func FastTimeMarshalJSON(t time.Time) (string, error) { - if y := t.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See golang.org/issue/4556#c15 for more discussion. 
- return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") - } - return t.Format(JSONFormat), nil -} diff --git a/pkg/jsonlog/time_marshalling_test.go b/pkg/jsonlog/time_marshalling_test.go deleted file mode 100644 index 02d0302c4..000000000 --- a/pkg/jsonlog/time_marshalling_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package jsonlog - -import ( - "testing" - "time" -) - -// Testing to ensure 'year' fields is between 0 and 9999 -func TestFastTimeMarshalJSONWithInvalidDate(t *testing.T) { - aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local) - json, err := FastTimeMarshalJSON(aTime) - if err == nil { - t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) - } - anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local) - json, err = FastTimeMarshalJSON(anotherTime) - if err == nil { - t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) - } - -} - -func TestFastTimeMarshalJSON(t *testing.T) { - aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC) - json, err := FastTimeMarshalJSON(aTime) - if err != nil { - t.Fatal(err) - } - expected := "\"2015-05-29T11:01:02.000000003Z\"" - if json != expected { - t.Fatalf("Expected %v, got %v", expected, json) - } - - location, err := time.LoadLocation("Europe/Paris") - if err != nil { - t.Fatal(err) - } - aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location) - json, err = FastTimeMarshalJSON(aTime) - if err != nil { - t.Fatal(err) - } - expected = "\"2015-05-29T11:01:02.000000003+02:00\"" - if json != expected { - t.Fatalf("Expected %v, got %v", expected, json) - } -} diff --git a/pkg/listeners/listeners_solaris.go b/pkg/listeners/listeners_solaris.go deleted file mode 100644 index ff833e374..000000000 --- a/pkg/listeners/listeners_solaris.go +++ /dev/null @@ -1,31 +0,0 @@ -package listeners - -import ( - "crypto/tls" - "fmt" - "net" - - "github.com/docker/go-connections/sockets" -) - -// Init creates new listeners for the server. -func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) { - switch proto { - case "tcp": - l, err := sockets.NewTCPSocket(addr, tlsConfig) - if err != nil { - return nil, err - } - ls = append(ls, l) - case "unix": - l, err := sockets.NewUnixSocket(addr, socketGroup) - if err != nil { - return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) - } - ls = append(ls, l) - default: - return nil, fmt.Errorf("Invalid protocol format: %q", proto) - } - - return -} diff --git a/pkg/listeners/listeners_unix.go b/pkg/listeners/listeners_unix.go deleted file mode 100644 index 1bcae7aa3..000000000 --- a/pkg/listeners/listeners_unix.go +++ /dev/null @@ -1,94 +0,0 @@ -// +build !windows,!solaris - -package listeners - -import ( - "crypto/tls" - "fmt" - "net" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/coreos/go-systemd/activation" - "github.com/docker/go-connections/sockets" -) - -// Init creates new listeners for the server. -// TODO: Clean up the fact that socketGroup and tlsConfig aren't always used. -func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { - ls := []net.Listener{} - - switch proto { - case "fd": - fds, err := listenFD(addr, tlsConfig) - if err != nil { - return nil, err - } - ls = append(ls, fds...) 
- case "tcp": - l, err := sockets.NewTCPSocket(addr, tlsConfig) - if err != nil { - return nil, err - } - ls = append(ls, l) - case "unix": - l, err := sockets.NewUnixSocket(addr, socketGroup) - if err != nil { - return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) - } - ls = append(ls, l) - default: - return nil, fmt.Errorf("invalid protocol format: %q", proto) - } - - return ls, nil -} - -// listenFD returns the specified socket activated files as a slice of -// net.Listeners or all of the activated files if "*" is given. -func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { - var ( - err error - listeners []net.Listener - ) - // socket activation - if tlsConfig != nil { - listeners, err = activation.TLSListeners(false, tlsConfig) - } else { - listeners, err = activation.Listeners(false) - } - if err != nil { - return nil, err - } - - if len(listeners) == 0 { - return nil, fmt.Errorf("no sockets found via socket activation: make sure the service was started by systemd") - } - - // default to all fds just like unix:// and tcp:// - if addr == "" || addr == "*" { - return listeners, nil - } - - fdNum, err := strconv.Atoi(addr) - if err != nil { - return nil, fmt.Errorf("failed to parse systemd fd address: should be a number: %v", addr) - } - fdOffset := fdNum - 3 - if len(listeners) < int(fdOffset)+1 { - return nil, fmt.Errorf("too few socket activated files passed in by systemd") - } - if listeners[fdOffset] == nil { - return nil, fmt.Errorf("failed to listen on systemd activated file: fd %d", fdOffset+3) - } - for i, ls := range listeners { - if i == fdOffset || ls == nil { - continue - } - if err := ls.Close(); err != nil { - // TODO: We shouldn't log inside a library. Remove this or error out. - logrus.Errorf("failed to close systemd activated file: fd %d: %v", fdOffset+3, err) - } - } - return []net.Listener{listeners[fdOffset]}, nil -} diff --git a/pkg/listeners/listeners_windows.go b/pkg/listeners/listeners_windows.go deleted file mode 100644 index 5b5a470fc..000000000 --- a/pkg/listeners/listeners_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -package listeners - -import ( - "crypto/tls" - "fmt" - "net" - "strings" - - "github.com/Microsoft/go-winio" - "github.com/docker/go-connections/sockets" -) - -// Init creates new listeners for the server. 
-func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { - ls := []net.Listener{} - - switch proto { - case "tcp": - l, err := sockets.NewTCPSocket(addr, tlsConfig) - if err != nil { - return nil, err - } - ls = append(ls, l) - - case "npipe": - // allow Administrators and SYSTEM, plus whatever additional users or groups were specified - sddl := "D:P(A;;GA;;;BA)(A;;GA;;;SY)" - if socketGroup != "" { - for _, g := range strings.Split(socketGroup, ",") { - sid, err := winio.LookupSidByName(g) - if err != nil { - return nil, err - } - sddl += fmt.Sprintf("(A;;GRGW;;;%s)", sid) - } - } - c := winio.PipeConfig{ - SecurityDescriptor: sddl, - MessageMode: true, // Use message mode so that CloseWrite() is supported - InputBufferSize: 65536, // Use 64KB buffers to improve performance - OutputBufferSize: 65536, - } - l, err := winio.ListenPipe(addr, &c) - if err != nil { - return nil, err - } - ls = append(ls, l) - - default: - return nil, fmt.Errorf("invalid protocol format: windows only supports tcp and npipe") - } - - return ls, nil -} diff --git a/pkg/locker/README.md b/pkg/locker/README.md deleted file mode 100644 index e84a815cc..000000000 --- a/pkg/locker/README.md +++ /dev/null @@ -1,65 +0,0 @@ -Locker -====== - -locker provides a mechanism for creating finer-grained locking to help -free up more global locks to handle other tasks. - -The implementation looks close to a sync.Mutex; however, the user must provide a -reference to use to refer to the underlying lock when locking and unlocking, -and unlock may generate an error. - -If a lock with a given name does not exist when `Lock` is called, one is -created. -Lock references are automatically cleaned up on `Unlock` if nothing else is -waiting for the lock. - - -## Usage - -```go -package important - -import ( - "sync" - "time" - - "github.com/docker/docker/pkg/locker" -) - -type important struct { - locks *locker.Locker - data map[string]interface{} - mu sync.Mutex -} - -func (i *important) Get(name string) interface{} { - i.locks.Lock(name) - defer i.locks.Unlock(name) - return i.data[name] -} - -func (i *important) Create(name string, data interface{}) { - i.locks.Lock(name) - defer i.locks.Unlock(name) - - i.createImportant(data) - - i.mu.Lock() - i.data[name] = data - i.mu.Unlock() -} - -func (i *important) createImportant(data interface{}) { - time.Sleep(10 * time.Second) -} -``` - -For functions dealing with a given name, always lock at the beginning of the -function (or before doing anything with the underlying state), this ensures any -other function that is dealing with the same name will block. - -When needing to modify the underlying data, use the global lock to ensure nothing -else is modifying it at the same time. -Since the name lock is already in place, no reads will occur while the modification -is being performed. - diff --git a/pkg/locker/locker.go b/pkg/locker/locker.go deleted file mode 100644 index 0b22ddfab..000000000 --- a/pkg/locker/locker.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Package locker provides a mechanism for creating finer-grained locking to help -free up more global locks to handle other tasks. - -The implementation looks close to a sync.Mutex; however, the user must provide a -reference to use to refer to the underlying lock when locking and unlocking, -and unlock may generate an error. - -If a lock with a given name does not exist when `Lock` is called, one is -created. -Lock references are automatically cleaned up on `Unlock` if nothing else is -waiting for the lock. 
-*/ -package locker - -import ( - "errors" - "sync" - "sync/atomic" -) - -// ErrNoSuchLock is returned when the requested lock does not exist -var ErrNoSuchLock = errors.New("no such lock") - -// Locker provides a locking mechanism based on the passed in reference name -type Locker struct { - mu sync.Mutex - locks map[string]*lockCtr -} - -// lockCtr is used by Locker to represent a lock with a given name. -type lockCtr struct { - mu sync.Mutex - // waiters is the number of waiters waiting to acquire the lock - // this is int32 instead of uint32 so we can add `-1` in `dec()` - waiters int32 -} - -// inc increments the number of waiters waiting for the lock -func (l *lockCtr) inc() { - atomic.AddInt32(&l.waiters, 1) -} - -// dec decrements the number of waiters waiting on the lock -func (l *lockCtr) dec() { - atomic.AddInt32(&l.waiters, -1) -} - -// count gets the current number of waiters -func (l *lockCtr) count() int32 { - return atomic.LoadInt32(&l.waiters) -} - -// Lock locks the mutex -func (l *lockCtr) Lock() { - l.mu.Lock() -} - -// Unlock unlocks the mutex -func (l *lockCtr) Unlock() { - l.mu.Unlock() -} - -// New creates a new Locker -func New() *Locker { - return &Locker{ - locks: make(map[string]*lockCtr), - } -} - -// Lock locks a mutex with the given name. If it doesn't exist, one is created -func (l *Locker) Lock(name string) { - l.mu.Lock() - if l.locks == nil { - l.locks = make(map[string]*lockCtr) - } - - nameLock, exists := l.locks[name] - if !exists { - nameLock = &lockCtr{} - l.locks[name] = nameLock - } - - // increment the nameLock waiters while inside the main mutex - // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently - nameLock.inc() - l.mu.Unlock() - - // Lock the nameLock outside the main mutex so we don't block other operations - // once locked then we can decrement the number of waiters for this lock - nameLock.Lock() - nameLock.dec() -} - -// Unlock unlocks the mutex with the given name -// If the given lock is not being waited on by any other callers, it is deleted -func (l *Locker) Unlock(name string) error { - l.mu.Lock() - nameLock, exists := l.locks[name] - if !exists { - l.mu.Unlock() - return ErrNoSuchLock - } - - if nameLock.count() == 0 { - delete(l.locks, name) - } - nameLock.Unlock() - - l.mu.Unlock() - return nil -} diff --git a/pkg/locker/locker_test.go b/pkg/locker/locker_test.go deleted file mode 100644 index 5a297dd47..000000000 --- a/pkg/locker/locker_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package locker - -import ( - "sync" - "testing" - "time" -) - -func TestLockCounter(t *testing.T) { - l := &lockCtr{} - l.inc() - - if l.waiters != 1 { - t.Fatal("counter inc failed") - } - - l.dec() - if l.waiters != 0 { - t.Fatal("counter dec failed") - } -} - -func TestLockerLock(t *testing.T) { - l := New() - l.Lock("test") - ctr := l.locks["test"] - - if ctr.count() != 0 { - t.Fatalf("expected waiters to be 0, got :%d", ctr.waiters) - } - - chDone := make(chan struct{}) - go func() { - l.Lock("test") - close(chDone) - }() - - chWaiting := make(chan struct{}) - go func() { - for range time.Tick(1 * time.Millisecond) { - if ctr.count() == 1 { - close(chWaiting) - break - } - } - }() - - select { - case <-chWaiting: - case <-time.After(3 * time.Second): - t.Fatal("timed out waiting for lock waiters to be incremented") - } - - select { - case <-chDone: - t.Fatal("lock should not have returned while it was still held") - default: - } - - if err := l.Unlock("test"); err != nil { - t.Fatal(err) - } - - select { - 
case <-chDone: - case <-time.After(3 * time.Second): - t.Fatalf("lock should have completed") - } - - if ctr.count() != 0 { - t.Fatalf("expected waiters to be 0, got: %d", ctr.count()) - } -} - -func TestLockerUnlock(t *testing.T) { - l := New() - - l.Lock("test") - l.Unlock("test") - - chDone := make(chan struct{}) - go func() { - l.Lock("test") - close(chDone) - }() - - select { - case <-chDone: - case <-time.After(3 * time.Second): - t.Fatalf("lock should not be blocked") - } -} - -func TestLockerConcurrency(t *testing.T) { - l := New() - - var wg sync.WaitGroup - for i := 0; i <= 10000; i++ { - wg.Add(1) - go func() { - l.Lock("test") - // if there is a concurrency issue, will very likely panic here - l.Unlock("test") - wg.Done() - }() - } - - chDone := make(chan struct{}) - go func() { - wg.Wait() - close(chDone) - }() - - select { - case <-chDone: - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for locks to complete") - } - - // Since everything has unlocked this should not exist anymore - if ctr, exists := l.locks["test"]; exists { - t.Fatalf("lock should not exist: %v", ctr) - } -} diff --git a/pkg/namesgenerator/cmd/names-generator/main.go b/pkg/namesgenerator/cmd/names-generator/main.go deleted file mode 100644 index 18a939b70..000000000 --- a/pkg/namesgenerator/cmd/names-generator/main.go +++ /dev/null @@ -1,11 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/docker/docker/pkg/namesgenerator" -) - -func main() { - fmt.Println(namesgenerator.GetRandomName(0)) -} diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go deleted file mode 100644 index 6109f1b8e..000000000 --- a/pkg/namesgenerator/names-generator.go +++ /dev/null @@ -1,552 +0,0 @@ -package namesgenerator - -import ( - "fmt" - - "github.com/docker/docker/pkg/random" -) - -var ( - left = [...]string{ - "admiring", - "adoring", - "agitated", - "amazing", - "angry", - "awesome", - "backstabbing", - "berserk", - "big", - "boring", - "clever", - "cocky", - "compassionate", - "condescending", - "cranky", - "desperate", - "determined", - "distracted", - "dreamy", - "drunk", - "ecstatic", - "elated", - "elegant", - "evil", - "fervent", - "focused", - "furious", - "gigantic", - "gloomy", - "goofy", - "grave", - "happy", - "high", - "hopeful", - "hungry", - "infallible", - "jolly", - "jovial", - "kickass", - "lonely", - "loving", - "mad", - "modest", - "naughty", - "nauseous", - "nostalgic", - "peaceful", - "pedantic", - "pensive", - "prickly", - "reverent", - "romantic", - "sad", - "serene", - "sharp", - "sick", - "silly", - "sleepy", - "small", - "stoic", - "stupefied", - "suspicious", - "tender", - "thirsty", - "tiny", - "trusting", - "zen", - } - - // Docker, starting from 0.7.x, generates names from notable scientists and hackers. - // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. - right = [...]string{ - // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB - "albattani", - - // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. 
https://en.wikipedia.org/wiki/Frances_E._Allen - "allen", - - // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida - "almeida", - - // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi - "agnesi", - - // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes - "archimedes", - - // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli - "ardinghelli", - - // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata - "aryabhata", - - // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin - "austin", - - // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. - "babbage", - - // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach - "banach", - - // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen - "bardeen", - - // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik - "bartik", - - // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi - "bassi", - - // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell - "bell", - - // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha - "bhabha", - - // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus - "bhaskara", - - // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell - "blackwell", - - // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. - "bohr", - - // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth - "booth", - - // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg - "borg", - - // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose - "bose", - - // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. 
https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville - "boyd", - - // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero - "brahmagupta", - - // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain - "brattain", - - // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) - "brown", - - // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson - "carson", - - // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar - "chandrasekhar", - - //Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) - "shannon", - - // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden - "colden", - - // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori - "cori", - - // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray - "cray", - - // This entry reflects a husband and wife team who worked together: - // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran - // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran - "curran", - - // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. - "curie", - - // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. - "darwin", - - // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. - "davinci", - - // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. - "dijkstra", - - // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky - "dubinsky", - - // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley - "easley", - - // Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison - "edison", - - // Albert Einstein invented the general theory of relativity. 
https://en.wikipedia.org/wiki/Albert_Einstein - "einstein", - - // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion - "elion", - - // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart - "engelbart", - - // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid - "euclid", - - // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler - "euler", - - // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat - "fermat", - - // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. - "fermi", - - // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman - "feynman", - - // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. - "franklin", - - // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei - "galileo", - - // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates - "gates", - - // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) - "goldberg", - - // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine - "goldstine", - - // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser - "goldwasser", - - // James Golick, all around gangster. - "golick", - - // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall - "goodall", - - // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) - "hamilton", - - // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking - "hawking", - - // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg - "heisenberg", - - // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD - "heyrovsky", - - // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin - "hodgkin", - - // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. 
https://en.wikipedia.org/wiki/Erna_Schneider_Hoover - "hoover", - - // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper - "hopper", - - // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle - "hugle", - - // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia - "hypatia", - - // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil - "jang", - - // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik - "jennings", - - // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen - "jepsen", - - // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie - "joliot", - - // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones - "jones", - - // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam - "kalam", - - // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare - "kare", - - // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller - "keller", - - // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana - "khorana", - - // Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby - "kilby", - - // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch - "kirch", - - // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth - "knuth", - - // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya - "kowalevski", - - // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande - "lalande", - - // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. 
https://en.wikipedia.org/wiki/Hedy_Lamarr - "lamarr", - - // Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport - "lamport", - - // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey - "leakey", - - // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt - "leavitt", - - // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum - "lichterman", - - // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov - "liskov", - - // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) - "lovelace", - - // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re - "lumiere", - - // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) - "mahavira", - - // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer - "mayer", - - // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) - "mccarthy", - - // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock - "mcclintock", - - // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean - "mclean", - - // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli - "mcnulty", - - // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner - "meitner", - - // Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky - "meninsky", - - // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf - "mestorf", - - // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky - "minsky", - - // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani - "mirzakhani", - - // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse - "morse", - - // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock - "murdock", - - // Isaac Newton invented classic mechanics and modern optics. 
https://en.wikipedia.org/wiki/Isaac_Newton - "newton", - - // Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform - "nightingale", - - // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel - "nobel", - - // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether - "noether", - - // Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 - "northcutt", - - // Robert Noyce invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce - "noyce", - - // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems - "panini", - - // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 - "pare", - - // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. - "pasteur", - - // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin - "payne", - - // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman - "perlman", - - // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike - "pike", - - // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 - "poincare", - - // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras - "poitras", - - // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy - "ptolemy", - - // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman - "raman", - - // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan - "ramanujan", - - // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. 
https://en.wikipedia.org/wiki/Sally_Ride - "ride", - - // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) - "montalcini", - - // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie - "ritchie", - - // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen - "roentgen", - - // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin - "rosalind", - - // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha - "saha", - - // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet - "sammet", - - // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) - "shaw", - - // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley - "shirley", - - // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley - "shockley", - - // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi - "sinoussi", - - // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton - "snyder", - - // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence - "spence", - - // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman - "stallman", - - // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker - "stonebraker", - - // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson - "swanson", - - // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz - "swartz", - - // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. 
https://en.wikipedia.org/wiki/Bertha_Swirles - "swirles", - - // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla - "tesla", - - // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson - "thompson", - - // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds - "torvalds", - - // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. - "turing", - - // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions - "varahamihira", - - // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya - "visvesvaraya", - - // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard - "volhard", - - // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer - "wescoff", - - // Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles - "wiles", - - // Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams - "williams", - - // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson - "wilson", - - // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing - "wing", - - // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak - "wozniak", - - // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers - "wright", - - // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow - "yalow", - - // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath - "yonath", - } -) - -// GetRandomName generates a random name from the list of adjectives and surnames in this package -// formatted as "adjective_surname". For example 'focused_turing'. 
If retry is non-zero, a random -// integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` -func GetRandomName(retry int) string { - rnd := random.Rand -begin: - name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) - if name == "boring_wozniak" /* Steve Wozniak is not boring */ { - goto begin - } - - if retry > 0 { - name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) - } - return name -} diff --git a/pkg/namesgenerator/names-generator_test.go b/pkg/namesgenerator/names-generator_test.go deleted file mode 100644 index d1a94977d..000000000 --- a/pkg/namesgenerator/names-generator_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package namesgenerator - -import ( - "strings" - "testing" -) - -func TestNameFormat(t *testing.T) { - name := GetRandomName(0) - if !strings.Contains(name, "_") { - t.Fatalf("Generated name does not contain an underscore") - } - if strings.ContainsAny(name, "0123456789") { - t.Fatalf("Generated name contains numbers!") - } -} - -func TestNameRetries(t *testing.T) { - name := GetRandomName(1) - if !strings.Contains(name, "_") { - t.Fatalf("Generated name does not contain an underscore") - } - if !strings.ContainsAny(name, "0123456789") { - t.Fatalf("Generated name doesn't contain a number") - } - -} diff --git a/pkg/platform/architecture_linux.go b/pkg/platform/architecture_linux.go deleted file mode 100644 index 2cdc2c591..000000000 --- a/pkg/platform/architecture_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -// Package platform provides helper function to get the runtime architecture -// for different platforms. -package platform - -import ( - "syscall" -) - -// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) -func runtimeArchitecture() (string, error) { - utsname := &syscall.Utsname{} - if err := syscall.Uname(utsname); err != nil { - return "", err - } - return charsToString(utsname.Machine), nil -} diff --git a/pkg/platform/architecture_unix.go b/pkg/platform/architecture_unix.go deleted file mode 100644 index 45bbcf153..000000000 --- a/pkg/platform/architecture_unix.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build freebsd solaris darwin - -// Package platform provides helper function to get the runtime architecture -// for different platforms. -package platform - -import ( - "os/exec" - "strings" -) - -// runtimeArchitecture gets the name of the current architecture (x86, x86_64, i86pc, sun4v, ...) 
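// A standalone sketch of the technique the non-Linux unix variant below
// relies on: shell out to uname -m and trim the output. This assumes a
// POSIX system with uname on the PATH (the removed code pins
// /usr/bin/uname instead):
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("uname", "-m").Output()
	if err != nil {
		panic(err)
	}
	// Prints e.g. "x86_64" on amd64 machines.
	fmt.Println(strings.TrimSpace(string(out)))
}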
-func runtimeArchitecture() (string, error) { - cmd := exec.Command("/usr/bin/uname", "-m") - machine, err := cmd.Output() - if err != nil { - return "", err - } - return strings.TrimSpace(string(machine)), nil -} diff --git a/pkg/platform/architecture_windows.go b/pkg/platform/architecture_windows.go deleted file mode 100644 index 0dd8a2e41..000000000 --- a/pkg/platform/architecture_windows.go +++ /dev/null @@ -1,52 +0,0 @@ -package platform - -import ( - "fmt" - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procGetSystemInfo = modkernel32.NewProc("GetSystemInfo") -) - -// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms724958(v=vs.85).aspx -type systeminfo struct { - wProcessorArchitecture uint16 - wReserved uint16 - dwPageSize uint32 - lpMinimumApplicationAddress uintptr - lpMaximumApplicationAddress uintptr - dwActiveProcessorMask uintptr - dwNumberOfProcessors uint32 - dwProcessorType uint32 - dwAllocationGranularity uint32 - wProcessorLevel uint16 - wProcessorRevision uint16 -} - -// Constants -const ( - ProcessorArchitecture64 = 9 // PROCESSOR_ARCHITECTURE_AMD64 - ProcessorArchitectureIA64 = 6 // PROCESSOR_ARCHITECTURE_IA64 - ProcessorArchitecture32 = 0 // PROCESSOR_ARCHITECTURE_INTEL - ProcessorArchitectureArm = 5 // PROCESSOR_ARCHITECTURE_ARM -) - -var sysinfo systeminfo - -// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) -func runtimeArchitecture() (string, error) { - syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) - switch sysinfo.wProcessorArchitecture { - case ProcessorArchitecture64, ProcessorArchitectureIA64: - return "x86_64", nil - case ProcessorArchitecture32: - return "i686", nil - case ProcessorArchitectureArm: - return "arm", nil - default: - return "", fmt.Errorf("Unknown processor architecture") - } -} diff --git a/pkg/platform/platform.go b/pkg/platform/platform.go deleted file mode 100644 index e4b03122f..000000000 --- a/pkg/platform/platform.go +++ /dev/null @@ -1,23 +0,0 @@ -package platform - -import ( - "runtime" - - "github.com/Sirupsen/logrus" -) - -var ( - // Architecture holds the runtime architecture of the process. - Architecture string - // OSType holds the runtime operating system type (Linux, …) of the process. 
- OSType string -) - -func init() { - var err error - Architecture, err = runtimeArchitecture() - if err != nil { - logrus.Errorf("Could not read system architecture info: %v", err) - } - OSType = runtime.GOOS -} diff --git a/pkg/platform/utsname_int8.go b/pkg/platform/utsname_int8.go deleted file mode 100644 index 5dcbadfdf..000000000 --- a/pkg/platform/utsname_int8.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux,386 linux,amd64 linux,arm64 -// see golang's sources src/syscall/ztypes_linux_*.go that use int8 - -package platform - -// Convert the OS/ARCH-specific utsname.Machine to string -// given as an array of signed int8 -func charsToString(ca [65]int8) string { - s := make([]byte, len(ca)) - var lens int - for ; lens < len(ca); lens++ { - if ca[lens] == 0 { - break - } - s[lens] = uint8(ca[lens]) - } - return string(s[0:lens]) -} diff --git a/pkg/platform/utsname_uint8.go b/pkg/platform/utsname_uint8.go deleted file mode 100644 index c9875cf6e..000000000 --- a/pkg/platform/utsname_uint8.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux,arm linux,ppc64 linux,ppc64le s390x -// see golang's sources src/syscall/ztypes_linux_*.go that use uint8 - -package platform - -// Convert the OS/ARCH-specific utsname.Machine to string -// given as an array of unsigned uint8 -func charsToString(ca [65]uint8) string { - s := make([]byte, len(ca)) - var lens int - for ; lens < len(ca); lens++ { - if ca[lens] == 0 { - break - } - s[lens] = ca[lens] - } - return string(s[0:lens]) -} diff --git a/pkg/progress/progress.go b/pkg/progress/progress.go deleted file mode 100644 index df3c2ba91..000000000 --- a/pkg/progress/progress.go +++ /dev/null @@ -1,73 +0,0 @@ -package progress - -import ( - "fmt" -) - -// Progress represents the progress of a transfer. -type Progress struct { - ID string - - // Progress contains a Message or... - Message string - - // ...progress of an action - Action string - Current int64 - Total int64 - - // Aux contains extra information not presented to the user, such as - // digests for push signing. - Aux interface{} - - LastUpdate bool -} - -// Output is an interface for writing progress information. It's -// like a writer for progress, but we don't call it Writer because -// that would be confusing next to ProgressReader (also, because it -// doesn't implement the io.Writer interface). -type Output interface { - WriteProgress(Progress) error -} - -type chanOutput chan<- Progress - -func (out chanOutput) WriteProgress(p Progress) error { - out <- p - return nil -} - -// ChanOutput returns an Output that writes progress updates to the -// supplied channel. -func ChanOutput(progressChan chan<- Progress) Output { - return chanOutput(progressChan) -} - -// Update is a convenience function to write a progress update to the channel. -func Update(out Output, id, action string) { - out.WriteProgress(Progress{ID: id, Action: action}) -} - -// Updatef is a convenience function to write a printf-formatted progress update -// to the channel. -func Updatef(out Output, id, format string, a ...interface{}) { - Update(out, id, fmt.Sprintf(format, a...)) -} - -// Message is a convenience function to write a progress message to the channel. -func Message(out Output, id, message string) { - out.WriteProgress(Progress{ID: id, Message: message}) -} - -// Messagef is a convenience function to write a printf-formatted progress -// message to the channel. 
-func Messagef(out Output, id, format string, a ...interface{}) { - Message(out, id, fmt.Sprintf(format, a...)) -} - -// Aux sends auxiliary information over a progress interface, which will not be -// formatted for the UI. This is used for things such as push signing. -func Aux(out Output, a interface{}) { - out.WriteProgress(Progress{Aux: a}) -} diff --git a/pkg/progress/progressreader.go b/pkg/progress/progressreader.go deleted file mode 100644 index c39e2b69f..000000000 --- a/pkg/progress/progressreader.go +++ /dev/null @@ -1,59 +0,0 @@ -package progress - -import ( - "io" -) - -// Reader is a Reader with progress bar. -type Reader struct { - in io.ReadCloser // Stream to read from - out Output // Where to send progress bar to - size int64 - current int64 - lastUpdate int64 - id string - action string -} - -// NewProgressReader creates a new ProgressReader. -func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { - return &Reader{ - in: in, - out: out, - size: size, - id: id, - action: action, - } -} - -func (p *Reader) Read(buf []byte) (n int, err error) { - read, err := p.in.Read(buf) - p.current += int64(read) - updateEvery := int64(1024 * 512) //512kB - if p.size > 0 { - // Update progress for every 1% read if 1% < 512kB - if increment := int64(0.01 * float64(p.size)); increment < updateEvery { - updateEvery = increment - } - } - if p.current-p.lastUpdate > updateEvery || err != nil { - p.updateProgress(err != nil && read == 0) - p.lastUpdate = p.current - } - - return read, err -} - -// Close closes the progress reader and its underlying reader. -func (p *Reader) Close() error { - if p.current < p.size { - // print a full progress bar when closing prematurely - p.current = p.size - p.updateProgress(false) - } - return p.in.Close() -} - -func (p *Reader) updateProgress(last bool) { - p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) -} diff --git a/pkg/progress/progressreader_test.go b/pkg/progress/progressreader_test.go deleted file mode 100644 index b14d40156..000000000 --- a/pkg/progress/progressreader_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package progress - -import ( - "bytes" - "io" - "io/ioutil" - "testing" -) - -func TestOutputOnPrematureClose(t *testing.T) { - content := []byte("TESTING") - reader := ioutil.NopCloser(bytes.NewReader(content)) - progressChan := make(chan Progress, 10) - - pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") - - part := make([]byte, 4, 4) - _, err := io.ReadFull(pr, part) - if err != nil { - pr.Close() - t.Fatal(err) - } - -drainLoop: - for { - select { - case <-progressChan: - default: - break drainLoop - } - } - - pr.Close() - - select { - case <-progressChan: - default: - t.Fatalf("Expected some output when closing prematurely") - } -} - -func TestCompleteSilently(t *testing.T) { - content := []byte("TESTING") - reader := ioutil.NopCloser(bytes.NewReader(content)) - progressChan := make(chan Progress, 10) - - pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") - - out, err := ioutil.ReadAll(pr) - if err != nil { - pr.Close() - t.Fatal(err) - } - if string(out) != "TESTING" { - pr.Close() - t.Fatalf("Unexpected output %q from reader", string(out)) - } - -drainLoop: - for { - select { - case <-progressChan: - default: - break drainLoop - } - } - - pr.Close() - - select { - case <-progressChan: - t.Fatalf("Should have closed silently when read is complete") - 
default: - } -} diff --git a/pkg/pubsub/publisher.go b/pkg/pubsub/publisher.go deleted file mode 100644 index 09364617e..000000000 --- a/pkg/pubsub/publisher.go +++ /dev/null @@ -1,111 +0,0 @@ -package pubsub - -import ( - "sync" - "time" -) - -var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }} - -// NewPublisher creates a new pub/sub publisher to broadcast messages. -// The duration is used as the send timeout as to not block the publisher publishing -// messages to other clients if one client is slow or unresponsive. -// The buffer is used when creating new channels for subscribers. -func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher { - return &Publisher{ - buffer: buffer, - timeout: publishTimeout, - subscribers: make(map[subscriber]topicFunc), - } -} - -type subscriber chan interface{} -type topicFunc func(v interface{}) bool - -// Publisher is basic pub/sub structure. Allows to send events and subscribe -// to them. Can be safely used from multiple goroutines. -type Publisher struct { - m sync.RWMutex - buffer int - timeout time.Duration - subscribers map[subscriber]topicFunc -} - -// Len returns the number of subscribers for the publisher -func (p *Publisher) Len() int { - p.m.RLock() - i := len(p.subscribers) - p.m.RUnlock() - return i -} - -// Subscribe adds a new subscriber to the publisher returning the channel. -func (p *Publisher) Subscribe() chan interface{} { - return p.SubscribeTopic(nil) -} - -// SubscribeTopic adds a new subscriber that filters messages sent by a topic. -func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { - ch := make(chan interface{}, p.buffer) - p.m.Lock() - p.subscribers[ch] = topic - p.m.Unlock() - return ch -} - -// Evict removes the specified subscriber from receiving any more messages. -func (p *Publisher) Evict(sub chan interface{}) { - p.m.Lock() - delete(p.subscribers, sub) - close(sub) - p.m.Unlock() -} - -// Publish sends the data in v to all subscribers currently registered with the publisher. -func (p *Publisher) Publish(v interface{}) { - p.m.RLock() - if len(p.subscribers) == 0 { - p.m.RUnlock() - return - } - - wg := wgPool.Get().(*sync.WaitGroup) - for sub, topic := range p.subscribers { - wg.Add(1) - go p.sendTopic(sub, topic, v, wg) - } - wg.Wait() - wgPool.Put(wg) - p.m.RUnlock() -} - -// Close closes the channels to all subscribers registered with the publisher. 
-func (p *Publisher) Close() { - p.m.Lock() - for sub := range p.subscribers { - delete(p.subscribers, sub) - close(sub) - } - p.m.Unlock() -} - -func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) { - defer wg.Done() - if topic != nil && !topic(v) { - return - } - - // send under a select as to not block if the receiver is unavailable - if p.timeout > 0 { - select { - case sub <- v: - case <-time.After(p.timeout): - } - return - } - - select { - case sub <- v: - default: - } -} diff --git a/pkg/pubsub/publisher_test.go b/pkg/pubsub/publisher_test.go deleted file mode 100644 index d6b0a1d59..000000000 --- a/pkg/pubsub/publisher_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package pubsub - -import ( - "fmt" - "testing" - "time" -) - -func TestSendToOneSub(t *testing.T) { - p := NewPublisher(100*time.Millisecond, 10) - c := p.Subscribe() - - p.Publish("hi") - - msg := <-c - if msg.(string) != "hi" { - t.Fatalf("expected message hi but received %v", msg) - } -} - -func TestSendToMultipleSubs(t *testing.T) { - p := NewPublisher(100*time.Millisecond, 10) - subs := []chan interface{}{} - subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) - - p.Publish("hi") - - for _, c := range subs { - msg := <-c - if msg.(string) != "hi" { - t.Fatalf("expected message hi but received %v", msg) - } - } -} - -func TestEvictOneSub(t *testing.T) { - p := NewPublisher(100*time.Millisecond, 10) - s1 := p.Subscribe() - s2 := p.Subscribe() - - p.Evict(s1) - p.Publish("hi") - if _, ok := <-s1; ok { - t.Fatal("expected s1 to not receive the published message") - } - - msg := <-s2 - if msg.(string) != "hi" { - t.Fatalf("expected message hi but received %v", msg) - } -} - -func TestClosePublisher(t *testing.T) { - p := NewPublisher(100*time.Millisecond, 10) - subs := []chan interface{}{} - subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) - p.Close() - - for _, c := range subs { - if _, ok := <-c; ok { - t.Fatal("expected all subscriber channels to be closed") - } - } -} - -const sampleText = "test" - -type testSubscriber struct { - dataCh chan interface{} - ch chan error -} - -func (s *testSubscriber) Wait() error { - return <-s.ch -} - -func newTestSubscriber(p *Publisher) *testSubscriber { - ts := &testSubscriber{ - dataCh: p.Subscribe(), - ch: make(chan error), - } - go func() { - for data := range ts.dataCh { - s, ok := data.(string) - if !ok { - ts.ch <- fmt.Errorf("Unexpected type %T", data) - break - } - if s != sampleText { - ts.ch <- fmt.Errorf("Unexpected text %s", s) - break - } - } - close(ts.ch) - }() - return ts -} - -// for testing with -race -func TestPubSubRace(t *testing.T) { - p := NewPublisher(0, 1024) - var subs [](*testSubscriber) - for j := 0; j < 50; j++ { - subs = append(subs, newTestSubscriber(p)) - } - for j := 0; j < 1000; j++ { - p.Publish(sampleText) - } - time.AfterFunc(1*time.Second, func() { - for _, s := range subs { - p.Evict(s.dataCh) - } - }) - for _, s := range subs { - s.Wait() - } -} - -func BenchmarkPubSub(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - p := NewPublisher(0, 1024) - var subs [](*testSubscriber) - for j := 0; j < 50; j++ { - subs = append(subs, newTestSubscriber(p)) - } - b.StartTimer() - for j := 0; j < 1000; j++ { - p.Publish(sampleText) - } - time.AfterFunc(1*time.Second, func() { - for _, s := range subs { - p.Evict(s.dataCh) - } - }) - for _, s := range subs { - if err := s.Wait(); err != nil { - b.Fatal(err) - } - } - } -} diff --git a/pkg/registrar/registrar.go 
b/pkg/registrar/registrar.go deleted file mode 100644 index 1e75ee995..000000000 --- a/pkg/registrar/registrar.go +++ /dev/null @@ -1,127 +0,0 @@ -// Package registrar provides name registration. It reserves a name to a given key. -package registrar - -import ( - "errors" - "sync" -) - -var ( - // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved - ErrNameReserved = errors.New("name is reserved") - // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved - ErrNameNotReserved = errors.New("name is not reserved") - // ErrNoSuchKey is returned when trying to find the names for a key which is not known - ErrNoSuchKey = errors.New("provided key does not exist") -) - -// Registrar stores indexes a list of keys and their registered names as well as indexes names and the key that they are registered to -// Names must be unique. -// Registrar is safe for concurrent access. -type Registrar struct { - idx map[string][]string - names map[string]string - mu sync.Mutex -} - -// NewRegistrar creates a new Registrar with the an empty index -func NewRegistrar() *Registrar { - return &Registrar{ - idx: make(map[string][]string), - names: make(map[string]string), - } -} - -// Reserve registers a key to a name -// Reserve is idempotent -// Attempting to reserve a key to a name that already exists results in an `ErrNameReserved` -// A name reservation is globally unique -func (r *Registrar) Reserve(name, key string) error { - r.mu.Lock() - defer r.mu.Unlock() - - if k, exists := r.names[name]; exists { - if k != key { - return ErrNameReserved - } - return nil - } - - r.idx[key] = append(r.idx[key], name) - r.names[name] = key - return nil -} - -// Release releases the reserved name -// Once released, a name can be reserved again -func (r *Registrar) Release(name string) { - r.mu.Lock() - defer r.mu.Unlock() - - key, exists := r.names[name] - if !exists { - return - } - - for i, n := range r.idx[key] { - if n != name { - continue - } - r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...) - break - } - - delete(r.names, name) - - if len(r.idx[key]) == 0 { - delete(r.idx, key) - } -} - -// Delete removes all reservations for the passed in key. -// All names reserved to this key are released. 
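// A sketch of the reservation flow above (the names and keys here are
// hypothetical):
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/registrar"
)

func main() {
	r := registrar.NewRegistrar()

	if err := r.Reserve("web", "container-1"); err != nil {
		panic(err)
	}
	// Reserving the same name for a different key fails.
	if err := r.Reserve("web", "container-2"); err == registrar.ErrNameReserved {
		fmt.Println("name already taken")
	}

	// Releasing frees the name for reuse by any key.
	r.Release("web")
	fmt.Println(r.Reserve("web", "container-2")) // <nil>
}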
-func (r *Registrar) Delete(key string) { - r.mu.Lock() - for _, name := range r.idx[key] { - delete(r.names, name) - } - delete(r.idx, key) - r.mu.Unlock() -} - -// GetNames lists all the reserved names for the given key -func (r *Registrar) GetNames(key string) ([]string, error) { - r.mu.Lock() - defer r.mu.Unlock() - - names, exists := r.idx[key] - if !exists { - return nil, ErrNoSuchKey - } - return names, nil -} - -// Get returns the key that the passed in name is reserved to -func (r *Registrar) Get(name string) (string, error) { - r.mu.Lock() - key, exists := r.names[name] - r.mu.Unlock() - - if !exists { - return "", ErrNameNotReserved - } - return key, nil -} - -// GetAll returns all registered names -func (r *Registrar) GetAll() map[string][]string { - out := make(map[string][]string) - - r.mu.Lock() - // copy index into out - for id, names := range r.idx { - out[id] = names - } - r.mu.Unlock() - return out -} diff --git a/pkg/registrar/registrar_test.go b/pkg/registrar/registrar_test.go deleted file mode 100644 index 0c1ef312a..000000000 --- a/pkg/registrar/registrar_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package registrar - -import ( - "reflect" - "testing" -) - -func TestReserve(t *testing.T) { - r := NewRegistrar() - - obj := "test1" - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } - - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } - - obj2 := "test2" - err := r.Reserve("test", obj2) - if err == nil { - t.Fatalf("expected error when reserving an already reserved name to another object") - } - if err != ErrNameReserved { - t.Fatal("expected `ErrNameReserved` error when attempting to reserve an already reserved name") - } -} - -func TestRelease(t *testing.T) { - r := NewRegistrar() - obj := "testing" - - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } - r.Release("test") - r.Release("test") // Ensure there is no panic here - - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } -} - -func TestGetNames(t *testing.T) { - r := NewRegistrar() - obj := "testing" - names := []string{"test1", "test2"} - - for _, name := range names { - if err := r.Reserve(name, obj); err != nil { - t.Fatal(err) - } - } - r.Reserve("test3", "other") - - names2, err := r.GetNames(obj) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(names, names2) { - t.Fatalf("Exepected: %v, Got: %v", names, names2) - } -} - -func TestDelete(t *testing.T) { - r := NewRegistrar() - obj := "testing" - names := []string{"test1", "test2"} - for _, name := range names { - if err := r.Reserve(name, obj); err != nil { - t.Fatal(err) - } - } - - r.Reserve("test3", "other") - r.Delete(obj) - - _, err := r.GetNames(obj) - if err == nil { - t.Fatal("expected error getting names for deleted key") - } - - if err != ErrNoSuchKey { - t.Fatal("expected `ErrNoSuchKey`") - } -} - -func TestGet(t *testing.T) { - r := NewRegistrar() - obj := "testing" - name := "test" - - _, err := r.Get(name) - if err == nil { - t.Fatal("expected error when key does not exist") - } - if err != ErrNameNotReserved { - t.Fatal(err) - } - - if err := r.Reserve(name, obj); err != nil { - t.Fatal(err) - } - - if _, err = r.Get(name); err != nil { - t.Fatal(err) - } - - r.Delete(obj) - _, err = r.Get(name) - if err == nil { - t.Fatal("expected error when key does not exist") - } - if err != ErrNameNotReserved { - t.Fatal(err) - } -} diff --git a/pkg/signal/README.md b/pkg/signal/README.md deleted file mode 100644 index 2b237a594..000000000 --- a/pkg/signal/README.md +++ 
/dev/null @@ -1 +0,0 @@ -This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file diff --git a/pkg/signal/signal.go b/pkg/signal/signal.go deleted file mode 100644 index 68bb77cf5..000000000 --- a/pkg/signal/signal.go +++ /dev/null @@ -1,54 +0,0 @@ -// Package signal provides helper functions for dealing with signals across -// various operating systems. -package signal - -import ( - "fmt" - "os" - "os/signal" - "strconv" - "strings" - "syscall" -) - -// CatchAll catches all signals and relays them to the specified channel. -func CatchAll(sigc chan os.Signal) { - handledSigs := []os.Signal{} - for _, s := range SignalMap { - handledSigs = append(handledSigs, s) - } - signal.Notify(sigc, handledSigs...) -} - -// StopCatch stops catching the signals and closes the specified channel. -func StopCatch(sigc chan os.Signal) { - signal.Stop(sigc) - close(sigc) -} - -// ParseSignal translates a string to a valid syscall signal. -// It returns an error if the signal map doesn't include the given signal. -func ParseSignal(rawSignal string) (syscall.Signal, error) { - s, err := strconv.Atoi(rawSignal) - if err == nil { - if s == 0 { - return -1, fmt.Errorf("Invalid signal: %s", rawSignal) - } - return syscall.Signal(s), nil - } - signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] - if !ok { - return -1, fmt.Errorf("Invalid signal: %s", rawSignal) - } - return signal, nil -} - -// ValidSignalForPlatform returns true if a signal is valid on the platform -func ValidSignalForPlatform(sig syscall.Signal) bool { - for _, v := range SignalMap { - if v == sig { - return true - } - } - return false -} diff --git a/pkg/signal/signal_darwin.go b/pkg/signal/signal_darwin.go deleted file mode 100644 index 946de87e9..000000000 --- a/pkg/signal/signal_darwin.go +++ /dev/null @@ -1,41 +0,0 @@ -package signal - -import ( - "syscall" -) - -// SignalMap is a map of Darwin signals. -var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUG": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INFO": syscall.SIGINFO, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/pkg/signal/signal_freebsd.go b/pkg/signal/signal_freebsd.go deleted file mode 100644 index 6b9569bb7..000000000 --- a/pkg/signal/signal_freebsd.go +++ /dev/null @@ -1,43 +0,0 @@ -package signal - -import ( - "syscall" -) - -// SignalMap is a map of FreeBSD signals. 
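// A sketch of ParseSignal above: it accepts numeric strings, bare names,
// and SIG-prefixed names, resolved against the platform's SignalMap
// (assuming a unix platform where these names are defined):
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/signal"
)

func main() {
	for _, raw := range []string{"9", "TERM", "SIGHUP"} {
		sig, err := signal.ParseSignal(raw)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> signal %d\n", raw, sig)
	}
}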
-var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUF": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INFO": syscall.SIGINFO, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "LWP": syscall.SIGLWP, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "THR": syscall.SIGTHR, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go deleted file mode 100644 index d418cbe9e..000000000 --- a/pkg/signal/signal_linux.go +++ /dev/null @@ -1,80 +0,0 @@ -package signal - -import ( - "syscall" -) - -const ( - sigrtmin = 34 - sigrtmax = 64 -) - -// SignalMap is a map of Linux signals. -var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUS": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CLD": syscall.SIGCLD, - "CONT": syscall.SIGCONT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "PIPE": syscall.SIGPIPE, - "POLL": syscall.SIGPOLL, - "PROF": syscall.SIGPROF, - "PWR": syscall.SIGPWR, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STKFLT": syscall.SIGSTKFLT, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "UNUSED": syscall.SIGUNUSED, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, - "RTMIN": sigrtmin, - "RTMIN+1": sigrtmin + 1, - "RTMIN+2": sigrtmin + 2, - "RTMIN+3": sigrtmin + 3, - "RTMIN+4": sigrtmin + 4, - "RTMIN+5": sigrtmin + 5, - "RTMIN+6": sigrtmin + 6, - "RTMIN+7": sigrtmin + 7, - "RTMIN+8": sigrtmin + 8, - "RTMIN+9": sigrtmin + 9, - "RTMIN+10": sigrtmin + 10, - "RTMIN+11": sigrtmin + 11, - "RTMIN+12": sigrtmin + 12, - "RTMIN+13": sigrtmin + 13, - "RTMIN+14": sigrtmin + 14, - "RTMIN+15": sigrtmin + 15, - "RTMAX-14": sigrtmax - 14, - "RTMAX-13": sigrtmax - 13, - "RTMAX-12": sigrtmax - 12, - "RTMAX-11": sigrtmax - 11, - "RTMAX-10": sigrtmax - 10, - "RTMAX-9": sigrtmax - 9, - "RTMAX-8": sigrtmax - 8, - "RTMAX-7": sigrtmax - 7, - "RTMAX-6": sigrtmax - 6, - "RTMAX-5": sigrtmax - 5, - "RTMAX-4": sigrtmax - 4, - "RTMAX-3": sigrtmax - 3, - "RTMAX-2": sigrtmax - 2, - "RTMAX-1": sigrtmax - 1, - "RTMAX": sigrtmax, -} diff --git a/pkg/signal/signal_solaris.go b/pkg/signal/signal_solaris.go deleted file mode 100644 index 89576b9e3..000000000 --- a/pkg/signal/signal_solaris.go +++ /dev/null @@ -1,42 +0,0 @@ -package signal - -import ( - "syscall" -) - -// SignalMap is a map of Solaris signals. 
-// SIGINFO and SIGTHR not defined for Solaris -var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUF": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "LWP": syscall.SIGLWP, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/pkg/signal/signal_unix.go b/pkg/signal/signal_unix.go deleted file mode 100644 index 6621d3718..000000000 --- a/pkg/signal/signal_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !windows - -package signal - -import ( - "syscall" -) - -// Signals used in api/client (no windows equivalent, use -// invalid signals so they don't get handled) - -const ( - // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted. - SIGCHLD = syscall.SIGCHLD - // SIGWINCH is a signal sent to a process when its controlling terminal changes its size - SIGWINCH = syscall.SIGWINCH - // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading - SIGPIPE = syscall.SIGPIPE - // DefaultStopSignal is the syscall signal used to stop a container in unix systems. - DefaultStopSignal = "SIGTERM" -) diff --git a/pkg/signal/signal_unsupported.go b/pkg/signal/signal_unsupported.go deleted file mode 100644 index c592d37df..000000000 --- a/pkg/signal/signal_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !linux,!darwin,!freebsd,!windows,!solaris - -package signal - -import ( - "syscall" -) - -// SignalMap is an empty map of signals for unsupported platform. -var SignalMap = map[string]syscall.Signal{} diff --git a/pkg/signal/signal_windows.go b/pkg/signal/signal_windows.go deleted file mode 100644 index 698cbf2dc..000000000 --- a/pkg/signal/signal_windows.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build windows - -package signal - -import ( - "syscall" -) - -// Signals used in api/client (no windows equivalent, use -// invalid signals so they don't get handled) -const ( - SIGCHLD = syscall.Signal(0xff) - SIGWINCH = syscall.Signal(0xff) - SIGPIPE = syscall.Signal(0xff) - // DefaultStopSignal is the syscall signal used to stop a container in windows systems. - DefaultStopSignal = "15" -) - -// SignalMap is a map of "supported" signals. As per the comment in GOLang's -// ztypes_windows.go: "More invented values for signals". Windows doesn't -// really support signals in any way, shape or form that Unix does. -// -// We have these so that docker kill can be used to gracefully (TERM) and -// forcibly (KILL) terminate a container on Windows. 
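Aside: CatchAll and StopCatch, removed earlier in this patch, are a subscribe/unsubscribe pair built directly on os/signal. The equivalent without the package, relaying a couple of explicit signals instead of iterating the whole SignalMap:

    package main

    import (
        "fmt"
        "os"
        "os/signal"
        "syscall"
    )

    func main() {
        // What the removed CatchAll did, with an explicit signal list in
        // place of the full SignalMap.
        sigc := make(chan os.Signal, 16)
        signal.Notify(sigc, syscall.SIGUSR1, syscall.SIGUSR2)

        done := make(chan struct{})
        go func() {
            for s := range sigc {
                fmt.Println("received", s)
            }
            close(done)
        }()

        // What the removed StopCatch did: unregister, then close the channel
        // so the consumer goroutine can drain and exit.
        signal.Stop(sigc)
        close(sigc)
        <-done
    }
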
-var SignalMap = map[string]syscall.Signal{ - "KILL": syscall.SIGKILL, - "TERM": syscall.SIGTERM, -} diff --git a/pkg/signal/trap.go b/pkg/signal/trap.go deleted file mode 100644 index d35ef0e86..000000000 --- a/pkg/signal/trap.go +++ /dev/null @@ -1,81 +0,0 @@ -package signal - -import ( - "os" - gosignal "os/signal" - "runtime" - "sync/atomic" - "syscall" - - "github.com/Sirupsen/logrus" -) - -// Trap sets up a simplified signal "trap", appropriate for common -// behavior expected from a vanilla unix command-line tool in general -// (and the Docker engine in particular). -// -// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. -// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is -// skipped and the process is terminated immediately (allows force quit of stuck daemon) -// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. -// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while -// the docker daemon is not restarted and also running under systemd. -// Fixes https://github.com/docker/docker/issues/19728 -// -func Trap(cleanup func()) { - c := make(chan os.Signal, 1) - // we will handle INT, TERM, QUIT, SIGPIPE here - signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE} - gosignal.Notify(c, signals...) - go func() { - interruptCount := uint32(0) - for sig := range c { - if sig == syscall.SIGPIPE { - continue - } - - go func(sig os.Signal) { - logrus.Infof("Processing signal '%v'", sig) - switch sig { - case os.Interrupt, syscall.SIGTERM: - if atomic.LoadUint32(&interruptCount) < 3 { - // Initiate the cleanup only once - if atomic.AddUint32(&interruptCount, 1) == 1 { - // Call the provided cleanup handler - cleanup() - os.Exit(0) - } else { - return - } - } else { - // 3 SIGTERM/INT signals received; force exit without cleanup - logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") - } - case syscall.SIGQUIT: - DumpStacks() - logrus.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT") - } - //for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # - os.Exit(128 + int(sig.(syscall.Signal))) - }(sig) - } - }() -} - -// DumpStacks dumps the runtime stack. -func DumpStacks() { - var ( - buf []byte - stackSize int - ) - bufferLen := 16384 - for stackSize == len(buf) { - buf = make([]byte, bufferLen) - stackSize = runtime.Stack(buf, true) - bufferLen *= 2 - } - buf = buf[:stackSize] - // Note that if the daemon is started with a less-verbose log-level than "info" (the default), the goroutine - // traces won't show up in the log. - logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) -} diff --git a/pkg/stdcopy/stdcopy.go b/pkg/stdcopy/stdcopy.go deleted file mode 100644 index 8f67ece94..000000000 --- a/pkg/stdcopy/stdcopy.go +++ /dev/null @@ -1,185 +0,0 @@ -package stdcopy - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "sync" - - "github.com/Sirupsen/logrus" -) - -// StdType is the type of standard stream -// a writer can multiplex to. -type StdType byte - -const ( - // Stdin represents standard input stream type. - Stdin StdType = iota - // Stdout represents standard output stream type. - Stdout - // Stderr represents standard error steam type. 
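Aside: the Trap helper in the trap.go deleted above follows the usual shell convention for non-clean shutdown, exiting with 128 plus the signal number. A tiny sketch of what that arithmetic yields for the three signals Trap handles:

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        // Trap calls os.Exit(128 + int(sig)) on its non-clean paths, so:
        // SIGINT -> 130, SIGTERM -> 143, SIGQUIT -> 131.
        for _, sig := range []syscall.Signal{syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT} {
            fmt.Printf("%v -> exit status %d\n", sig, 128+int(sig))
        }
    }
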
- Stderr - - stdWriterPrefixLen = 8 - stdWriterFdIndex = 0 - stdWriterSizeIndex = 4 - - startingBufLen = 32*1024 + stdWriterPrefixLen + 1 -) - -var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} - -// stdWriter is wrapper of io.Writer with extra customized info. -type stdWriter struct { - io.Writer - prefix byte -} - -// Write sends the buffer to the underneath writer. -// It inserts the prefix header before the buffer, -// so stdcopy.StdCopy knows where to multiplex the output. -// It makes stdWriter to implement io.Writer. -func (w *stdWriter) Write(p []byte) (n int, err error) { - if w == nil || w.Writer == nil { - return 0, errors.New("Writer not instantiated") - } - if p == nil { - return 0, nil - } - - header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} - binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) - buf := bufPool.Get().(*bytes.Buffer) - buf.Write(header[:]) - buf.Write(p) - - n, err = w.Writer.Write(buf.Bytes()) - n -= stdWriterPrefixLen - if n < 0 { - n = 0 - } - - buf.Reset() - bufPool.Put(buf) - return -} - -// NewStdWriter instantiates a new Writer. -// Everything written to it will be encapsulated using a custom format, -// and written to the underlying `w` stream. -// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. -// `t` indicates the id of the stream to encapsulate. -// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. -func NewStdWriter(w io.Writer, t StdType) io.Writer { - return &stdWriter{ - Writer: w, - prefix: byte(t), - } -} - -// StdCopy is a modified version of io.Copy. -// -// StdCopy will demultiplex `src`, assuming that it contains two streams, -// previously multiplexed together using a StdWriter instance. -// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. -// -// StdCopy will read until it hits EOF on `src`. It will then return a nil error. -// In other words: if `err` is non nil, it indicates a real underlying error. -// -// `written` will hold the total number of bytes written to `dstout` and `dsterr`. -func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { - var ( - buf = make([]byte, startingBufLen) - bufLen = len(buf) - nr, nw int - er, ew error - out io.Writer - frameSize int - ) - - for { - // Make sure we have at least a full header - for nr < stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < stdWriterPrefixLen { - logrus.Debugf("Corrupted prefix: %v", buf[:nr]) - return written, nil - } - break - } - if er != nil { - logrus.Debugf("Error reading header: %s", er) - return 0, er - } - } - - // Check the first byte to know where to write - switch StdType(buf[stdWriterFdIndex]) { - case Stdin: - fallthrough - case Stdout: - // Write on stdout - out = dstout - case Stderr: - // Write on stderr - out = dsterr - default: - logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex]) - return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) - } - - // Retrieve the size of the frame - frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) - logrus.Debugf("framesize: %d", frameSize) - - // Check if the buffer is big enough to read the frame. - // Extend it if necessary. 
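Aside: everything StdCopy does here hinges on the 8-byte header written by stdWriter: the stream id in byte 0, bytes 1-3 unused, and the big-endian payload length in bytes 4-7. A self-contained sketch of encoding and decoding one frame with that exact layout:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // Same layout constants as the removed stdcopy package.
    const (
        prefixLen = 8 // stdWriterPrefixLen
        fdIndex   = 0 // stdWriterFdIndex
        sizeIndex = 4 // stdWriterSizeIndex
    )

    func main() {
        payload := []byte("hello")

        // Encode: stream id (1 = Stdout) at byte 0, big-endian length at 4-7.
        frame := make([]byte, prefixLen+len(payload))
        frame[fdIndex] = 1
        binary.BigEndian.PutUint32(frame[sizeIndex:], uint32(len(payload)))
        copy(frame[prefixLen:], payload)

        // Decode: read the header back, then slice out the payload.
        stream := frame[fdIndex]
        size := binary.BigEndian.Uint32(frame[sizeIndex : sizeIndex+4])
        fmt.Printf("stream=%d size=%d payload=%q\n", stream, size, frame[prefixLen:prefixLen+size])
    }
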
- if frameSize+stdWriterPrefixLen > bufLen { - logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf)) - buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) - bufLen = len(buf) - } - - // While the amount of bytes read is less than the size of the frame + header, we keep reading - for nr < frameSize+stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < frameSize+stdWriterPrefixLen { - logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr]) - return written, nil - } - break - } - if er != nil { - logrus.Debugf("Error reading frame: %s", er) - return 0, er - } - } - - // Write the retrieved frame (without header) - nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) - if ew != nil { - logrus.Debugf("Error writing frame: %s", ew) - return 0, ew - } - // If the frame has not been fully written: error - if nw != frameSize { - logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize) - return 0, io.ErrShortWrite - } - written += int64(nw) - - // Move the rest of the buffer to the beginning - copy(buf, buf[frameSize+stdWriterPrefixLen:]) - // Move the index - nr -= frameSize + stdWriterPrefixLen - } -} diff --git a/pkg/stdcopy/stdcopy_test.go b/pkg/stdcopy/stdcopy_test.go deleted file mode 100644 index 3137a7523..000000000 --- a/pkg/stdcopy/stdcopy_test.go +++ /dev/null @@ -1,260 +0,0 @@ -package stdcopy - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "strings" - "testing" -) - -func TestNewStdWriter(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - if writer == nil { - t.Fatalf("NewStdWriter with an invalid StdType should not return nil.") - } -} - -func TestWriteWithUnitializedStdWriter(t *testing.T) { - writer := stdWriter{ - Writer: nil, - prefix: byte(Stdout), - } - n, err := writer.Write([]byte("Something here")) - if n != 0 || err == nil { - t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter") - } -} - -func TestWriteWithNilBytes(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - n, err := writer.Write(nil) - if err != nil { - t.Fatalf("Shouldn't have fail when given no data") - } - if n > 0 { - t.Fatalf("Write should have written 0 byte, but has written %d", n) - } -} - -func TestWrite(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - data := []byte("Test StdWrite.Write") - n, err := writer.Write(data) - if err != nil { - t.Fatalf("Error while writing with StdWrite") - } - if n != len(data) { - t.Fatalf("Write should have written %d byte but wrote %d.", len(data), n) - } -} - -type errWriter struct { - n int - err error -} - -func (f *errWriter) Write(buf []byte) (int, error) { - return f.n, f.err -} - -func TestWriteWithWriterError(t *testing.T) { - expectedError := errors.New("expected") - expectedReturnedBytes := 10 - writer := NewStdWriter(&errWriter{ - n: stdWriterPrefixLen + expectedReturnedBytes, - err: expectedError}, Stdout) - data := []byte("This won't get written, sigh") - n, err := writer.Write(data) - if err != expectedError { - t.Fatalf("Didn't get expected error.") - } - if n != expectedReturnedBytes { - t.Fatalf("Didn't get expected written bytes %d, got %d.", - expectedReturnedBytes, n) - } -} - -func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) { - writer := NewStdWriter(&errWriter{n: -1}, Stdout) - data := []byte("This won't get written, sigh") - actual, _ := writer.Write(data) - if actual != 0 { - t.Fatalf("Expected 
returned written bytes equal to 0, got %d", actual) - } -} - -func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) { - buffer = new(bytes.Buffer) - dstOut := NewStdWriter(buffer, Stdout) - _, err = dstOut.Write(stdOutBytes) - if err != nil { - return - } - dstErr := NewStdWriter(buffer, Stderr) - _, err = dstErr.Write(stdErrBytes) - return -} - -func TestStdCopyWriteAndRead(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - written, err := StdCopy(ioutil.Discard, ioutil.Discard, buffer) - if err != nil { - t.Fatal(err) - } - expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes) - if written != int64(expectedTotalWritten) { - t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written) - } -} - -type customReader struct { - n int - err error - totalCalls int - correctCalls int - src *bytes.Buffer -} - -func (f *customReader) Read(buf []byte) (int, error) { - f.totalCalls++ - if f.totalCalls <= f.correctCalls { - return f.src.Read(buf) - } - return f.n, f.err -} - -func TestStdCopyReturnsErrorReadingHeader(t *testing.T) { - expectedError := errors.New("error") - reader := &customReader{ - err: expectedError} - written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) - if written != 0 { - t.Fatalf("Expected 0 bytes read, got %d", written) - } - if err != expectedError { - t.Fatalf("Didn't get expected error") - } -} - -func TestStdCopyReturnsErrorReadingFrame(t *testing.T) { - expectedError := errors.New("error") - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - reader := &customReader{ - correctCalls: 1, - n: stdWriterPrefixLen + 1, - err: expectedError, - src: buffer} - written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) - if written != 0 { - t.Fatalf("Expected 0 bytes read, got %d", written) - } - if err != expectedError { - t.Fatalf("Didn't get expected error") - } -} - -func TestStdCopyDetectsCorruptedFrame(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - reader := &customReader{ - correctCalls: 1, - n: stdWriterPrefixLen + 1, - err: io.EOF, - src: buffer} - written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) - if written != startingBufLen { - t.Fatalf("Expected %d bytes read, got %d", startingBufLen, written) - } - if err != nil { - t.Fatal("Didn't get nil error") - } -} - -func TestStdCopyWithInvalidInputHeader(t *testing.T) { - dstOut := NewStdWriter(ioutil.Discard, Stdout) - dstErr := NewStdWriter(ioutil.Discard, Stderr) - src := strings.NewReader("Invalid input") - _, err := StdCopy(dstOut, dstErr, src) - if err == nil { - t.Fatal("StdCopy with invalid input header should fail.") - } -} - -func TestStdCopyWithCorruptedPrefix(t *testing.T) { - data := []byte{0x01, 0x02, 0x03} - src := bytes.NewReader(data) - written, err := StdCopy(nil, nil, src) - if err != nil { - t.Fatalf("StdCopy should not return an error with corrupted prefix.") - } - if written != 0 { - t.Fatalf("StdCopy should have written 0, but has written %d", written) - } -} - -func 
TestStdCopyReturnsWriteErrors(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - expectedError := errors.New("expected") - - dstOut := &errWriter{err: expectedError} - - written, err := StdCopy(dstOut, ioutil.Discard, buffer) - if written != 0 { - t.Fatalf("StdCopy should have written 0, but has written %d", written) - } - if err != expectedError { - t.Fatalf("Didn't get expected error, got %v", err) - } -} - -func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - dstOut := &errWriter{n: startingBufLen - 10} - - written, err := StdCopy(dstOut, ioutil.Discard, buffer) - if written != 0 { - t.Fatalf("StdCopy should have return 0 written bytes, but returned %d", written) - } - if err != io.ErrShortWrite { - t.Fatalf("Didn't get expected io.ErrShortWrite error") - } -} - -func BenchmarkWrite(b *testing.B) { - w := NewStdWriter(ioutil.Discard, Stdout) - data := []byte("Test line for testing stdwriter performance\n") - data = bytes.Repeat(data, 100) - b.SetBytes(int64(len(data))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err := w.Write(data); err != nil { - b.Fatal(err) - } - } -} diff --git a/pkg/streamformatter/streamformatter.go b/pkg/streamformatter/streamformatter.go deleted file mode 100644 index ce6ea79de..000000000 --- a/pkg/streamformatter/streamformatter.go +++ /dev/null @@ -1,172 +0,0 @@ -// Package streamformatter provides helper functions to format a stream. -package streamformatter - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" -) - -// StreamFormatter formats a stream, optionally using JSON. -type StreamFormatter struct { - json bool -} - -// NewStreamFormatter returns a simple StreamFormatter -func NewStreamFormatter() *StreamFormatter { - return &StreamFormatter{} -} - -// NewJSONStreamFormatter returns a StreamFormatter configured to stream json -func NewJSONStreamFormatter() *StreamFormatter { - return &StreamFormatter{true} -} - -const streamNewline = "\r\n" - -var streamNewlineBytes = []byte(streamNewline) - -// FormatStream formats the specified stream. -func (sf *StreamFormatter) FormatStream(str string) []byte { - if sf.json { - b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) - } - return []byte(str + "\r") -} - -// FormatStatus formats the specified objects according to the specified format (and id). -func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { - str := fmt.Sprintf(format, a...) - if sf.json { - b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) - } - return []byte(str + streamNewline) -} - -// FormatError formats the specified error. 
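Aside: in JSON mode the formatter above reduces to "format, marshal, append \r\n". A sketch reproducing what FormatStatus("ID", "%s%d", "a", 1) emits, with a two-field struct standing in for jsonmessage.JSONMessage:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Stand-in for the two JSONMessage fields FormatStatus populates.
    type message struct {
        Status string `json:"status,omitempty"`
        ID     string `json:"id,omitempty"`
    }

    func main() {
        // Format the status, marshal it, append the "\r\n" terminator.
        b, err := json.Marshal(&message{Status: fmt.Sprintf("%s%d", "a", 1), ID: "ID"})
        if err != nil {
            panic(err)
        }
        fmt.Printf("%q\n", string(b)+"\r\n") // "{\"status\":\"a1\",\"id\":\"ID\"}\r\n"
    }
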
-func (sf *StreamFormatter) FormatError(err error) []byte { - if sf.json { - jsonError, ok := err.(*jsonmessage.JSONError) - if !ok { - jsonError = &jsonmessage.JSONError{Message: err.Error()} - } - if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { - return append(b, streamNewlineBytes...) - } - return []byte("{\"error\":\"format error\"}" + streamNewline) - } - return []byte("Error: " + err.Error() + streamNewline) -} - -// FormatProgress formats the progress information for a specified action. -func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { - if progress == nil { - progress = &jsonmessage.JSONProgress{} - } - if sf.json { - var auxJSON *json.RawMessage - if aux != nil { - auxJSONBytes, err := json.Marshal(aux) - if err != nil { - return nil - } - auxJSON = new(json.RawMessage) - *auxJSON = auxJSONBytes - } - b, err := json.Marshal(&jsonmessage.JSONMessage{ - Status: action, - ProgressMessage: progress.String(), - Progress: progress, - ID: id, - Aux: auxJSON, - }) - if err != nil { - return nil - } - return append(b, streamNewlineBytes...) - } - endl := "\r" - if progress.String() == "" { - endl += "\n" - } - return []byte(action + " " + progress.String() + endl) -} - -// NewProgressOutput returns a progress.Output object that can be passed to -// progress.NewProgressReader. -func (sf *StreamFormatter) NewProgressOutput(out io.Writer, newLines bool) progress.Output { - return &progressOutput{ - sf: sf, - out: out, - newLines: newLines, - } -} - -type progressOutput struct { - sf *StreamFormatter - out io.Writer - newLines bool -} - -// WriteProgress formats progress information from a ProgressReader. -func (out *progressOutput) WriteProgress(prog progress.Progress) error { - var formatted []byte - if prog.Message != "" { - formatted = out.sf.FormatStatus(prog.ID, prog.Message) - } else { - jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total} - formatted = out.sf.FormatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) - } - _, err := out.out.Write(formatted) - if err != nil { - return err - } - - if out.newLines && prog.LastUpdate { - _, err = out.out.Write(out.sf.FormatStatus("", "")) - return err - } - - return nil -} - -// StdoutFormatter is a streamFormatter that writes to the standard output. -type StdoutFormatter struct { - io.Writer - *StreamFormatter -} - -func (sf *StdoutFormatter) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} - -// StderrFormatter is a streamFormatter that writes to the standard error. 
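Aside: StdoutFormatter above is a plain io.Writer adapter: each chunk is passed through FormatStream before hitting the underlying writer, and the returned count is the caller's byte count, not the formatted one. A reduced sketch of the same pattern, with a trivial formatter standing in for StreamFormatter:

    package main

    import (
        "fmt"
        "io"
        "os"
    )

    // lineFormatter stands in for StreamFormatter: it decorates each chunk.
    type lineFormatter struct {
        w io.Writer
    }

    // Write reports len(buf) on success, as the removed formatters do, so
    // callers see how many of *their* bytes were consumed rather than how
    // many formatted bytes were emitted.
    func (f *lineFormatter) Write(buf []byte) (int, error) {
        formatted := []byte("stream: " + string(buf))
        n, err := f.w.Write(formatted)
        if n != len(formatted) {
            return n, io.ErrShortWrite
        }
        return len(buf), err
    }

    func main() {
        var out io.Writer = &lineFormatter{w: os.Stdout}
        fmt.Fprintln(out, "hello") // prints "stream: hello"
    }
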
-type StderrFormatter struct { - io.Writer - *StreamFormatter -} - -func (sf *StderrFormatter) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} diff --git a/pkg/streamformatter/streamformatter_test.go b/pkg/streamformatter/streamformatter_test.go deleted file mode 100644 index 93ec90f5f..000000000 --- a/pkg/streamformatter/streamformatter_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package streamformatter - -import ( - "encoding/json" - "errors" - "reflect" - "strings" - "testing" - - "github.com/docker/docker/pkg/jsonmessage" -) - -func TestFormatStream(t *testing.T) { - sf := NewStreamFormatter() - res := sf.FormatStream("stream") - if string(res) != "stream"+"\r" { - t.Fatalf("%q", res) - } -} - -func TestFormatJSONStatus(t *testing.T) { - sf := NewStreamFormatter() - res := sf.FormatStatus("ID", "%s%d", "a", 1) - if string(res) != "a1\r\n" { - t.Fatalf("%q", res) - } -} - -func TestFormatSimpleError(t *testing.T) { - sf := NewStreamFormatter() - res := sf.FormatError(errors.New("Error for formatter")) - if string(res) != "Error: Error for formatter\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatStream(t *testing.T) { - sf := NewJSONStreamFormatter() - res := sf.FormatStream("stream") - if string(res) != `{"stream":"stream"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatStatus(t *testing.T) { - sf := NewJSONStreamFormatter() - res := sf.FormatStatus("ID", "%s%d", "a", 1) - if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatSimpleError(t *testing.T) { - sf := NewJSONStreamFormatter() - res := sf.FormatError(errors.New("Error for formatter")) - if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatJSONError(t *testing.T) { - sf := NewJSONStreamFormatter() - err := &jsonmessage.JSONError{Code: 50, Message: "Json error"} - res := sf.FormatError(err) - if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatProgress(t *testing.T) { - sf := NewJSONStreamFormatter() - progress := &jsonmessage.JSONProgress{ - Current: 15, - Total: 30, - Start: 1, - } - res := sf.FormatProgress("id", "action", progress, nil) - msg := &jsonmessage.JSONMessage{} - if err := json.Unmarshal(res, msg); err != nil { - t.Fatal(err) - } - if msg.ID != "id" { - t.Fatalf("ID must be 'id', got: %s", msg.ID) - } - if msg.Status != "action" { - t.Fatalf("Status must be 'action', got: %s", msg.Status) - } - - // The progress will always be in the format of: - // [=========================> ] 15 B/30 B 404933h7m11s - // The last entry '404933h7m11s' is the timeLeftBox. - // However, the timeLeftBox field may change as progress.String() depends on time.Now(). - // Therefore, we have to strip the timeLeftBox from the strings to do the comparison. - - // Compare the progress strings before the timeLeftBox - expectedProgress := "[=========================> ] 15 B/30 B" - // if terminal column is <= 110, expectedProgressShort is expected. 
- expectedProgressShort := " 15 B/30 B" - if !(strings.HasPrefix(msg.ProgressMessage, expectedProgress) || - strings.HasPrefix(msg.ProgressMessage, expectedProgressShort)) { - t.Fatalf("ProgressMessage without the timeLeftBox must be %s or %s, got: %s", - expectedProgress, expectedProgressShort, msg.ProgressMessage) - } - - if !reflect.DeepEqual(msg.Progress, progress) { - t.Fatal("Original progress not equals progress from FormatProgress") - } -} diff --git a/pkg/symlink/LICENSE.APACHE b/pkg/symlink/LICENSE.APACHE deleted file mode 100644 index 34c4ea7c5..000000000 --- a/pkg/symlink/LICENSE.APACHE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014-2016 Docker, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/pkg/symlink/LICENSE.BSD b/pkg/symlink/LICENSE.BSD deleted file mode 100644 index 9b4f4a294..000000000 --- a/pkg/symlink/LICENSE.BSD +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/symlink/README.md b/pkg/symlink/README.md deleted file mode 100644 index 8dba54fd0..000000000 --- a/pkg/symlink/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks, -as well as a Windows long-path aware version of filepath.EvalSymlinks -from the [Go standard library](https://golang.org/pkg/path/filepath). - -The code from filepath.EvalSymlinks has been adapted in fs.go. -Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. diff --git a/pkg/symlink/fs.go b/pkg/symlink/fs.go deleted file mode 100644 index dcf707f42..000000000 --- a/pkg/symlink/fs.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.BSD file. - -// This code is a modified version of path/filepath/symlink.go from the Go standard library. - -package symlink - -import ( - "bytes" - "errors" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an -// absolute path. This function handles paths in a platform-agnostic manner. 
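Aside: the guarantee this package makes is that resolution never escapes the scope: if /foo/bar is a symlink to /outside, resolving "/foo/bar" within scope "/foo" yields "/foo/outside", not "/outside". A hedged usage sketch, assuming the import path as it existed before this removal:

    package main

    import (
        "fmt"

        // Import path as it existed before this patch removed the package.
        "github.com/docker/docker/pkg/symlink"
    )

    func main() {
        // With /foo/bar -> /outside on disk, the result stays inside the
        // scope: "/foo/outside" rather than "/outside".
        p, err := symlink.FollowSymlinkInScope("/foo/bar", "/foo")
        if err != nil {
            panic(err)
        }
        fmt.Println(p)
    }
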
-func FollowSymlinkInScope(path, root string) (string, error) { - path, err := filepath.Abs(filepath.FromSlash(path)) - if err != nil { - return "", err - } - root, err = filepath.Abs(filepath.FromSlash(root)) - if err != nil { - return "", err - } - return evalSymlinksInScope(path, root) -} - -// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return -// a result guaranteed to be contained within the scope `root`, at the time of the call. -// Symlinks in `root` are not evaluated and left as-is. -// Errors encountered while attempting to evaluate symlinks in path will be returned. -// Non-existing paths are valid and do not constitute an error. -// `path` has to contain `root` as a prefix, or else an error will be returned. -// Trying to break out from `root` does not constitute an error. -// -// Example: -// If /foo/bar -> /outside, -// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide" -// -// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks -// are created and not to create subsequently, additional symlinks that could potentially make a -// previously-safe path, unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") -// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should -// no longer be considered safely contained in "/foo". -func evalSymlinksInScope(path, root string) (string, error) { - root = filepath.Clean(root) - if path == root { - return path, nil - } - if !strings.HasPrefix(path, root) { - return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) - } - const maxIter = 255 - originalPath := path - // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" - path = path[len(root):] - if root == string(filepath.Separator) { - path = string(filepath.Separator) + path - } - if !strings.HasPrefix(path, string(filepath.Separator)) { - return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) - } - path = filepath.Clean(path) - // consume path by taking each frontmost path element, - // expanding it if it's a symlink, and appending it to b - var b bytes.Buffer - // b here will always be considered to be the "current absolute path inside - // root" when we append paths to it, we also append a slash and use - // filepath.Clean after the loop to trim the trailing slash - for n := 0; path != ""; n++ { - if n > maxIter { - return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) - } - - // find next path component, p - i := strings.IndexRune(path, filepath.Separator) - var p string - if i == -1 { - p, path = path, "" - } else { - p, path = path[:i], path[i+1:] - } - - if p == "" { - continue - } - - // this takes a b.String() like "b/../" and a p like "c" and turns it - // into "/b/../c" which then gets filepath.Cleaned into "/c" and then - // root gets prepended and we Clean again (to remove any trailing slash - // if the first Clean gave us just "/") - cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) - if cleanP == string(filepath.Separator) { - // never Lstat "/" itself - b.Reset() - continue - } - fullP := filepath.Clean(root + cleanP) - - fi, err := os.Lstat(fullP) - if os.IsNotExist(err) { - // if p does not exist, accept it - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - if err != nil { - return "", err - } - if fi.Mode()&os.ModeSymlink == 0 { - b.WriteString(p + 
string(filepath.Separator)) - continue - } - - // it's a symlink, put it at the front of path - dest, err := os.Readlink(fullP) - if err != nil { - return "", err - } - if system.IsAbs(dest) { - b.Reset() - } - path = dest + string(filepath.Separator) + path - } - - // see note above on "fullP := ..." for why this is double-cleaned and - // what's happening here - return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil -} - -// EvalSymlinks returns the path name after the evaluation of any symbolic -// links. -// If path is relative the result will be relative to the current directory, -// unless one of the components is an absolute symbolic link. -// This version has been updated to support long paths prepended with `\\?\`. -func EvalSymlinks(path string) (string, error) { - return evalSymlinks(path) -} diff --git a/pkg/symlink/fs_unix.go b/pkg/symlink/fs_unix.go deleted file mode 100644 index 818004f26..000000000 --- a/pkg/symlink/fs_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package symlink - -import ( - "path/filepath" -) - -func evalSymlinks(path string) (string, error) { - return filepath.EvalSymlinks(path) -} diff --git a/pkg/symlink/fs_unix_test.go b/pkg/symlink/fs_unix_test.go deleted file mode 100644 index 7085c0b66..000000000 --- a/pkg/symlink/fs_unix_test.go +++ /dev/null @@ -1,407 +0,0 @@ -// +build !windows - -// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE - -package symlink - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -// TODO Windows: This needs some serious work to port to Windows. For now, -// turning off testing in this package. - -type dirOrLink struct { - path string - target string -} - -func makeFs(tmpdir string, fs []dirOrLink) error { - for _, s := range fs { - s.path = filepath.Join(tmpdir, s.path) - if s.target == "" { - os.MkdirAll(s.path, 0755) - continue - } - if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { - return err - } - if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { - return err - } - } - return nil -} - -func testSymlink(tmpdir, path, expected, scope string) error { - rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) - if err != nil { - return err - } - expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) - if err != nil { - return err - } - if expected != rewrite { - return fmt.Errorf("Expected %q got %q", expected, rewrite) - } - return nil -} - -func TestFollowSymlinkAbsolute(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkRelativePath(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") - if err != nil { - t.Fatal(err) - } - defer 
os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{ - {path: "linkdir", target: "realdir"}, - {path: "linkdir/foo/bar"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { - if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { - t.Fatal("expected an error") - } -} - -func TestFollowSymlinkLastLink(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { - t.Fatal(err) - } - // avoid letting allowing symlink e lead us to ../b - // normalize to the "testdata/fs/a" - if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil { - t.Fatal(err) - } - // avoid letting symlink f lead us out of the "testdata" scope - // we don't normalize because symlink f is in scope and there is no - // information leak - if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil { - t.Fatal(err) - } - // avoid letting symlink f lead us out of the "testdata/fs" scope - // we don't normalize because symlink f is in scope and there is no - // information leak - if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkRelativeLinkChain(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - // avoid letting symlink g (pointed at by symlink h) take out of scope - // TODO: we should probably normalize to scope here because ../[....]/root - // is out of scope and we leak information - if err := makeFs(tmpdir, []dirOrLink{ - {path: "testdata/fs/b/h", target: "../g"}, - {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkBreakoutPath(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - // avoid letting symlink -> ../directory/file escape from scope - // normalize to "testdata/fs/j" - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil { - 
t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkToRoot(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - // make sure we don't allow escaping to / - // normalize to dir - if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "foo", "", ""); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkSlashDotdot(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - tmpdir = filepath.Join(tmpdir, "dir", "subdir") - - // make sure we don't allow escaping to / - // normalize to dir - if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "foo", "", ""); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkDotdot(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - tmpdir = filepath.Join(tmpdir, "dir", "subdir") - - // make sure we stay in scope without leaking information - // this also checks for escaping to / - // normalize to dir - if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "foo", "", ""); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkRelativePath2(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkScopeLink(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{ - {path: "root2"}, - {path: "root", target: "root2"}, - {path: "root2/foo", target: "../bar"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkRootScope(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - expected, err := filepath.EvalSymlinks(tmpdir) - if err != nil { - t.Fatal(err) - } - rewrite, err := FollowSymlinkInScope(tmpdir, "/") - if err != nil { - t.Fatal(err) - } - if rewrite != expected { - t.Fatalf("expected %q got %q", expected, rewrite) - } -} - -func TestFollowSymlinkEmpty(t *testing.T) { - res, err := FollowSymlinkInScope("", "") - if err != nil { - t.Fatal(err) - } - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - if res != wd { - t.Fatalf("expected %q got %q", wd, res) - } -} - -func TestFollowSymlinkCircular(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { - t.Fatal("expected 
an error for foo -> foo") - } - - if err := makeFs(tmpdir, []dirOrLink{ - {path: "root/bar", target: "baz"}, - {path: "root/baz", target: "../bak"}, - {path: "root/bak", target: "/bar"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { - t.Fatal("expected an error for bar -> baz -> bak -> bar") - } -} - -func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{ - {path: "root2"}, - {path: "root", target: "root2"}, - {path: "root/a", target: "r/s"}, - {path: "root/r", target: "../root/t"}, - {path: "root/root/t/s/b", target: "/../u"}, - {path: "root/u/c", target: "."}, - {path: "root/u/x/y", target: "../v"}, - {path: "root/u/v", target: "/../w"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{ - {path: "root/slash", target: "/"}, - {path: "root/sym", target: "/idontexist/../slash"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{ - {path: "root/sym", target: "/foo/bar"}, - {path: "root/hello", target: "/sym/../baz"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { - t.Fatal(err) - } -} diff --git a/pkg/symlink/fs_windows.go b/pkg/symlink/fs_windows.go deleted file mode 100644 index 449fe5648..000000000 --- a/pkg/symlink/fs_windows.go +++ /dev/null @@ -1,155 +0,0 @@ -package symlink - -import ( - "bytes" - "errors" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/docker/docker/pkg/longpath" -) - -func toShort(path string) (string, error) { - p, err := syscall.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetShortPathName says we can reuse buffer - n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - if _, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { - return "", err - } - } - return syscall.UTF16ToString(b), nil -} - -func toLong(path string) (string, error) { - p, err := syscall.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetLongPathName says we can reuse buffer - n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - } - b = b[:n] - return syscall.UTF16ToString(b), nil -} - -func evalSymlinks(path string) (string, error) { - path, err := walkSymlinks(path) - if err != nil { - return "", err - } - - p, err := toShort(path) - if err != nil { - return "", err - } - p, 
err = toLong(p) - if err != nil { - return "", err - } - // syscall.GetLongPathName does not change the case of the drive letter, - // but the result of EvalSymlinks must be unique, so we have - // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). - // Make drive letter upper case. - if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { - p = string(p[0]+'A'-'a') + p[1:] - } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { - p = p[:3] + string(p[4]+'A'-'a') + p[5:] - } - return filepath.Clean(p), nil -} - -const utf8RuneSelf = 0x80 - -func walkSymlinks(path string) (string, error) { - const maxIter = 255 - originalPath := path - // consume path by taking each frontmost path element, - // expanding it if it's a symlink, and appending it to b - var b bytes.Buffer - for n := 0; path != ""; n++ { - if n > maxIter { - return "", errors.New("EvalSymlinks: too many links in " + originalPath) - } - - // A path beginning with `\\?\` represents the root, so automatically - // skip that part and begin processing the next segment. - if strings.HasPrefix(path, longpath.Prefix) { - b.WriteString(longpath.Prefix) - path = path[4:] - continue - } - - // find next path component, p - var i = -1 - for j, c := range path { - if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { - i = j - break - } - } - var p string - if i == -1 { - p, path = path, "" - } else { - p, path = path[:i], path[i+1:] - } - - if p == "" { - if b.Len() == 0 { - // must be absolute path - b.WriteRune(filepath.Separator) - } - continue - } - - // If this is the first segment after the long path prefix, accept the - // current segment as a volume root or UNC share and move on to the next. - if b.String() == longpath.Prefix { - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - - fi, err := os.Lstat(b.String() + p) - if err != nil { - return "", err - } - if fi.Mode()&os.ModeSymlink == 0 { - b.WriteString(p) - if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { - b.WriteRune(filepath.Separator) - } - continue - } - - // it's a symlink, put it at the front of path - dest, err := os.Readlink(b.String() + p) - if err != nil { - return "", err - } - if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { - b.Reset() - } - path = dest + string(filepath.Separator) + path - } - return filepath.Clean(b.String()), nil -} diff --git a/pkg/tailfile/tailfile.go b/pkg/tailfile/tailfile.go deleted file mode 100644 index d580584d6..000000000 --- a/pkg/tailfile/tailfile.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package tailfile provides helper functions to read the nth lines of any -// ReadSeeker. -package tailfile - -import ( - "bytes" - "errors" - "io" - "os" -) - -const blockSize = 1024 - -var eol = []byte("\n") - -// ErrNonPositiveLinesNumber is an error returned if the lines number was negative. -var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive") - -//TailFile returns last n lines of reader f (could be a fil). 
-func TailFile(f io.ReadSeeker, n int) ([][]byte, error) { - if n <= 0 { - return nil, ErrNonPositiveLinesNumber - } - size, err := f.Seek(0, os.SEEK_END) - if err != nil { - return nil, err - } - block := -1 - var data []byte - var cnt int - for { - var b []byte - step := int64(block * blockSize) - left := size + step // how many bytes to beginning - if left < 0 { - if _, err := f.Seek(0, os.SEEK_SET); err != nil { - return nil, err - } - b = make([]byte, blockSize+left) - if _, err := f.Read(b); err != nil { - return nil, err - } - data = append(b, data...) - break - } else { - b = make([]byte, blockSize) - if _, err := f.Seek(step, os.SEEK_END); err != nil { - return nil, err - } - if _, err := f.Read(b); err != nil { - return nil, err - } - data = append(b, data...) - } - cnt += bytes.Count(b, eol) - if cnt > n { - break - } - block-- - } - lines := bytes.Split(data, eol) - if n < len(lines) { - return lines[len(lines)-n-1 : len(lines)-1], nil - } - return lines[:len(lines)-1], nil -} diff --git a/pkg/tailfile/tailfile_test.go b/pkg/tailfile/tailfile_test.go deleted file mode 100644 index 31217c036..000000000 --- a/pkg/tailfile/tailfile_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package tailfile - -import ( - "io/ioutil" - "os" - "testing" -) - -func TestTailFile(t *testing.T) { - f, err := ioutil.TempFile("", "tail-test") - if err != nil { - t.Fatal(err) - } - defer f.Close() - defer os.RemoveAll(f.Name()) - testFile := []byte(`first line -second line -third line -fourth line -fifth line -next first line -next second line -next third line -next fourth line -next fifth line -last first line -next first line -next second line -next third line -next fourth line -next fifth line -next first line -next second line -next third line -next fourth line -next fifth line -last second line -last third line -last fourth line -last fifth line -truncated line`) - if _, err := f.Write(testFile); err != nil { - t.Fatal(err) - } - if _, err := f.Seek(0, os.SEEK_SET); err != nil { - t.Fatal(err) - } - expected := []string{"last fourth line", "last fifth line"} - res, err := TailFile(f, 2) - if err != nil { - t.Fatal(err) - } - for i, l := range res { - t.Logf("%s", l) - if expected[i] != string(l) { - t.Fatalf("Expected line %s, got %s", expected[i], l) - } - } -} - -func TestTailFileManyLines(t *testing.T) { - f, err := ioutil.TempFile("", "tail-test") - if err != nil { - t.Fatal(err) - } - defer f.Close() - defer os.RemoveAll(f.Name()) - testFile := []byte(`first line -second line -truncated line`) - if _, err := f.Write(testFile); err != nil { - t.Fatal(err) - } - if _, err := f.Seek(0, os.SEEK_SET); err != nil { - t.Fatal(err) - } - expected := []string{"first line", "second line"} - res, err := TailFile(f, 10000) - if err != nil { - t.Fatal(err) - } - for i, l := range res { - t.Logf("%s", l) - if expected[i] != string(l) { - t.Fatalf("Expected line %s, got %s", expected[i], l) - } - } -} - -func TestTailEmptyFile(t *testing.T) { - f, err := ioutil.TempFile("", "tail-test") - if err != nil { - t.Fatal(err) - } - defer f.Close() - defer os.RemoveAll(f.Name()) - res, err := TailFile(f, 10000) - if err != nil { - t.Fatal(err) - } - if len(res) != 0 { - t.Fatal("Must be empty slice from empty file") - } -} - -func TestTailNegativeN(t *testing.T) { - f, err := ioutil.TempFile("", "tail-test") - if err != nil { - t.Fatal(err) - } - defer f.Close() - defer os.RemoveAll(f.Name()) - testFile := []byte(`first line -second line -truncated line`) - if _, err := f.Write(testFile); err != nil { - t.Fatal(err) - } 
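For context: the TailFile helper removed above reads the file backwards in 1 KB blocks, counting newlines until more than n have been seen, so tailing a large file never requires reading it in full. A minimal usage sketch, assuming the pre-removal import path (the input path is hypothetical):

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/tailfile" // path as it existed before this patch
)

func main() {
	f, err := os.Open("/var/log/example.log") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// TailFile seeks from the end of the ReadSeeker, so only the last
	// few blocks are read regardless of the file's size.
	lines, err := tailfile.TailFile(f, 3) // last 3 complete lines
	if err != nil {
		panic(err)
	}
	for _, line := range lines {
		fmt.Printf("%s\n", line)
	}
}
```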
- if _, err := f.Seek(0, os.SEEK_SET); err != nil { - t.Fatal(err) - } - if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber { - t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) - } - if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber { - t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) - } -} - -func BenchmarkTail(b *testing.B) { - f, err := ioutil.TempFile("", "tail-test") - if err != nil { - b.Fatal(err) - } - defer f.Close() - defer os.RemoveAll(f.Name()) - for i := 0; i < 10000; i++ { - if _, err := f.Write([]byte("tailfile pretty interesting line\n")); err != nil { - b.Fatal(err) - } - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err := TailFile(f, 1000); err != nil { - b.Fatal(err) - } - } -} diff --git a/pkg/tarsum/builder_context.go b/pkg/tarsum/builder_context.go deleted file mode 100644 index b42983e98..000000000 --- a/pkg/tarsum/builder_context.go +++ /dev/null @@ -1,21 +0,0 @@ -package tarsum - -// BuilderContext is an interface extending TarSum by adding the Remove method. -// In general there was concern about adding this method to TarSum itself -// so instead it is being added just to "BuilderContext" which will then -// only be used during the .dockerignore file processing -// - see builder/evaluator.go -type BuilderContext interface { - TarSum - Remove(string) -} - -func (bc *tarSum) Remove(filename string) { - for i, fis := range bc.sums { - if fis.Name() == filename { - bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) - // Note, we don't just return because there could be - // more than one with this name - } - } -} diff --git a/pkg/tarsum/builder_context_test.go b/pkg/tarsum/builder_context_test.go deleted file mode 100644 index 719f72895..000000000 --- a/pkg/tarsum/builder_context_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package tarsum - -import ( - "io" - "io/ioutil" - "os" - "testing" -) - -// Try to remove tarsum (in the BuilderContext) that do not exists, won't change a thing -func TestTarSumRemoveNonExistent(t *testing.T) { - filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" - reader, err := os.Open(filename) - if err != nil { - t.Fatal(err) - } - ts, err := NewTarSum(reader, false, Version0) - if err != nil { - t.Fatal(err) - } - - // Read and discard bytes so that it populates sums - _, err = io.Copy(ioutil.Discard, ts) - if err != nil { - t.Errorf("failed to read from %s: %s", filename, err) - } - - expected := len(ts.GetSums()) - - ts.(BuilderContext).Remove("") - ts.(BuilderContext).Remove("Anything") - - if len(ts.GetSums()) != expected { - t.Fatalf("Expected %v sums, go %v.", expected, ts.GetSums()) - } -} - -// Remove a tarsum (in the BuilderContext) -func TestTarSumRemove(t *testing.T) { - filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" - reader, err := os.Open(filename) - if err != nil { - t.Fatal(err) - } - ts, err := NewTarSum(reader, false, Version0) - if err != nil { - t.Fatal(err) - } - - // Read and discard bytes so that it populates sums - _, err = io.Copy(ioutil.Discard, ts) - if err != nil { - t.Errorf("failed to read from %s: %s", filename, err) - } - - expected := len(ts.GetSums()) - 1 - - ts.(BuilderContext).Remove("etc/sudoers") - - if len(ts.GetSums()) != expected { - t.Fatalf("Expected %v sums, go %v.", expected, len(ts.GetSums())) - } -} diff --git a/pkg/tarsum/fileinfosums.go b/pkg/tarsum/fileinfosums.go deleted file mode 100644 index 5abf5e7ba..000000000 --- a/pkg/tarsum/fileinfosums.go 
+++ /dev/null @@ -1,126 +0,0 @@ -package tarsum - -import "sort" - -// FileInfoSumInterface provides an interface for accessing file checksum -// information within a tar file. This info is accessed through interface -// so the actual name and sum cannot be melded with. -type FileInfoSumInterface interface { - // File name - Name() string - // Checksum of this particular file and its headers - Sum() string - // Position of file in the tar - Pos() int64 -} - -type fileInfoSum struct { - name string - sum string - pos int64 -} - -func (fis fileInfoSum) Name() string { - return fis.name -} -func (fis fileInfoSum) Sum() string { - return fis.sum -} -func (fis fileInfoSum) Pos() int64 { - return fis.pos -} - -// FileInfoSums provides a list of FileInfoSumInterfaces. -type FileInfoSums []FileInfoSumInterface - -// GetFile returns the first FileInfoSumInterface with a matching name. -func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { - for i := range fis { - if fis[i].Name() == name { - return fis[i] - } - } - return nil -} - -// GetAllFile returns a FileInfoSums with all matching names. -func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { - f := FileInfoSums{} - for i := range fis { - if fis[i].Name() == name { - f = append(f, fis[i]) - } - } - return f -} - -// GetDuplicatePaths returns a FileInfoSums with all duplicated paths. -func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { - seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map. - for i := range fis { - f := fis[i] - if _, ok := seen[f.Name()]; ok { - dups = append(dups, f) - } else { - seen[f.Name()] = 0 - } - } - return dups -} - -// Len returns the size of the FileInfoSums. -func (fis FileInfoSums) Len() int { return len(fis) } - -// Swap swaps two FileInfoSum values if a FileInfoSums list. -func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } - -// SortByPos sorts FileInfoSums content by position. -func (fis FileInfoSums) SortByPos() { - sort.Sort(byPos{fis}) -} - -// SortByNames sorts FileInfoSums content by name. -func (fis FileInfoSums) SortByNames() { - sort.Sort(byName{fis}) -} - -// SortBySums sorts FileInfoSums content by sums. -func (fis FileInfoSums) SortBySums() { - dups := fis.GetDuplicatePaths() - if len(dups) > 0 { - sort.Sort(bySum{fis, dups}) - } else { - sort.Sort(bySum{fis, nil}) - } -} - -// byName is a sort.Sort helper for sorting by file names. 
-// If names are the same, order them by their appearance in the tar archive -type byName struct{ FileInfoSums } - -func (bn byName) Less(i, j int) bool { - if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { - return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() - } - return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() -} - -// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive -type bySum struct { - FileInfoSums - dups FileInfoSums -} - -func (bs bySum) Less(i, j int) bool { - if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { - return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() - } - return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() -} - -// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order -type byPos struct{ FileInfoSums } - -func (bp byPos) Less(i, j int) bool { - return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() -} diff --git a/pkg/tarsum/fileinfosums_test.go b/pkg/tarsum/fileinfosums_test.go deleted file mode 100644 index bb700d8bd..000000000 --- a/pkg/tarsum/fileinfosums_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package tarsum - -import "testing" - -func newFileInfoSums() FileInfoSums { - return FileInfoSums{ - fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, - fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, - fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, - fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, - fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, - fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, - } -} - -func TestSortFileInfoSums(t *testing.T) { - dups := newFileInfoSums().GetAllFile("dup1") - if len(dups) != 2 { - t.Errorf("expected length 2, got %d", len(dups)) - } - dups.SortByNames() - if dups[0].Pos() != 4 { - t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos()) - } - - fis := newFileInfoSums() - expected := "0abcdef1234567890" - fis.SortBySums() - got := fis[0].Sum() - if got != expected { - t.Errorf("Expected %q, got %q", expected, got) - } - - fis = newFileInfoSums() - expected = "dup1" - fis.SortByNames() - gotFis := fis[0] - if gotFis.Name() != expected { - t.Errorf("Expected %q, got %q", expected, gotFis.Name()) - } - // since a duplicate is first, ensure it is ordered first by position too - if gotFis.Pos() != 4 { - t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) - } - - fis = newFileInfoSums() - fis.SortByPos() - if fis[0].Pos() != 0 { - t.Errorf("sorted fileInfoSums by Pos should order them by position.") - } - - fis = newFileInfoSums() - expected = "deadbeef1" - gotFileInfoSum := fis.GetFile("dup1") - if gotFileInfoSum.Sum() != expected { - t.Errorf("Expected %q, got %q", expected, gotFileInfoSum) - } - if fis.GetFile("noPresent") != nil { - t.Errorf("Should have return nil if name not found.") - } - -} diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go deleted file mode 100644 index 154788db8..000000000 --- a/pkg/tarsum/tarsum.go +++ /dev/null @@ -1,295 +0,0 @@ -// Package tarsum provides algorithms to perform checksum calculation on -// filesystem layers. -// -// The transportation of filesystems, regarding Docker, is done with tar(1) -// archives. There are a variety of tar serialization formats [2], and a key -// concern here is ensuring a repeatable checksum given a set of inputs from a -// generic tar archive. 
Types of transportation include distribution to and from a -// registry endpoint, saving and loading through commands or Docker daemon APIs, -// transferring the build context from client to Docker daemon, and committing the -// filesystem of a container to become an image. -// -// As tar archives are used for transit, but not preserved in many situations, the -// focus of the algorithm is to ensure the integrity of the preserved filesystem, -// while maintaining a deterministic accountability. This includes neither -// constraining the ordering or manipulation of the files during the creation or -// unpacking of the archive, nor include additional metadata state about the file -// system attributes. -package tarsum - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "crypto" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "hash" - "io" - "path" - "strings" -) - -const ( - buf8K = 8 * 1024 - buf16K = 16 * 1024 - buf32K = 32 * 1024 -) - -// NewTarSum creates a new interface for calculating a fixed time checksum of a -// tar archive. -// -// This is used for calculating checksums of layers of an image, in some cases -// including the byte payload of the image's json metadata as well, and for -// calculating the checksums for buildcache. -func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { - return NewTarSumHash(r, dc, v, DefaultTHash) -} - -// NewTarSumHash creates a new TarSum, providing a THash to use rather than -// the DefaultTHash. -func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { - headerSelector, err := getTarHeaderSelector(v) - if err != nil { - return nil, err - } - ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} - err = ts.initTarSum() - return ts, err -} - -// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. -func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { - parts := strings.SplitN(label, "+", 2) - if len(parts) != 2 { - return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") - } - - versionName, hashName := parts[0], parts[1] - - version, ok := tarSumVersionsByName[versionName] - if !ok { - return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) - } - - hashConfig, ok := standardHashConfigs[hashName] - if !ok { - return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) - } - - tHash := NewTHash(hashConfig.name, hashConfig.hash.New) - - return NewTarSumHash(r, disableCompression, version, tHash) -} - -// TarSum is the generic interface for calculating fixed time -// checksums of a tar archive. -type TarSum interface { - io.Reader - GetSums() FileInfoSums - Sum([]byte) string - Version() Version - Hash() THash -} - -// tarSum struct is the structure for a Version0 checksum calculation. -type tarSum struct { - io.Reader - tarR *tar.Reader - tarW *tar.Writer - writer writeCloseFlusher - bufTar *bytes.Buffer - bufWriter *bytes.Buffer - bufData []byte - h hash.Hash - tHash THash - sums FileInfoSums - fileCounter int64 - currentFile string - finished bool - first bool - DisableCompression bool // false by default. When false, the output gzip compressed. 
- tarSumVersion Version // this field is not exported so it can not be mutated during use - headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive -} - -func (ts tarSum) Hash() THash { - return ts.tHash -} - -func (ts tarSum) Version() Version { - return ts.tarSumVersion -} - -// THash provides a hash.Hash type generator and its name. -type THash interface { - Hash() hash.Hash - Name() string -} - -// NewTHash is a convenience method for creating a THash. -func NewTHash(name string, h func() hash.Hash) THash { - return simpleTHash{n: name, h: h} -} - -type tHashConfig struct { - name string - hash crypto.Hash -} - -var ( - // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. - standardHashConfigs = map[string]tHashConfig{ - "sha256": {name: "sha256", hash: crypto.SHA256}, - "sha512": {name: "sha512", hash: crypto.SHA512}, - } -) - -// DefaultTHash is default TarSum hashing algorithm - "sha256". -var DefaultTHash = NewTHash("sha256", sha256.New) - -type simpleTHash struct { - n string - h func() hash.Hash -} - -func (sth simpleTHash) Name() string { return sth.n } -func (sth simpleTHash) Hash() hash.Hash { return sth.h() } - -func (ts *tarSum) encodeHeader(h *tar.Header) error { - for _, elem := range ts.headerSelector.selectHeaders(h) { - if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { - return err - } - } - return nil -} - -func (ts *tarSum) initTarSum() error { - ts.bufTar = bytes.NewBuffer([]byte{}) - ts.bufWriter = bytes.NewBuffer([]byte{}) - ts.tarR = tar.NewReader(ts.Reader) - ts.tarW = tar.NewWriter(ts.bufTar) - if !ts.DisableCompression { - ts.writer = gzip.NewWriter(ts.bufWriter) - } else { - ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} - } - if ts.tHash == nil { - ts.tHash = DefaultTHash - } - ts.h = ts.tHash.Hash() - ts.h.Reset() - ts.first = true - ts.sums = FileInfoSums{} - return nil -} - -func (ts *tarSum) Read(buf []byte) (int, error) { - if ts.finished { - return ts.bufWriter.Read(buf) - } - if len(ts.bufData) < len(buf) { - switch { - case len(buf) <= buf8K: - ts.bufData = make([]byte, buf8K) - case len(buf) <= buf16K: - ts.bufData = make([]byte, buf16K) - case len(buf) <= buf32K: - ts.bufData = make([]byte, buf32K) - default: - ts.bufData = make([]byte, len(buf)) - } - } - buf2 := ts.bufData[:len(buf)] - - n, err := ts.tarR.Read(buf2) - if err != nil { - if err == io.EOF { - if _, err := ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - if !ts.first { - ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) - ts.fileCounter++ - ts.h.Reset() - } else { - ts.first = false - } - - currentHeader, err := ts.tarR.Next() - if err != nil { - if err == io.EOF { - if err := ts.tarW.Close(); err != nil { - return 0, err - } - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - if err := ts.writer.Close(); err != nil { - return 0, err - } - ts.finished = true - return n, nil - } - return n, err - } - ts.currentFile = path.Clean(currentHeader.Name) - if err := ts.encodeHeader(currentHeader); err != nil { - return 0, err - } - if err := ts.tarW.WriteHeader(currentHeader); err != nil { - return 0, err - } - if _, err := ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) - } - return n, err - } - - // Filling the hash buffer - if _, err = 
ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - - // Filling the tar writer - if _, err = ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - - // Filling the output writer - if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) -} - -func (ts *tarSum) Sum(extra []byte) string { - ts.sums.SortBySums() - h := ts.tHash.Hash() - if extra != nil { - h.Write(extra) - } - for _, fis := range ts.sums { - h.Write([]byte(fis.Sum())) - } - checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) - return checksum -} - -func (ts *tarSum) GetSums() FileInfoSums { - return ts.sums -} diff --git a/pkg/tarsum/tarsum_spec.md b/pkg/tarsum/tarsum_spec.md deleted file mode 100644 index 89b2e49f9..000000000 --- a/pkg/tarsum/tarsum_spec.md +++ /dev/null @@ -1,230 +0,0 @@ -page_title: TarSum checksum specification -page_description: Documentation for algorithms used in the TarSum checksum calculation -page_keywords: docker, checksum, validation, tarsum - -# TarSum Checksum Specification - -## Abstract - -This document describes the algorithms used in performing the TarSum checksum -calculation on filesystem layers, the need for this method over existing -methods, and the versioning of this calculation. - -## Warning - -This checksum algorithm is for best-effort comparison of file trees with fuzzy logic. - -This is _not_ a cryptographic attestation, and should not be considered secure. - -## Introduction - -The transportation of filesystems, regarding Docker, is done with tar(1) -archives. There are a variety of tar serialization formats [2], and a key -concern here is ensuring a repeatable checksum given a set of inputs from a -generic tar archive. Types of transportation include distribution to and from a -registry endpoint, saving and loading through commands or Docker daemon APIs, -transferring the build context from client to Docker daemon, and committing the -filesystem of a container to become an image. - -As tar archives are used for transit, but not preserved in many situations, the -focus of the algorithm is to ensure the integrity of the preserved filesystem, -while maintaining a deterministic accountability. This includes neither -constraining the ordering or manipulation of the files during the creation or -unpacking of the archive, nor include additional metadata state about the file -system attributes. - -## Intended Audience - -This document is outlining the methods used for consistent checksum calculation -for filesystems transported via tar archives. - -Auditing these methodologies is an open and iterative process. This document -should accommodate the review of source code. Ultimately, this document should -be the starting point of further refinements to the algorithm and its future -versions. - -## Concept - -The checksum mechanism must ensure the integrity and assurance of the -filesystem payload. - -## Checksum Algorithm Profile - -A checksum mechanism must define the following operations and attributes: - -* Associated hashing cipher - used to checksum each file payload and attribute - information. -* Checksum list - each file of the filesystem archive has its checksum - calculated from the payload and attributes of the file. The final checksum is - calculated from this list, with specific ordering. -* Version - as the algorithm adapts to requirements, there are behaviors of the - algorithm to manage by versioning. 
-* Archive being calculated - the tar archive having its checksum calculated
-
-## Elements of TarSum checksum
-
-The calculated sum output is a text string. The elements included in the output
-of the calculated sum comprise the information needed for validation of the sum
-(TarSum version and hashing cipher used) and the expected checksum in
-hexadecimal form.
-
-There are two delimiters used:
-* '+' separates TarSum version from hashing cipher
-* ':' separates calculation mechanics from expected hash
-
-Example:
-
-```
- "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
- |         |       \                                                               |
- |         |        \                                                              |
- |_version_|_cipher__|__                                                           |
- |                      \                                                          |
- |_calculation_mechanics_|______________________expected_sum_______________________|
-```
-
-## Versioning
-
-Versioning was introduced [0] to accommodate differences in the calculation
-needed, and the ability to maintain backward compatibility.
-
-The general algorithm is described further in the 'Calculation' section.
-
-### Version0
-
-This is the initial version of TarSum.
-
-Its element in the TarSum checksum string is `tarsum`.
-
-### Version1
-
-Its element in the TarSum checksum is `tarsum.v1`.
-
-The notable changes in this version:
-* Exclusion of file `mtime` from the file information headers, in each file
-  checksum calculation
-* Inclusion of extended attribute (`xattrs`, also seen as `SCHILY.xattr.`
-  prefixed pax tar file info headers) keys and values in each file checksum
-  calculation
-
-### VersionDev
-
-*Do not use unless validating refinements to the checksum algorithm.*
-
-Its element in the TarSum checksum is `tarsum.dev`.
-
-This is a floating placeholder for the next version and grounds for testing
-changes. The methods used for calculation are subject to change without notice,
-and this version is for testing and not for production use.
-
-## Ciphers
-
-The official default and standard hashing cipher used in the calculation
-mechanic is `sha256`. This refers to the SHA256 hash algorithm as defined in
-FIPS 180-4.
-
-Though the TarSum algorithm itself is not exclusively bound to the single
-hashing cipher `sha256`, support for alternate hashing ciphers was later added
-[1]. Use cases for an alternate cipher could include future-proofing the TarSum
-checksum format and using faster cipher hashes for tar filesystem checksums.
-
-## Calculation
-
-### Requirement
-
-As mentioned earlier, the calculation takes into consideration the lifecycle of
-the tar archive, in that the tar archive is not an immutable, permanent
-artifact. Otherwise, options like relying on a known hashing cipher checksum of
-the archive itself would be reliable enough. The tar archive of the filesystem
-is used as a transportation medium for Docker images, and the archive is
-discarded once its contents are extracted. Therefore, for consistent
-validation, items such as the order of files in the tar archive and timestamps
-are subject to change once an image is received.
-
-### Process
-
-The method is typically iterative due to reading tar info headers from the
-archive stream, though this is not a strict requirement.
-
-#### Files
-
-Each file in the tar archive has its contents (headers and body) checksummed
-individually using the designated associated hashing cipher. The ordered
-headers of the file are written to the checksum calculation first, and then the
-payload of the file body.
-
-The resulting checksum of the file is appended to the list of file sums. The
-sum is encoded as a string of the hexadecimal digest. Additionally, the file
-name and position in the archive are kept as a reference for special ordering.
-
-#### Headers
-
-The following headers are read, in this order (with the corresponding
-representation of each value):
-* 'name' - string
-* 'mode' - string of the base10 integer
-* 'uid' - string of the integer
-* 'gid' - string of the integer
-* 'size' - string of the integer
-* 'mtime' (_Version0 only_) - string of the integer seconds since 1970-01-01 00:00:00 UTC
-* 'typeflag' - string of the char
-* 'linkname' - string
-* 'uname' - string
-* 'gname' - string
-* 'devmajor' - string of the integer
-* 'devminor' - string of the integer
-
-For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax
-headers) are included after the above list. These xattr key/value pairs are
-first sorted by key.
-
-#### Header Format
-
-The ordered headers are written to the hash in the format of
-
-    "{.key}{.value}"
-
-with no newline.
-
-#### Body
-
-After the ordered headers of the file have been added to the checksum for the
-file, the body of the file is written to the hash.
-
-#### List of file sums
-
-The list of file sums is sorted by the string of the hexadecimal digest.
-
-If there are two files in the tar with matching paths, the order of occurrence
-for that path is reflected for the sums of the corresponding file header and
-body.
-
-#### Final Checksum
-
-Begin with a fresh or initial state of the associated hash cipher. If there is
-additional payload to include in the TarSum calculation for the archive, it is
-written first. Then each checksum from the ordered list of file sums is written
-to the hash.
-
-The resulting digest is formatted per the Elements of TarSum checksum above,
-including the TarSum version, the associated hash cipher, and the
-hexadecimal-encoded checksum digest.
-
-## Security Considerations
-
-The initial version of TarSum has undergone one update that could invalidate
-handcrafted tar archives. The tar archive format supports appending files with
-the same names as prior files in the archive. The latter file will clobber the
-prior file of the same path. Due to this, the algorithm now accounts for files
-with matching paths, and orders the list of file sums accordingly [3].
-
-## Footnotes
-
-* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
-* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
-* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
-* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31
-
-## Acknowledgments
-
-Joffrey F (shin-) and Guillaume J. Charmes (creack), for the initial work on
-the TarSum calculation.
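The spec's two delimiters make the checksum string easy to split mechanically. A minimal parsing sketch (splitTarSum is an illustrative helper written for this note, not an API from this tree; the deleted NewTarSumForLabel performs the same SplitN on the `{version}+{hash}` label):

```go
package main

import (
	"fmt"
	"strings"
)

// splitTarSum splits a TarSum checksum string into its three elements using
// the two delimiters described above: '+' between version label and cipher,
// ':' between calculation mechanics and the expected hex digest.
func splitTarSum(checksum string) (version, cipher, digest string, err error) {
	mechanics := strings.SplitN(checksum, ":", 2)
	if len(mechanics) != 2 {
		return "", "", "", fmt.Errorf("missing ':' delimiter in %q", checksum)
	}
	label := strings.SplitN(mechanics[0], "+", 2)
	if len(label) != 2 {
		return "", "", "", fmt.Errorf("missing '+' delimiter in %q", checksum)
	}
	return label[0], label[1], mechanics[1], nil
}

func main() {
	v, c, d, err := splitTarSum("tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e")
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // tarsum.v1
	fmt.Println(c) // sha256
	fmt.Println(d) // 220a60ec...6a3e
}
```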
- diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go deleted file mode 100644 index 54bec53fc..000000000 --- a/pkg/tarsum/tarsum_test.go +++ /dev/null @@ -1,656 +0,0 @@ -package tarsum - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "testing" -) - -type testLayer struct { - filename string - options *sizedOptions - jsonfile string - gzip bool - tarsum string - version Version - hash THash -} - -var testLayers = []testLayer{ - { - filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", - jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", - version: Version0, - tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, - { - filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", - jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", - version: VersionDev, - tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, - { - filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", - jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", - gzip: true, - tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, - { - // Tests existing version of TarSum when xattrs are present - filename: "testdata/xattr/layer.tar", - jsonfile: "testdata/xattr/json", - version: Version0, - tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, - { - // Tests next version of TarSum when xattrs are present - filename: "testdata/xattr/layer.tar", - jsonfile: "testdata/xattr/json", - version: VersionDev, - tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, - { - filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", - jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", - tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, - { - // this tar has two files with the same path - filename: "testdata/collision/collision-0.tar", - tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, - { - // this tar has the same two files (with the same path), but reversed order. 
ensuring is has different hash than above - filename: "testdata/collision/collision-1.tar", - tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, - { - // this tar has newer of collider-0.tar, ensuring is has different hash - filename: "testdata/collision/collision-2.tar", - tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, - { - // this tar has newer of collider-1.tar, ensuring is has different hash - filename: "testdata/collision/collision-3.tar", - tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", - hash: md5THash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", - hash: sha1Hash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", - hash: sha224Hash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", - hash: sha384Hash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", - hash: sha512Hash, - }, -} - -type sizedOptions struct { - num int64 - size int64 - isRand bool - realFile bool -} - -// make a tar: -// * num is the number of files the tar should have -// * size is the bytes per file -// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) -// * realFile will write to a TempFile, instead of an in memory buffer -func sizedTar(opts sizedOptions) io.Reader { - var ( - fh io.ReadWriter - err error - ) - if opts.realFile { - fh, err = ioutil.TempFile("", "tarsum") - if err != nil { - return nil - } - } else { - fh = bytes.NewBuffer([]byte{}) - } - tarW := tar.NewWriter(fh) - defer tarW.Close() - for i := int64(0); i < opts.num; i++ { - err := tarW.WriteHeader(&tar.Header{ - Name: fmt.Sprintf("/testdata%d", i), - Mode: 0755, - Uid: 0, - Gid: 0, - Size: opts.size, - }) - if err != nil { - return nil - } - var rBuf []byte - if opts.isRand { - rBuf = make([]byte, 8) - _, err = rand.Read(rBuf) - if err != nil { - return nil - } - } else { - rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} - } - - for i := int64(0); i < opts.size/int64(8); i++ { - tarW.Write(rBuf) - } - } - return fh -} - -func emptyTarSum(gzip bool) (TarSum, error) { - reader, writer := io.Pipe() - tarWriter := tar.NewWriter(writer) - - // Immediately close tarWriter and write-end of the - // Pipe in a separate goroutine so we don't block. 
- go func() { - tarWriter.Close() - writer.Close() - }() - - return NewTarSum(reader, !gzip, Version0) -} - -// Test errors on NewTarsumForLabel -func TestNewTarSumForLabelInvalid(t *testing.T) { - reader := strings.NewReader("") - - if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { - t.Fatalf("Expected an error, got nothing.") - } - - if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { - t.Fatalf("Expected an error, got nothing.") - } - if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { - t.Fatalf("Expected an error, got nothing.") - } -} - -func TestNewTarSumForLabel(t *testing.T) { - - layer := testLayers[0] - - reader, err := os.Open(layer.filename) - if err != nil { - t.Fatal(err) - } - label := strings.Split(layer.tarsum, ":")[0] - ts, err := NewTarSumForLabel(reader, false, label) - if err != nil { - t.Fatal(err) - } - - // Make sure it actually worked by reading a little bit of it - nbByteToRead := 8 * 1024 - dBuf := make([]byte, nbByteToRead) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) - } -} - -// TestEmptyTar tests that tarsum does not fail to read an empty tar -// and correctly returns the hex digest of an empty hash. -func TestEmptyTar(t *testing.T) { - // Test without gzip. - ts, err := emptyTarSum(false) - if err != nil { - t.Fatal(err) - } - - zeroBlock := make([]byte, 1024) - buf := new(bytes.Buffer) - - n, err := io.Copy(buf, ts) - if err != nil { - t.Fatal(err) - } - - if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { - t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) - } - - expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) - resultSum := ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } - - // Test with gzip. - ts, err = emptyTarSum(true) - if err != nil { - t.Fatal(err) - } - buf.Reset() - - n, err = io.Copy(buf, ts) - if err != nil { - t.Fatal(err) - } - - bufgz := new(bytes.Buffer) - gz := gzip.NewWriter(bufgz) - n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) - gz.Close() - gzBytes := bufgz.Bytes() - - if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { - t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) - } - - resultSum = ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } - - // Test without ever actually writing anything. 
- if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { - t.Fatal(err) - } - - resultSum = ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } -} - -var ( - md5THash = NewTHash("md5", md5.New) - sha1Hash = NewTHash("sha1", sha1.New) - sha224Hash = NewTHash("sha224", sha256.New224) - sha384Hash = NewTHash("sha384", sha512.New384) - sha512Hash = NewTHash("sha512", sha512.New) -) - -// Test all the build-in read size : buf8K, buf16K, buf32K and more -func TestTarSumsReadSize(t *testing.T) { - // Test always on the same layer (that is big enough) - layer := testLayers[0] - - for i := 0; i < 5; i++ { - - reader, err := os.Open(layer.filename) - if err != nil { - t.Fatal(err) - } - ts, err := NewTarSum(reader, false, layer.version) - if err != nil { - t.Fatal(err) - } - - // Read and discard bytes so that it populates sums - nbByteToRead := (i + 1) * 8 * 1024 - dBuf := make([]byte, nbByteToRead) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) - continue - } - } -} - -func TestTarSums(t *testing.T) { - for _, layer := range testLayers { - var ( - fh io.Reader - err error - ) - if len(layer.filename) > 0 { - fh, err = os.Open(layer.filename) - if err != nil { - t.Errorf("failed to open %s: %s", layer.filename, err) - continue - } - } else if layer.options != nil { - fh = sizedTar(*layer.options) - } else { - // What else is there to test? - t.Errorf("what to do with %#v", layer) - continue - } - if file, ok := fh.(*os.File); ok { - defer file.Close() - } - - var ts TarSum - if layer.hash == nil { - // double negatives! - ts, err = NewTarSum(fh, !layer.gzip, layer.version) - } else { - ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) - } - if err != nil { - t.Errorf("%q :: %q", err, layer.filename) - continue - } - - // Read variable number of bytes to test dynamic buffer - dBuf := make([]byte, 1) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read 1B from %s: %s", layer.filename, err) - continue - } - dBuf = make([]byte, 16*1024) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) - continue - } - - // Read and discard remaining bytes - _, err = io.Copy(ioutil.Discard, ts) - if err != nil { - t.Errorf("failed to copy from %s: %s", layer.filename, err) - continue - } - var gotSum string - if len(layer.jsonfile) > 0 { - jfh, err := os.Open(layer.jsonfile) - if err != nil { - t.Errorf("failed to open %s: %s", layer.jsonfile, err) - continue - } - buf, err := ioutil.ReadAll(jfh) - if err != nil { - t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) - continue - } - gotSum = ts.Sum(buf) - } else { - gotSum = ts.Sum(nil) - } - - if layer.tarsum != gotSum { - t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) - } - var expectedHashName string - if layer.hash != nil { - expectedHashName = layer.hash.Name() - } else { - expectedHashName = DefaultTHash.Name() - } - if expectedHashName != ts.Hash().Name() { - t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name()) - } - } -} - -func TestIteration(t *testing.T) { - headerTests := []struct { - expectedSum string // TODO(vbatts) it would be nice to get individual sums of each - version Version - hdr *tar.Header - data []byte - }{ - { - "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", - Version0, - &tar.Header{ - Name: "file.txt", 
- Size: 0, - Typeflag: tar.TypeReg, - Devminor: 0, - Devmajor: 0, - }, - []byte(""), - }, - { - "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", - VersionDev, - &tar.Header{ - Name: "file.txt", - Size: 0, - Typeflag: tar.TypeReg, - Devminor: 0, - Devmajor: 0, - }, - []byte(""), - }, - { - "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", - VersionDev, - &tar.Header{ - Name: "another.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Devminor: 0, - Devmajor: 0, - }, - []byte("test"), - }, - { - "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", - VersionDev, - &tar.Header{ - Name: "xattrs.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Xattrs: map[string]string{ - "user.key1": "value1", - "user.key2": "value2", - }, - }, - []byte("test"), - }, - { - "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", - VersionDev, - &tar.Header{ - Name: "xattrs.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Xattrs: map[string]string{ - "user.KEY1": "value1", // adding different case to ensure different sum - "user.key2": "value2", - }, - }, - []byte("test"), - }, - { - "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", - Version0, - &tar.Header{ - Name: "xattrs.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Xattrs: map[string]string{ - "user.NOT": "CALCULATED", - }, - }, - []byte("test"), - }, - } - for _, htest := range headerTests { - s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) - if err != nil { - t.Fatal(err) - } - - if s != htest.expectedSum { - t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) - } - } - -} - -func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { - buf := bytes.NewBuffer(nil) - // first build our test tar - tw := tar.NewWriter(buf) - if err := tw.WriteHeader(h); err != nil { - return "", err - } - if _, err := tw.Write(data); err != nil { - return "", err - } - tw.Close() - - ts, err := NewTarSum(buf, true, v) - if err != nil { - return "", err - } - tr := tar.NewReader(ts) - for { - hdr, err := tr.Next() - if hdr == nil || err == io.EOF { - // Signals the end of the archive. 
- break - } - if err != nil { - return "", err - } - if _, err = io.Copy(ioutil.Discard, tr); err != nil { - return "", err - } - } - return ts.Sum(nil), nil -} - -func Benchmark9kTar(b *testing.B) { - buf := bytes.NewBuffer([]byte{}) - fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") - if err != nil { - b.Error(err) - return - } - n, err := io.Copy(buf, fh) - if err != nil { - b.Error(err) - return - } - fh.Close() - - reader := bytes.NewReader(buf.Bytes()) - - b.SetBytes(n) - b.ResetTimer() - for i := 0; i < b.N; i++ { - reader.Seek(0, 0) - ts, err := NewTarSum(reader, true, Version0) - if err != nil { - b.Error(err) - return - } - io.Copy(ioutil.Discard, ts) - ts.Sum(nil) - } -} - -func Benchmark9kTarGzip(b *testing.B) { - buf := bytes.NewBuffer([]byte{}) - fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") - if err != nil { - b.Error(err) - return - } - n, err := io.Copy(buf, fh) - if err != nil { - b.Error(err) - return - } - fh.Close() - - reader := bytes.NewReader(buf.Bytes()) - - b.SetBytes(n) - b.ResetTimer() - for i := 0; i < b.N; i++ { - reader.Seek(0, 0) - ts, err := NewTarSum(reader, false, Version0) - if err != nil { - b.Error(err) - return - } - io.Copy(ioutil.Discard, ts) - ts.Sum(nil) - } -} - -// this is a single big file in the tar archive -func Benchmark1mbSingleFileTar(b *testing.B) { - benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) -} - -// this is a single big file in the tar archive -func Benchmark1mbSingleFileTarGzip(b *testing.B) { - benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) -} - -// this is 1024 1k files in the tar archive -func Benchmark1kFilesTar(b *testing.B) { - benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) -} - -// this is 1024 1k files in the tar archive -func Benchmark1kFilesTarGzip(b *testing.B) { - benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) -} - -func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { - var fh *os.File - tarReader := sizedTar(opts) - if br, ok := tarReader.(*os.File); ok { - fh = br - } - defer os.Remove(fh.Name()) - defer fh.Close() - - b.SetBytes(opts.size * opts.num) - b.ResetTimer() - for i := 0; i < b.N; i++ { - ts, err := NewTarSum(fh, !isGzip, Version0) - if err != nil { - b.Error(err) - return - } - io.Copy(ioutil.Discard, ts) - ts.Sum(nil) - fh.Seek(0, 0) - } -} diff --git a/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json deleted file mode 100644 index 48e2af349..000000000 --- a/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json +++ /dev/null @@ -1 +0,0 @@ -{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' 
/etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar deleted file mode 100644 index dfd5c204aea77673f13fdd2f81cb4af1c155c00c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9216 zcmeHMYfsx)8s=;H6|bl&iYAZ?p-5<1$(y*vYHk~c?XX{vu}|fzRr1|&zyvK1! zQq)nWWVPA}63Myvy*}^F5Qtg*V8=g=M!Ru&adFTnf40B*^q|=~Z#CM@#>M%EgGRH_ zXtfULV#j(J_Jz`34wZgZ*0ym!%kRHL9{_(p&BZRoHJYu)<>loz?$!PU{9Bjp<^i?p zS)Tg!r=9Az$G@(0Ao6^75%A;qpMSV)ukcqQn%1X5y|oh!_xLmZX`y%GUBmQG;D6af z{a@yPg@1D=8t(B&ZtcXgE2ck=f9pf*x&ANlU$J}L#UB59rsJ=#>(otde**vZ1?PXJ z)y|dMh8z!Kfh=;zN!B|J)*y8)L$Hbq5c2K_rK=l{{8R8czxwV#$Odd zDsuJ8oS)h8`+U3IsNVOszdy8F?XCC!X1jHMK)Xr!XT8koFP{Hz-;!IxPhJ$Ib48h# zYv~t}ms6n-7Nk?ki-cxgF4IDhpT@D51d2R$2x=V)%F|Svhif#KI>gHaB|@O7JU(A% zo>KEP56(cuboN&-&LROexgfmf&txD1^0c9NNVQI5N~dNwm64!nnnQFH317=JF`{vu zi^$WUtCWHQq4Y!Yy@W{oRoV29sUd<=@!~sJ;!ok8>_qYfz|Ch12+9P6$8i`#qvqS zhsLT-8QL!zwhRx(aXaYF&PwD5LLOm%T#Ds>) z{YV0A>qPL*aFLnz9*nfyl@!I3_Ss=Y=MKNEA zG8|$lPj#9`#(W1sgCgK@f)P?2A)0uPB8Gf6TLITOAl@|29e$jAvBox=W-QCrr59N% zKg$7Xy=69F7QR_X7D_-i2hs*J)6%&RIBr9LDPPP_-? z-X`DPuwzY(j+Gk=rWL_Msfvvp-prW$3W(MwPPgEZO^EI!{*XIAuLp zlpj9k85vO{{2kR4hD{4c;~{+QmhNVfq;xeepJc>QQ@QJfEkdQVBbPJuiA~nsv9l~O zrN&UpxC9i`6;rQ>v?7%WUrr@(gXOs4JE=IN=}4(?RS=2GEd9-ogTEiuP>Fqyb6;vM ziV-Q;Z|ZT?Vz^rPk?`^}6a`cC_=9V1=*>jc&y0jq{h|=m&BK+Jpv}ea1?sKVi^Gj` zk<9K*;4?gK^?Jl6-g0L4kQcX>OZUHi{>Odi#u~f!gnqSdCpW{f zGr2q31WO6O$i;nz9#NH-D^8Rv6Xcv%XFkhmyBsZ;8k2ftd;fPtN1v+`G zPRv~5E)wm1y}~(Py9GwK;`;9K2C_2#(Rc=qFBTa z>?ZUNHvSmq9G9)M%0u+CW!J=jv1~Clz-avUIImk%<&=a9uI;2EY~~stiCKTsh|Oow<5; z$eY1%WV!B_?iFikc)C2TV46YQucl=WfmM#jY|_4sK>Njf)j#u#Y{x@V_A!c2o<`D? zX*2YQ4A)U054Qh4y3hVk?0?5^Us~rh*TViU9vl!r009ILKmY**5I_I{1Q0*~0R#|0 Y009ILKmY**5I_I{1Q0*~fqxTt0{2EK)Bpeg diff --git a/pkg/tarsum/testdata/collision/collision-2.tar b/pkg/tarsum/testdata/collision/collision-2.tar deleted file mode 100644 index 7b5c04a9644808851fcccab5c3c240bf342abd93..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10240 zcmeIuF%E+;425COJw=XS2L~?Dp<74P5hRe1I+e8NZ(w35>V(Abzr};)_<@(2e`|Ha`Z>GG~@_KYd${~ON w0tg_000IagfB*srAbVE5xzPBd+@To)G|2840byWhU|?oqf;;~Mb02E{2kHRk de~R-YhD)#rjPU%AB}7JrMnhmU1V%^*0091(G-Ch& diff --git a/pkg/tarsum/versioning.go b/pkg/tarsum/versioning.go deleted file mode 100644 index 288228685..000000000 --- a/pkg/tarsum/versioning.go +++ /dev/null @@ -1,150 +0,0 @@ -package tarsum - -import ( - "archive/tar" - "errors" - "sort" - "strconv" - "strings" -) - -// Version is used for versioning of the TarSum algorithm -// based on the prefix of the hash used -// i.e. 
"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" -type Version int - -// Prefix of "tarsum" -const ( - Version0 Version = iota - Version1 - // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation - VersionDev -) - -// VersionLabelForChecksum returns the label for the given tarsum -// checksum, i.e., everything before the first `+` character in -// the string or an empty string if no label separator is found. -func VersionLabelForChecksum(checksum string) string { - // Checksums are in the form: {versionLabel}+{hashID}:{hex} - sepIndex := strings.Index(checksum, "+") - if sepIndex < 0 { - return "" - } - return checksum[:sepIndex] -} - -// GetVersions gets a list of all known tarsum versions. -func GetVersions() []Version { - v := []Version{} - for k := range tarSumVersions { - v = append(v, k) - } - return v -} - -var ( - tarSumVersions = map[Version]string{ - Version0: "tarsum", - Version1: "tarsum.v1", - VersionDev: "tarsum.dev", - } - tarSumVersionsByName = map[string]Version{ - "tarsum": Version0, - "tarsum.v1": Version1, - "tarsum.dev": VersionDev, - } -) - -func (tsv Version) String() string { - return tarSumVersions[tsv] -} - -// GetVersionFromTarsum returns the Version from the provided string. -func GetVersionFromTarsum(tarsum string) (Version, error) { - tsv := tarsum - if strings.Contains(tarsum, "+") { - tsv = strings.SplitN(tarsum, "+", 2)[0] - } - for v, s := range tarSumVersions { - if s == tsv { - return v, nil - } - } - return -1, ErrNotVersion -} - -// Errors that may be returned by functions in this package -var ( - ErrNotVersion = errors.New("string does not include a TarSum Version") - ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") -) - -// tarHeaderSelector is the interface which different versions -// of tarsum should use for selecting and ordering tar headers -// for each item in the archive. -type tarHeaderSelector interface { - selectHeaders(h *tar.Header) (orderedHeaders [][2]string) -} - -type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) - -func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { - return f(h) -} - -func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - return [][2]string{ - {"name", h.Name}, - {"mode", strconv.FormatInt(h.Mode, 10)}, - {"uid", strconv.Itoa(h.Uid)}, - {"gid", strconv.Itoa(h.Gid)}, - {"size", strconv.FormatInt(h.Size, 10)}, - {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, - {"typeflag", string([]byte{h.Typeflag})}, - {"linkname", h.Linkname}, - {"uname", h.Uname}, - {"gname", h.Gname}, - {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, - {"devminor", strconv.FormatInt(h.Devminor, 10)}, - } -} - -func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - // Get extended attributes. - xAttrKeys := make([]string, len(h.Xattrs)) - for k := range h.Xattrs { - xAttrKeys = append(xAttrKeys, k) - } - sort.Strings(xAttrKeys) - - // Make the slice with enough capacity to hold the 11 basic headers - // we want from the v0 selector plus however many xattrs we have. - orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) - - // Copy all headers from v0 excluding the 'mtime' header (the 5th element). - v0headers := v0TarHeaderSelect(h) - orderedHeaders = append(orderedHeaders, v0headers[0:5]...) - orderedHeaders = append(orderedHeaders, v0headers[6:]...) - - // Finally, append the sorted xattrs. 
- for _, k := range xAttrKeys { - orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) - } - - return -} - -var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ - Version0: v0TarHeaderSelect, - Version1: v1TarHeaderSelect, - VersionDev: v1TarHeaderSelect, -} - -func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { - headerSelector, ok := registeredHeaderSelectors[v] - if !ok { - return nil, ErrVersionNotImplemented - } - - return headerSelector, nil -} diff --git a/pkg/tarsum/versioning_test.go b/pkg/tarsum/versioning_test.go deleted file mode 100644 index 88e0a5783..000000000 --- a/pkg/tarsum/versioning_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package tarsum - -import ( - "testing" -) - -func TestVersionLabelForChecksum(t *testing.T) { - version := VersionLabelForChecksum("tarsum+sha256:deadbeef") - if version != "tarsum" { - t.Fatalf("Version should have been 'tarsum', was %v", version) - } - version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") - if version != "tarsum.v1" { - t.Fatalf("Version should have been 'tarsum.v1', was %v", version) - } - version = VersionLabelForChecksum("something+somethingelse") - if version != "something" { - t.Fatalf("Version should have been 'something', was %v", version) - } - version = VersionLabelForChecksum("invalidChecksum") - if version != "" { - t.Fatalf("Version should have been empty, was %v", version) - } -} - -func TestVersion(t *testing.T) { - expected := "tarsum" - var v Version - if v.String() != expected { - t.Errorf("expected %q, got %q", expected, v.String()) - } - - expected = "tarsum.v1" - v = 1 - if v.String() != expected { - t.Errorf("expected %q, got %q", expected, v.String()) - } - - expected = "tarsum.dev" - v = 2 - if v.String() != expected { - t.Errorf("expected %q, got %q", expected, v.String()) - } -} - -func TestGetVersion(t *testing.T) { - testSet := []struct { - Str string - Expected Version - }{ - {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, - {"tarsum+sha256", Version0}, - {"tarsum", Version0}, - {"tarsum.dev", VersionDev}, - {"tarsum.dev+sha256:deadbeef", VersionDev}, - } - - for _, ts := range testSet { - v, err := GetVersionFromTarsum(ts.Str) - if err != nil { - t.Fatalf("%q : %s", err, ts.Str) - } - if v != ts.Expected { - t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) - } - } - - // test one that does not exist, to ensure it errors - str := "weak+md5:abcdeabcde" - _, err := GetVersionFromTarsum(str) - if err != ErrNotVersion { - t.Fatalf("%q : %s", err, str) - } -} - -func TestGetVersions(t *testing.T) { - expected := []Version{ - Version0, - Version1, - VersionDev, - } - versions := GetVersions() - if len(versions) != len(expected) { - t.Fatalf("Expected %v versions, got %v", len(expected), len(versions)) - } - if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) { - t.Fatalf("Expected [%v], got [%v]", expected, versions) - } -} - -func containsVersion(versions []Version, version Version) bool { - for _, v := range versions { - if v == version { - return true - } - } - return false -} diff --git a/pkg/tarsum/writercloser.go b/pkg/tarsum/writercloser.go deleted file mode 100644 index 9727ecde3..000000000 --- a/pkg/tarsum/writercloser.go +++ /dev/null @@ -1,22 +0,0 @@ -package tarsum - -import ( - "io" -) - -type writeCloseFlusher interface { - io.WriteCloser - Flush() error -} - -type nopCloseFlusher struct { - io.Writer -} 
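As a side note on the version helpers deleted above, here is a minimal usage sketch (assuming the pre-removal import path github.com/docker/docker/pkg/tarsum; the checksum label and behavior match the deleted tests in versioning_test.go):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	sum := "tarsum.v1+sha256:deadbeef"
	// Everything before the first '+' is the version label.
	fmt.Println(tarsum.VersionLabelForChecksum(sum)) // tarsum.v1
	// The same label maps back to a Version constant (Version1 here).
	if v, err := tarsum.GetVersionFromTarsum(sum); err == nil {
		fmt.Println(v) // tarsum.v1
	}
}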
- -func (n *nopCloseFlusher) Close() error { - return nil -} - -func (n *nopCloseFlusher) Flush() error { - return nil -} diff --git a/pkg/term/ascii.go b/pkg/term/ascii.go deleted file mode 100644 index f5262bccf..000000000 --- a/pkg/term/ascii.go +++ /dev/null @@ -1,66 +0,0 @@ -package term - -import ( - "fmt" - "strings" -) - -// ASCII lists the possible supported ASCII key sequences -var ASCII = []string{ - "ctrl-@", - "ctrl-a", - "ctrl-b", - "ctrl-c", - "ctrl-d", - "ctrl-e", - "ctrl-f", - "ctrl-g", - "ctrl-h", - "ctrl-i", - "ctrl-j", - "ctrl-k", - "ctrl-l", - "ctrl-m", - "ctrl-n", - "ctrl-o", - "ctrl-p", - "ctrl-q", - "ctrl-r", - "ctrl-s", - "ctrl-t", - "ctrl-u", - "ctrl-v", - "ctrl-w", - "ctrl-x", - "ctrl-y", - "ctrl-z", - "ctrl-[", - "ctrl-\\", - "ctrl-]", - "ctrl-^", - "ctrl-_", -} - -// ToBytes converts a string representing a sequence of keys to the corresponding ASCII codes. -func ToBytes(keys string) ([]byte, error) { - codes := []byte{} -next: - for _, key := range strings.Split(keys, ",") { - if len(key) != 1 { - for code, ctrl := range ASCII { - if ctrl == key { - codes = append(codes, byte(code)) - continue next - } - } - if key == "DEL" { - codes = append(codes, 127) - } else { - return nil, fmt.Errorf("Unknown character: '%s'", key) - } - } else { - codes = append(codes, byte(key[0])) - } - } - return codes, nil -} diff --git a/pkg/term/ascii_test.go b/pkg/term/ascii_test.go deleted file mode 100644 index 4a1e7f302..000000000 --- a/pkg/term/ascii_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package term - -import "testing" - -func TestToBytes(t *testing.T) { - codes, err := ToBytes("ctrl-a,a") - if err != nil { - t.Fatal(err) - } - if len(codes) != 2 { - t.Fatalf("Expected 2 codes, got %d", len(codes)) - } - if codes[0] != 1 || codes[1] != 97 { - t.Fatalf("Expected '1' '97', got '%d' '%d'", codes[0], codes[1]) - } - - codes, err = ToBytes("shift-z") - if err == nil { - t.Fatalf("Expected error, got none") - } - - codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o") - if err != nil { - t.Fatal(err) - } - if len(codes) != 4 { - t.Fatalf("Expected 4 codes, got %d", len(codes)) - } - if codes[0] != 0 || codes[1] != 27 || codes[2] != 126 || codes[3] != 15 { - t.Fatalf("Expected '0' '27' '126' '15', got '%d' '%d' '%d' '%d'", codes[0], codes[1], codes[2], codes[3]) - } - - codes, err = ToBytes("DEL,+") - if err != nil { - t.Fatal(err) - } - if len(codes) != 2 { - t.Fatalf("Expected 2 codes, got %d", len(codes)) - } - if codes[0] != 127 || codes[1] != 43 { - t.Fatalf("Expected '127' '43', got '%d' '%d'", codes[0], codes[1]) - } -} diff --git a/pkg/term/tc_linux_cgo.go b/pkg/term/tc_linux_cgo.go deleted file mode 100644 index 59dac5ba8..000000000 --- a/pkg/term/tc_linux_cgo.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build linux,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include <termios.h> -import "C" - -// Termios is the Unix API for terminal I/O. -// It is a passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. -type Termios syscall.Termios - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored.
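A brief usage sketch of the ToBytes helper removed above (the pre-removal import path github.com/docker/docker/pkg/term is assumed; "ctrl-p,ctrl-q" is Docker's default detach sequence):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// Parse a comma-separated key sequence into its raw byte codes.
	codes, err := term.ToBytes("ctrl-p,ctrl-q")
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", codes) // 10 11
}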
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/pkg/term/tc_other.go b/pkg/term/tc_other.go deleted file mode 100644 index 750d7c3f6..000000000 --- a/pkg/term/tc_other.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows -// +build !linux !cgo -// +build !solaris !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -func tcget(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) - return err -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) - return err -} diff --git a/pkg/term/tc_solaris_cgo.go b/pkg/term/tc_solaris_cgo.go deleted file mode 100644 index c9139d0ca..000000000 --- a/pkg/term/tc_solaris_cgo.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build solaris,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include <termios.h> -import "C" - -// Termios is the Unix API for terminal I/O. -// It is a passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. -type Termios syscall.Termios - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - /* - VMIN is the minimum number of characters that need to be read in non-canonical mode for a read to return. - Since VMIN is overloaded with another element in canonical mode, it defaults to 4 when we switch modes; it - needs to be explicitly set to 1.
- */ - newState.Cc[C.VMIN] = 1 - newState.Cc[C.VTIME] = 0 - - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/pkg/term/term.go b/pkg/term/term.go deleted file mode 100644 index 8f554847f..000000000 --- a/pkg/term/term.go +++ /dev/null @@ -1,109 +0,0 @@ -// +build !windows - -// Package term provides structures and helper functions to work with -// terminal (state, sizes). -package term - -import ( - "errors" - "io" - "os" - "os/signal" - "syscall" -) - -var ( - // ErrInvalidState is returned if the state of the terminal is invalid. - ErrInvalidState = errors.New("Invalid terminal state") -) - -// State represents the state of the terminal. -type State struct { - termios Termios -} - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 - x uint16 - y uint16 -} - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - return inFd, isTerminalIn -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - var termios Termios - return tcget(fd, &termios) == 0 -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - if state == nil { - return ErrInvalidState - } - if err := tcset(fd, &state.termios); err != 0 { - return err - } - return nil -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - return &oldState, nil -} - -// DisableEcho applies the specified state to the terminal connected to the file -// descriptor, with echo disabled. -func DisableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= syscall.ECHO - - if err := tcset(fd, &newState); err != 0 { - return err - } - handleInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state.
-func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - handleInterrupt(fd, oldState) - return oldState, err -} - -func handleInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - RestoreTerminal(fd, state) - }() -} diff --git a/pkg/term/term_solaris.go b/pkg/term/term_solaris.go deleted file mode 100644 index 112debbec..000000000 --- a/pkg/term/term_solaris.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build solaris - -package term - -import ( - "syscall" - "unsafe" -) - -/* -#include <unistd.h> -#include <stropts.h> -#include <termios.h> - -// Small wrapper to get rid of variadic args of ioctl() -int my_ioctl(int fd, int cmd, struct winsize *ws) { - return ioctl(fd, cmd, ws); -} -*/ -import "C" - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return nil - } - return err -} diff --git a/pkg/term/term_unix.go b/pkg/term/term_unix.go deleted file mode 100644 index ddf87a0e5..000000000 --- a/pkg/term/term_unix.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !solaris,!windows - -package term - -import ( - "syscall" - "unsafe" -) - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) - // Skip errno = 0 - if err == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) - // Skip errno = 0 - if err == 0 { - return nil - } - return err -} diff --git a/pkg/term/term_windows.go b/pkg/term/term_windows.go deleted file mode 100644 index 9bc52a8c6..000000000 --- a/pkg/term/term_windows.go +++ /dev/null @@ -1,303 +0,0 @@ -// +build windows - -package term - -import ( - "io" - "os" - "os/signal" - "syscall" - - "github.com/Azure/go-ansiterm/winterm" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/term/windows" -) - -// State holds the console mode for the terminal. -type State struct { - inMode, outMode uint32 - inHandle, outHandle syscall.Handle -} - -// Winsize is used for window size. -type Winsize struct { - Height uint16 - Width uint16 -} - -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - enableVirtualTerminalInput = 0x0200 - enableVirtualTerminalProcessing = 0x0004 - disableNewlineAutoReturn = 0x0008 -) - -// usingNativeConsole is true if we are using the Windows native console -var usingNativeConsole bool - -// StdStreams returns the standard streams (stdin, stdout, stderr).
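For orientation, a minimal sketch of how callers typically drove the Unix term API removed above (pre-removal import path assumed; error handling trimmed to a panic for brevity):

package main

import (
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	fd, isTerm := term.GetFdInfo(os.Stdin)
	if !isTerm {
		return // not attached to a terminal
	}
	// Enter raw mode and make sure the previous state is restored on exit.
	state, err := term.SetRawTerminal(fd)
	if err != nil {
		panic(err)
	}
	defer term.RestoreTerminal(fd, state)
	// ... read raw, unbuffered input here ...
}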
-func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - switch { - case os.Getenv("ConEmuANSI") == "ON": - // The ConEmu terminal emulates ANSI on output streams well. - return windows.ConEmuStreams() - case os.Getenv("MSYSTEM") != "": - // MSYS (mingw) does not emulate ANSI well. - return windows.ConsoleStreams() - default: - if useNativeConsole() { - usingNativeConsole = true - return os.Stdin, os.Stdout, os.Stderr - } - return windows.ConsoleStreams() - } -} - -// useNativeConsole determines if the docker client should use the built-in -// console which supports ANSI emulation, or fall back to the golang emulator -// (github.com/azure/go-ansiterm). -func useNativeConsole() bool { - osv := system.GetOSVersion() - - // Native console is not available before major version 10 - if osv.MajorVersion < 10 { - return false - } - - // Get the console modes. If this fails, we can't use the native console - state, err := getNativeConsole() - if err != nil { - return false - } - - // Probe the console to see if it can be enabled. - if nil != probeNativeConsole(state) { - return false - } - - // Environment variable override - if e := os.Getenv("USE_NATIVE_CONSOLE"); e != "" { - if e == "1" { - return true - } - return false - } - - // Must have a post-TP5 RS1 build of Windows Server 2016/Windows 10 for - // the native console to be usable. - if osv.Build < 14350 { - return false - } - - return true -} - -// getNativeConsole returns the console modes ('state') for the native Windows console -func getNativeConsole() (State, error) { - var ( - err error - state State - ) - - // Get the handle to stdout - if state.outHandle, err = syscall.GetStdHandle(syscall.STD_OUTPUT_HANDLE); err != nil { - return state, err - } - - // Get the console mode from the console's stdout handle - if err = syscall.GetConsoleMode(state.outHandle, &state.outMode); err != nil { - return state, err - } - - // Get the handle to stdin - if state.inHandle, err = syscall.GetStdHandle(syscall.STD_INPUT_HANDLE); err != nil { - return state, err - } - - // Get the console mode from the console's stdin handle - if err = syscall.GetConsoleMode(state.inHandle, &state.inMode); err != nil { - return state, err - } - - return state, nil -} - -// probeNativeConsole probes the console to determine whether the native console can be supported. -func probeNativeConsole(state State) error { - if err := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode|enableVirtualTerminalProcessing); err != nil { - return err - } - defer winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) - - if err := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode|enableVirtualTerminalInput); err != nil { - return err - } - defer winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode) - - return nil -} - -// enableNativeConsole turns on native console mode -func enableNativeConsole(state State) error { - // First attempt both enableVirtualTerminalProcessing and disableNewlineAutoReturn - if err := winterm.SetConsoleMode(uintptr(state.outHandle), - state.outMode|(enableVirtualTerminalProcessing|disableNewlineAutoReturn)); err != nil { - - // That may fail, so fallback to trying just enableVirtualTerminalProcessing - if err := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode|enableVirtualTerminalProcessing); err != nil { - return err - } - } - - if err := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode|enableVirtualTerminalInput); err != nil { - winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) //
restore out if we can - return err - } - - return nil -} - -// disableNativeConsole turns off native console mode -func disableNativeConsole(state *State) error { - // Try to restore both in and out before checking for errors. - errout := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) - errin := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode) - if errout != nil { - return errout - } - if errin != nil { - return errin - } - return nil -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - return windows.GetHandleInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - return windows.IsConsole(fd) -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - if usingNativeConsole { - return disableNativeConsole(state) - } - return winterm.SetConsoleMode(fd, state.outMode) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - if usingNativeConsole { - state, err := getNativeConsole() - if err != nil { - return nil, err - } - return &state, nil - } - - mode, e := winterm.GetConsoleMode(fd) - if e != nil { - return nil, e - } - - return &State{outMode: mode}, nil -} - -// DisableEcho disables echo for the terminal connected to the given file descriptor. -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { - mode := state.inMode - mode &^= winterm.ENABLE_ECHO_INPUT - mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT - err := winterm.SetConsoleMode(fd, mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored.
-func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.inMode - if usingNativeConsole { - if err := enableNativeConsole(*state); err != nil { - return nil, err - } - mode |= enableVirtualTerminalInput - } - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= winterm.ENABLE_ECHO_INPUT - mode &^= winterm.ENABLE_LINE_INPUT - mode &^= winterm.ENABLE_MOUSE_INPUT - mode &^= winterm.ENABLE_WINDOW_INPUT - mode &^= winterm.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= winterm.ENABLE_EXTENDED_FLAGS - mode |= winterm.ENABLE_INSERT_MODE - mode |= winterm.ENABLE_QUICK_EDIT_MODE - - err = winterm.SetConsoleMode(fd, mode) - if err != nil { - return nil, err - } - return state, nil -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/pkg/term/termios_darwin.go b/pkg/term/termios_darwin.go deleted file mode 100644 index 480db900a..000000000 --- a/pkg/term/termios_darwin.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint64 - Oflag uint64 - Cflag uint64 - Lflag uint64 - Cc [20]byte - Ispeed uint64 - Ospeed uint64 -} - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/pkg/term/termios_freebsd.go b/pkg/term/termios_freebsd.go deleted file mode 100644 index ed843ad69..000000000 --- a/pkg/term/termios_freebsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall.
-const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/pkg/term/termios_linux.go b/pkg/term/termios_linux.go deleted file mode 100644 index 22921b6ae..000000000 --- a/pkg/term/termios_linux.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TCGETS - setTermios = syscall.TCSETS -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - return &oldState, nil -} diff --git a/pkg/term/termios_openbsd.go b/pkg/term/termios_openbsd.go deleted file mode 100644 index ed843ad69..000000000 --- a/pkg/term/termios_openbsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall.
-const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/pkg/term/windows/ansi_reader.go b/pkg/term/windows/ansi_reader.go deleted file mode 100644 index 1db920db1..000000000 --- a/pkg/term/windows/ansi_reader.go +++ /dev/null @@ -1,258 +0,0 @@ -// +build windows - -package windows - -import ( - "bytes" - "errors" - "fmt" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. -type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -func newAnsiReader(nFile int) *ansiReader { - initLogger() - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p.
-func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - logger.Debugf("Reading previously cached bytes") - - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar.fd, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - logger.Debug("No input events detected") - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - logger.Debug("No key bytes returned from the translator") - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("Unexpected copy length encountered.") - } - - logger.Debugf("Read p[%d]: % x", copiedLength, p) - logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. 
- recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } - logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[uint16]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[uint16]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. -func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // <Ctrl>-D Signals the end of input from the keyboard; also exits current shell. - // <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key. - // <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-S. - // <Ctrl>-S Suspends printing on the screen (does not stop the program). - // <Ctrl>-U Deletes all characters on the current line. Also called the KILL key. - // <Ctrl>-E Quits current command and creates a core - - } - - // <Alt>+Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - - return string(keyEvent.UnicodeChar) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
-func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState uint32) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/pkg/term/windows/ansi_writer.go b/pkg/term/windows/ansi_writer.go deleted file mode 100644 index 0299a8842..000000000 --- a/pkg/term/windows/ansi_writer.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build windows - -package windows - -import ( - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. -type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -func newAnsiWriter(nFile int) *ansiWriter { - initLogger() - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - logger.Infof("newAnsiWriter: parser %p", parser) - - aw := &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } - - logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) - logger.Infof("newAnsiWriter: %v", aw) - return aw -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. -func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - logger.Infof("Write: % x", p) - logger.Infof("Write: %s", string(p)) - return aw.parser.Parse(p) -} diff --git a/pkg/term/windows/console.go b/pkg/term/windows/console.go deleted file mode 100644 index 3036a0460..000000000 --- a/pkg/term/windows/console.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build windows - -package windows - -import ( - "io" - "os" - "syscall" - - "github.com/Azure/go-ansiterm/winterm" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" - "io/ioutil" -) - -// ConEmuStreams returns prepared versions of console streams, -// for proper use in ConEmu terminal. 
-// The ConEmu terminal emulates ANSI on output streams well by default. -func ConEmuStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - if IsConsole(os.Stdin.Fd()) { - stdIn = newAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - stdOut = os.Stdout - stdErr = os.Stderr - - // WARNING (BEGIN): sourced from newAnsiWriter - - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - - // WARNING (END): sourced from newAnsiWriter - - return stdIn, stdOut, stdErr -} - -// ConsoleStreams returns a wrapped version of each standard stream that references a console, -// handling ANSI character sequences. -func ConsoleStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - if IsConsole(os.Stdin.Fd()) { - stdIn = newAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - if IsConsole(os.Stdout.Fd()) { - stdOut = newAnsiWriter(syscall.STD_OUTPUT_HANDLE) - } else { - stdOut = os.Stdout - } - - if IsConsole(os.Stderr.Fd()) { - stdErr = newAnsiWriter(syscall.STD_ERROR_HANDLE) - } else { - stdErr = os.Stderr - } - - return stdIn, stdOut, stdErr -} - -// GetHandleInfo returns the file descriptor and a bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = IsConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -func IsConsole(fd uintptr) bool { - _, e := winterm.GetConsoleMode(fd) - return e == nil -} diff --git a/pkg/term/windows/windows.go b/pkg/term/windows/windows.go deleted file mode 100644 index ce4cb5990..000000000 --- a/pkg/term/windows/windows.go +++ /dev/null @@ -1,33 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. - -package windows - -import ( - "io/ioutil" - "os" - "sync" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger -var initOnce sync.Once - -func initLogger() { - initOnce.Do(func() { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - }) -} diff --git a/pkg/term/windows/windows_test.go b/pkg/term/windows/windows_test.go deleted file mode 100644 index 52aeab54e..000000000 --- a/pkg/term/windows/windows_test.go +++ /dev/null @@ -1,3 +0,0 @@ -// This file is necessary to pass the Docker tests.
- -package windows diff --git a/pkg/tlsconfig/config.go b/pkg/tlsconfig/config.go deleted file mode 100644 index e3dfad1f0..000000000 --- a/pkg/tlsconfig/config.go +++ /dev/null @@ -1,133 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -// As a reminder from https://golang.org/pkg/crypto/tls/#Config: -// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. -// A Config may be reused; the tls package will also not modify it. -package tlsconfig - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "os" - - "github.com/Sirupsen/logrus" -) - -// Options represents the information needed to create client and server TLS configurations. -type Options struct { - CAFile string - - // If either CertFile or KeyFile is empty, Client() will not load them - // preventing the client from authenticating to the server. - // However, Server() requires them and will error out if they are empty. - CertFile string - KeyFile string - - // client-only option - InsecureSkipVerify bool - // server-only option - ClientAuth tls.ClientAuthType -} - -// Extra (server-side) accepted CBC cipher suites - to be phased out in the future -var acceptedCBCCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, -} - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} - -// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls -// options struct but wants to use a commonly accepted set of TLS cipher suites, with -// known weak algorithms removed. -var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) - -// ServerDefault is a secure-enough default TLS configuration for servers. -var ServerDefault = tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, -} - -// ClientDefault is a secure-enough default TLS configuration for clients. -var ClientDefault = tls.Config{ - // Prefer TLS1.2 as the client minimum - MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, -} - -// certPool returns an X.509 certificate pool from `caFile`, the certificate file. -func certPool(caFile string) (*x509.CertPool, error) { - // If we should verify the server, we need to load a trusted CA - certPool := x509.NewCertPool() - pem, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err) - } - if !certPool.AppendCertsFromPEM(pem) { - return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) - } - s := certPool.Subjects() - subjects := make([]string, len(s)) - for i, subject := range s { - subjects[i] = string(subject) - } - logrus.Debugf("Trusting certs with subjects: %v", subjects) - return certPool, nil -} - -// Client returns a TLS configuration meant to be used by a client.
-func Client(options Options) (*tls.Config, error) { - tlsConfig := ClientDefault - tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify - if !options.InsecureSkipVerify { - CAs, err := certPool(options.CAFile) - if err != nil { - return nil, err - } - tlsConfig.RootCAs = CAs - } - - if options.CertFile != "" && options.KeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - } - - return &tlsConfig, nil -} - -// Server returns a TLS configuration meant to be used by a server. -func Server(options Options) (*tls.Config, error) { - tlsConfig := ServerDefault - tlsConfig.ClientAuth = options.ClientAuth - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) - } - return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - if options.ClientAuth >= tls.VerifyClientCertIfGiven { - CAs, err := certPool(options.CAFile) - if err != nil { - return nil, err - } - tlsConfig.ClientCAs = CAs - } - return &tlsConfig, nil -} diff --git a/pkg/truncindex/truncindex.go b/pkg/truncindex/truncindex.go deleted file mode 100644 index 02610b8b7..000000000 --- a/pkg/truncindex/truncindex.go +++ /dev/null @@ -1,137 +0,0 @@ -// Package truncindex provides a general 'index tree', used by Docker -// in order to be able to reference containers by only a few unambiguous -// characters of their id. -package truncindex - -import ( - "errors" - "fmt" - "strings" - "sync" - - "github.com/tchap/go-patricia/patricia" -) - -var ( - // ErrEmptyPrefix is an error returned if the prefix was empty. - ErrEmptyPrefix = errors.New("Prefix can't be empty") - - // ErrIllegalChar is returned when a space is in the ID - ErrIllegalChar = errors.New("illegal character: ' '") - - // ErrNotExist is returned when ID or its prefix not found in index. - ErrNotExist = errors.New("ID does not exist") -) - -// ErrAmbiguousPrefix is returned if the prefix was ambiguous -// (multiple ids for the prefix). -type ErrAmbiguousPrefix struct { - prefix string -} - -func (e ErrAmbiguousPrefix) Error() string { - return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) -} - -// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. -// This is used to retrieve image and container IDs by more convenient shorthand prefixes. -type TruncIndex struct { - sync.RWMutex - trie *patricia.Trie - ids map[string]struct{} -} - -// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. 
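A hedged sketch of building a client-side TLS configuration with the Options struct removed above (pre-removal import path assumed; the file paths are hypothetical placeholders):

package main

import (
	"net/http"

	"github.com/docker/docker/pkg/tlsconfig"
)

func main() {
	cfg, err := tlsconfig.Client(tlsconfig.Options{
		CAFile:   "/etc/docker/ca.pem",   // hypothetical path
		CertFile: "/etc/docker/cert.pem", // hypothetical path
		KeyFile:  "/etc/docker/key.pem",  // hypothetical path
	})
	if err != nil {
		panic(err)
	}
	// ClientDefault pins TLS 1.2 as the minimum version for this config.
	_ = &http.Client{Transport: &http.Transport{TLSClientConfig: cfg}}
}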
-func NewTruncIndex(ids []string) (idx *TruncIndex) { - idx = &TruncIndex{ - ids: make(map[string]struct{}), - - // Change patricia max prefix per node length, - // because our len(ID) is always 64 - trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), - } - for _, id := range ids { - idx.addID(id) - } - return -} - -func (idx *TruncIndex) addID(id string) error { - if strings.Contains(id, " ") { - return ErrIllegalChar - } - if id == "" { - return ErrEmptyPrefix - } - if _, exists := idx.ids[id]; exists { - return fmt.Errorf("id already exists: '%s'", id) - } - idx.ids[id] = struct{}{} - if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { - return fmt.Errorf("failed to insert id: %s", id) - } - return nil -} - -// Add adds a new ID to the TruncIndex. -func (idx *TruncIndex) Add(id string) error { - idx.Lock() - defer idx.Unlock() - if err := idx.addID(id); err != nil { - return err - } - return nil -} - -// Delete removes an ID from the TruncIndex. If there are multiple IDs -// with the given prefix, an error is returned. -func (idx *TruncIndex) Delete(id string) error { - idx.Lock() - defer idx.Unlock() - if _, exists := idx.ids[id]; !exists || id == "" { - return fmt.Errorf("no such id: '%s'", id) - } - delete(idx.ids, id) - if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { - return fmt.Errorf("no such id: '%s'", id) - } - return nil -} - -// Get retrieves an ID from the TruncIndex. If there are multiple IDs -// with the given prefix, an error is returned. -func (idx *TruncIndex) Get(s string) (string, error) { - if s == "" { - return "", ErrEmptyPrefix - } - var ( - id string - ) - subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { - if id != "" { - // two or more IDs match this prefix, so it is ambiguous - id = "" - return ErrAmbiguousPrefix{prefix: string(prefix)} - } - id = string(prefix) - return nil - } - - idx.RLock() - defer idx.RUnlock() - if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { - return "", err - } - if id != "" { - return id, nil - } - return "", ErrNotExist -} - -// Iterate iterates over all stored IDs, and passes each of them to the given handler. -func (idx *TruncIndex) Iterate(handler func(id string)) { - idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { - handler(string(prefix)) - return nil - }) -} diff --git a/pkg/truncindex/truncindex_test.go b/pkg/truncindex/truncindex_test.go deleted file mode 100644 index 8197baf7d..000000000 --- a/pkg/truncindex/truncindex_test.go +++ /dev/null @@ -1,429 +0,0 @@ -package truncindex - -import ( - "math/rand" - "testing" - - "github.com/docker/docker/pkg/stringid" -) - -// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.
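And a minimal sketch of the TruncIndex API removed above (pre-removal import path assumed; the ID comes from the deleted tests, and any unambiguous prefix resolves to the full ID):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/truncindex"
)

func main() {
	idx := truncindex.NewTruncIndex([]string{
		"99b36c2c326ccc11e726eee6ee78a0baf166ef96",
	})
	id, err := idx.Get("99b3") // any unambiguous prefix works
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // the full 40-character ID
}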
-func TestTruncIndex(t *testing.T) { - ids := []string{} - index := NewTruncIndex(ids) - // Get on an empty index - if _, err := index.Get("foobar"); err == nil { - t.Fatal("Get on an empty index should return an error") - } - - // Spaces should be illegal in an id - if err := index.Add("I have a space"); err == nil { - t.Fatalf("Adding an id with ' ' should return an error") - } - - id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" - // Add an id - if err := index.Add(id); err != nil { - t.Fatal(err) - } - - // Add an empty id (should fail) - if err := index.Add(""); err == nil { - t.Fatalf("Adding an empty id should return an error") - } - - // Get a non-existing id - assertIndexGet(t, index, "abracadabra", "", true) - // Get an empty id - assertIndexGet(t, index, "", "", true) - // Get the exact id - assertIndexGet(t, index, id, id, false) - // The first letter should match - assertIndexGet(t, index, id[:1], id, false) - // The first half should match - assertIndexGet(t, index, id[:len(id)/2], id, false) - // The second half should NOT match - assertIndexGet(t, index, id[len(id)/2:], "", true) - - id2 := id[:6] + "blabla" - // Add an id - if err := index.Add(id2); err != nil { - t.Fatal(err) - } - // Both exact IDs should work - assertIndexGet(t, index, id, id, false) - assertIndexGet(t, index, id2, id2, false) - - // 6 characters or less should conflict - assertIndexGet(t, index, id[:6], "", true) - assertIndexGet(t, index, id[:4], "", true) - assertIndexGet(t, index, id[:1], "", true) - - // An ambiguous id prefix should return an error - if _, err := index.Get(id[:4]); err == nil { - t.Fatal("An ambiguous id prefix should return an error") - } - - // 7 characters should NOT conflict - assertIndexGet(t, index, id[:7], id, false) - assertIndexGet(t, index, id2[:7], id2, false) - - // Deleting a non-existing id should return an error - if err := index.Delete("non-existing"); err == nil { - t.Fatalf("Deleting a non-existing id should return an error") - } - - // Deleting an empty id should return an error - if err := index.Delete(""); err == nil { - t.Fatal("Deleting an empty id should return an error") - } - - // Deleting id2 should remove conflicts - if err := index.Delete(id2); err != nil { - t.Fatal(err) - } - // id2 should no longer work - assertIndexGet(t, index, id2, "", true) - assertIndexGet(t, index, id2[:7], "", true) - assertIndexGet(t, index, id2[:11], "", true) - - // conflicts between id and id2 should be gone - assertIndexGet(t, index, id[:6], id, false) - assertIndexGet(t, index, id[:4], id, false) - assertIndexGet(t, index, id[:1], id, false) - - // non-conflicting substrings should still not conflict - assertIndexGet(t, index, id[:7], id, false) - assertIndexGet(t, index, id[:15], id, false) - assertIndexGet(t, index, id, id, false) - - assertIndexIterate(t) -} - -func assertIndexIterate(t *testing.T) { - ids := []string{ - "19b36c2c326ccc11e726eee6ee78a0baf166ef96", - "28b36c2c326ccc11e726eee6ee78a0baf166ef96", - "37b36c2c326ccc11e726eee6ee78a0baf166ef96", - "46b36c2c326ccc11e726eee6ee78a0baf166ef96", - } - - index := NewTruncIndex(ids) - - index.Iterate(func(targetId string) { - for _, id := range ids { - if targetId == id { - return - } - } - - t.Fatalf("An unknown ID '%s'", targetId) - }) -} - -func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { - if result, err := index.Get(input); err != nil && !expectError { - t.Fatalf("Unexpected error getting '%s': %s", input, err) - } else if err == nil && expectError { - 
t.Fatalf("Getting '%s' should return an error, not '%s'", input, result) - } else if result != expectedResult { - t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) - } -} - -func BenchmarkTruncIndexAdd100(b *testing.B) { - var testSet []string - for i := 0; i < 100; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkTruncIndexAdd250(b *testing.B) { - var testSet []string - for i := 0; i < 250; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkTruncIndexAdd500(b *testing.B) { - var testSet []string - for i := 0; i < 500; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkTruncIndexGet100(b *testing.B) { - var testSet []string - var testKeys []string - for i := 0; i < 100; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - l := rand.Intn(12) + 12 - testKeys = append(testKeys, id[:l]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, id := range testKeys { - if res, err := index.Get(id); err != nil { - b.Fatal(res, err) - } - } - } -} - -func BenchmarkTruncIndexGet250(b *testing.B) { - var testSet []string - var testKeys []string - for i := 0; i < 250; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - l := rand.Intn(12) + 12 - testKeys = append(testKeys, id[:l]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, id := range testKeys { - if res, err := index.Get(id); err != nil { - b.Fatal(res, err) - } - } - } -} - -func BenchmarkTruncIndexGet500(b *testing.B) { - var testSet []string - var testKeys []string - for i := 0; i < 500; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - l := rand.Intn(12) + 12 - testKeys = append(testKeys, id[:l]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, id := range testKeys { - if res, err := index.Get(id); err != nil { - b.Fatal(res, err) - } - } - } -} - -func BenchmarkTruncIndexDelete100(b *testing.B) { - var testSet []string - for i := 0; i < 100; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - b.StopTimer() - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - b.StartTimer() - for _, id := range testSet { - if err := index.Delete(id); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkTruncIndexDelete250(b *testing.B) { - var testSet []string - for i := 0; i < 250; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() 
- for i := 0; i < b.N; i++ { - b.StopTimer() - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - b.StartTimer() - for _, id := range testSet { - if err := index.Delete(id); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkTruncIndexDelete500(b *testing.B) { - var testSet []string - for i := 0; i < 500; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - b.StopTimer() - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - b.StartTimer() - for _, id := range testSet { - if err := index.Delete(id); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkTruncIndexNew100(b *testing.B) { - var testSet []string - for i := 0; i < 100; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - NewTruncIndex(testSet) - } -} - -func BenchmarkTruncIndexNew250(b *testing.B) { - var testSet []string - for i := 0; i < 250; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - NewTruncIndex(testSet) - } -} - -func BenchmarkTruncIndexNew500(b *testing.B) { - var testSet []string - for i := 0; i < 500; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - NewTruncIndex(testSet) - } -} - -func BenchmarkTruncIndexAddGet100(b *testing.B) { - var testSet []string - var testKeys []string - for i := 0; i < 500; i++ { - id := stringid.GenerateNonCryptoID() - testSet = append(testSet, id) - l := rand.Intn(12) + 12 - testKeys = append(testKeys, id[:l]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - for _, id := range testKeys { - if res, err := index.Get(id); err != nil { - b.Fatal(res, err) - } - } - } -} - -func BenchmarkTruncIndexAddGet250(b *testing.B) { - var testSet []string - var testKeys []string - for i := 0; i < 500; i++ { - id := stringid.GenerateNonCryptoID() - testSet = append(testSet, id) - l := rand.Intn(12) + 12 - testKeys = append(testKeys, id[:l]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - for _, id := range testKeys { - if res, err := index.Get(id); err != nil { - b.Fatal(res, err) - } - } - } -} - -func BenchmarkTruncIndexAddGet500(b *testing.B) { - var testSet []string - var testKeys []string - for i := 0; i < 500; i++ { - id := stringid.GenerateNonCryptoID() - testSet = append(testSet, id) - l := rand.Intn(12) + 12 - testKeys = append(testKeys, id[:l]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - for _, id := range testKeys { - if res, err := index.Get(id); err != nil { - b.Fatal(res, err) - } - } - } -} diff --git a/pkg/urlutil/urlutil.go b/pkg/urlutil/urlutil.go deleted file mode 100644 index 44152873b..000000000 --- a/pkg/urlutil/urlutil.go +++ /dev/null @@ -1,50 +0,0 @@ -// Package urlutil provides helper function to check urls kind. 
-// It supports http urls, git urls and transport url (tcp://, …) -package urlutil - -import ( - "regexp" - "strings" -) - -var ( - validPrefixes = map[string][]string{ - "url": {"http://", "https://"}, - "git": {"git://", "github.com/", "git@"}, - "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"}, - } - urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") -) - -// IsURL returns true if the provided str is an HTTP(S) URL. -func IsURL(str string) bool { - return checkURL(str, "url") -} - -// IsGitURL returns true if the provided str is a git repository URL. -func IsGitURL(str string) bool { - if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { - return true - } - return checkURL(str, "git") -} - -// IsGitTransport returns true if the provided str is a git transport by inspecting -// the prefix of the string for known protocols used in git. -func IsGitTransport(str string) bool { - return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") -} - -// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. -func IsTransportURL(str string) bool { - return checkURL(str, "transport") -} - -func checkURL(str, kind string) bool { - for _, prefix := range validPrefixes[kind] { - if strings.HasPrefix(str, prefix) { - return true - } - } - return false -} diff --git a/pkg/urlutil/urlutil_test.go b/pkg/urlutil/urlutil_test.go deleted file mode 100644 index 75eb464fe..000000000 --- a/pkg/urlutil/urlutil_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package urlutil - -import "testing" - -var ( - gitUrls = []string{ - "git://github.com/docker/docker", - "git@github.com:docker/docker.git", - "git@bitbucket.org:atlassianlabs/atlassian-docker.git", - "https://github.com/docker/docker.git", - "http://github.com/docker/docker.git", - "http://github.com/docker/docker.git#branch", - "http://github.com/docker/docker.git#:dir", - } - incompleteGitUrls = []string{ - "github.com/docker/docker", - } - invalidGitUrls = []string{ - "http://github.com/docker/docker.git:#branch", - } - transportUrls = []string{ - "tcp://example.com", - "tcp+tls://example.com", - "udp://example.com", - "unix:///example", - "unixgram:///example", - } -) - -func TestValidGitTransport(t *testing.T) { - for _, url := range gitUrls { - if IsGitTransport(url) == false { - t.Fatalf("%q should be detected as valid Git prefix", url) - } - } - - for _, url := range incompleteGitUrls { - if IsGitTransport(url) == true { - t.Fatalf("%q should not be detected as valid Git prefix", url) - } - } -} - -func TestIsGIT(t *testing.T) { - for _, url := range gitUrls { - if IsGitURL(url) == false { - t.Fatalf("%q should be detected as valid Git url", url) - } - } - - for _, url := range incompleteGitUrls { - if IsGitURL(url) == false { - t.Fatalf("%q should be detected as valid Git url", url) - } - } - - for _, url := range invalidGitUrls { - if IsGitURL(url) == true { - t.Fatalf("%q should not be detected as valid Git prefix", url) - } - } -} - -func TestIsTransport(t *testing.T) { - for _, url := range transportUrls { - if IsTransportURL(url) == false { - t.Fatalf("%q should be detected as valid Transport url", url) - } - } -} diff --git a/pkg/useragent/README.md b/pkg/useragent/README.md deleted file mode 100644 index d9cb367d1..000000000 --- a/pkg/useragent/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions to pack version information into a single User-Agent header. 
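For reference, the urlutil predicates above are plain prefix checks, with IsGitURL additionally accepting HTTP(S) URLs that end in .git or .git#fragment. A small driver with illustrative inputs shows how each string is classified:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/urlutil"
)

func main() {
	// Note that an "incomplete" remote like github.com/docker/docker still
	// counts as a git URL (prefix match) even though it is not a plain URL.
	for _, s := range []string{
		"https://github.com/docker/docker.git#branch",
		"git@github.com:docker/docker.git",
		"tcp://example.com:2376",
		"github.com/docker/docker",
	} {
		fmt.Printf("%-45s url=%-5t git=%-5t transport=%t\n",
			s, urlutil.IsURL(s), urlutil.IsGitURL(s), urlutil.IsTransportURL(s))
	}
}
```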
diff --git a/pkg/useragent/useragent.go b/pkg/useragent/useragent.go deleted file mode 100644 index 1137db51b..000000000 --- a/pkg/useragent/useragent.go +++ /dev/null @@ -1,55 +0,0 @@ -// Package useragent provides helper functions to pack -// version information into a single User-Agent header. -package useragent - -import ( - "strings" -) - -// VersionInfo is used to model UserAgent versions. -type VersionInfo struct { - Name string - Version string -} - -func (vi *VersionInfo) isValid() bool { - const stopChars = " \t\r\n/" - name := vi.Name - vers := vi.Version - if len(name) == 0 || strings.ContainsAny(name, stopChars) { - return false - } - if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { - return false - } - return true -} - -// AppendVersions converts versions to a string and appends the string to the string base. -// -// Each VersionInfo will be converted to a string in the format of -// "product/version", where the "product" is get from the name field, while -// version is get from the version field. Several pieces of version information -// will be concatenated and separated by space. -// -// Example: -// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) -// results in "base foo/1.0 bar/2.0". -func AppendVersions(base string, versions ...VersionInfo) string { - if len(versions) == 0 { - return base - } - - verstrs := make([]string, 0, 1+len(versions)) - if len(base) > 0 { - verstrs = append(verstrs, base) - } - - for _, v := range versions { - if !v.isValid() { - continue - } - verstrs = append(verstrs, v.Name+"/"+v.Version) - } - return strings.Join(verstrs, " ") -} diff --git a/pkg/useragent/useragent_test.go b/pkg/useragent/useragent_test.go deleted file mode 100644 index 0ad7243a6..000000000 --- a/pkg/useragent/useragent_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package useragent - -import "testing" - -func TestVersionInfo(t *testing.T) { - vi := VersionInfo{"foo", "bar"} - if !vi.isValid() { - t.Fatalf("VersionInfo should be valid") - } - vi = VersionInfo{"", "bar"} - if vi.isValid() { - t.Fatalf("Expected VersionInfo to be invalid") - } - vi = VersionInfo{"foo", ""} - if vi.isValid() { - t.Fatalf("Expected VersionInfo to be invalid") - } -} - -func TestAppendVersions(t *testing.T) { - vis := []VersionInfo{ - {"foo", "1.0"}, - {"bar", "0.1"}, - {"pi", "3.1.4"}, - } - v := AppendVersions("base", vis...) - expect := "base foo/1.0 bar/0.1 pi/3.1.4" - if v != expect { - t.Fatalf("expected %q, got %q", expect, v) - } -} diff --git a/plugin/backend.go b/plugin/backend.go deleted file mode 100644 index ada623950..000000000 --- a/plugin/backend.go +++ /dev/null @@ -1,140 +0,0 @@ -// +build experimental - -package plugin - -import ( - "fmt" - "net/http" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/plugin/distribution" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" -) - -// Disable deactivates a plugin, which implies that they cannot be used by containers. -func (pm *Manager) Disable(name string) error { - p, err := pm.get(name) - if err != nil { - return err - } - return pm.disable(p) -} - -// Enable activates a plugin, which implies that they are ready to be used by containers. 
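Returning to the useragent helpers above: AppendVersions silently drops any entry whose name or version is empty or contains whitespace or a slash, and simply returns the base string when no valid entries remain. A minimal sketch (the version strings are illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/useragent"
)

func main() {
	// "bad name" contains a space, so it fails isValid and is skipped.
	ua := useragent.AppendVersions("docker/1.12.0",
		useragent.VersionInfo{Name: "go", Version: "go1.6.2"},
		useragent.VersionInfo{Name: "git-commit", Version: "8eab29e"},
		useragent.VersionInfo{Name: "bad name", Version: "1.0"},
	)
	fmt.Println(ua) // docker/1.12.0 go/go1.6.2 git-commit/8eab29e
}
```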
-func (pm *Manager) Enable(name string) error { - p, err := pm.get(name) - if err != nil { - return err - } - return pm.enable(p) -} - -// Inspect examines a plugin manifest -func (pm *Manager) Inspect(name string) (tp types.Plugin, err error) { - p, err := pm.get(name) - if err != nil { - return tp, err - } - return p.P, nil -} - -// Pull pulls a plugin and enables it. -func (pm *Manager) Pull(name string, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { - ref, err := reference.ParseNamed(name) - if err != nil { - logrus.Debugf("error in reference.ParseNamed: %v", err) - return nil, err - } - name = ref.String() - - if p, _ := pm.get(name); p != nil { - logrus.Debugf("plugin already exists") - return nil, fmt.Errorf("%s exists", name) - } - - pluginID := stringid.GenerateNonCryptoID() - - if err := os.MkdirAll(filepath.Join(pm.libRoot, pluginID), 0755); err != nil { - logrus.Debugf("error in MkdirAll: %v", err) - return nil, err - } - - pd, err := distribution.Pull(name, pm.registryService, metaHeader, authConfig) - if err != nil { - logrus.Debugf("error in distribution.Pull(): %v", err) - return nil, err - } - - if err := distribution.WritePullData(pd, filepath.Join(pm.libRoot, pluginID), true); err != nil { - logrus.Debugf("error in distribution.WritePullData(): %v", err) - return nil, err - } - - p := pm.newPlugin(ref, pluginID) - if err := pm.initPlugin(p); err != nil { - return nil, err - } - - pm.Lock() - pm.plugins[pluginID] = p - pm.nameToID[name] = pluginID - pm.save() - pm.Unlock() - - return computePrivileges(&p.P.Manifest), nil -} - -// List displays the list of plugins and associated metadata. -func (pm *Manager) List() ([]types.Plugin, error) { - out := make([]types.Plugin, 0, len(pm.plugins)) - for _, p := range pm.plugins { - out = append(out, p.P) - } - return out, nil -} - -// Push pushes a plugin to the store. -func (pm *Manager) Push(name string, metaHeader http.Header, authConfig *types.AuthConfig) error { - p, err := pm.get(name) - if err != nil { - return err - } - dest := filepath.Join(pm.libRoot, p.P.ID) - config, err := os.Open(filepath.Join(dest, "manifest.json")) - if err != nil { - return err - } - defer config.Close() - - rootfs, err := archive.Tar(filepath.Join(dest, "rootfs"), archive.Gzip) - if err != nil { - return err - } - _, err = distribution.Push(name, pm.registryService, metaHeader, authConfig, config, rootfs) - // XXX: Ignore returning digest for now. - // Since digest needs to be written to the ProgressWriter. - return err -} - -// Remove deletes plugin's root directory. 
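Together with Remove and Set just below, the Manager methods deleted here form a compact surface. The interface that follows is our own one-screen summary of those signatures, written for orientation only; the name Backend is ours and does not appear in the removed code:

```go
package plugin

import (
	"net/http"

	"github.com/docker/engine-api/types"
)

// Backend summarizes the manager methods removed in this patch.
type Backend interface {
	Disable(name string) error
	Enable(name string) error
	Inspect(name string) (types.Plugin, error)
	Pull(name string, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error)
	List() ([]types.Plugin, error)
	Push(name string, metaHeader http.Header, authConfig *types.AuthConfig) error
	Remove(name string) error
	Set(name string, args []string) error
}
```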
-func (pm *Manager) Remove(name string) error { - p, err := pm.get(name) - if err != nil { - return err - } - return pm.remove(p) -} - -// Set sets plugin args -func (pm *Manager) Set(name string, args []string) error { - p, err := pm.get(name) - if err != nil { - return err - } - return pm.set(p, args) -} diff --git a/plugin/distribution/pull.go b/plugin/distribution/pull.go deleted file mode 100644 index 1bae8d4bb..000000000 --- a/plugin/distribution/pull.go +++ /dev/null @@ -1,212 +0,0 @@ -// +build experimental - -package distribution - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/manifest/schema2" - dockerdist "github.com/docker/docker/distribution" - archive "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// PullData is the plugin manifest and the rootfs -type PullData interface { - Config() ([]byte, error) - Layer() (io.ReadCloser, error) -} - -type pullData struct { - repository distribution.Repository - manifest schema2.Manifest - index int -} - -func (pd *pullData) Config() ([]byte, error) { - blobs := pd.repository.Blobs(context.Background()) - config, err := blobs.Get(context.Background(), pd.manifest.Config.Digest) - if err != nil { - return nil, err - } - // validate - var p types.Plugin - if err := json.Unmarshal(config, &p); err != nil { - return nil, err - } - return config, nil -} - -func (pd *pullData) Layer() (io.ReadCloser, error) { - if pd.index >= len(pd.manifest.Layers) { - return nil, io.EOF - } - - blobs := pd.repository.Blobs(context.Background()) - rsc, err := blobs.Open(context.Background(), pd.manifest.Layers[pd.index].Digest) - if err != nil { - return nil, err - } - pd.index++ - return rsc, nil -} - -// Pull downloads the plugin from Store -func Pull(name string, rs registry.Service, metaheader http.Header, authConfig *types.AuthConfig) (PullData, error) { - ref, err := reference.ParseNamed(name) - if err != nil { - logrus.Debugf("pull.go: error in ParseNamed: %v", err) - return nil, err - } - - repoInfo, err := rs.ResolveRepository(ref) - if err != nil { - logrus.Debugf("pull.go: error in ResolveRepository: %v", err) - return nil, err - } - - if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil { - logrus.Debugf("pull.go: error in ValidateRepoName: %v", err) - return nil, err - } - - endpoints, err := rs.LookupPullEndpoints(repoInfo.Hostname()) - if err != nil { - logrus.Debugf("pull.go: error in LookupPullEndpoints: %v", err) - return nil, err - } - - var confirmedV2 bool - var repository distribution.Repository - - for _, endpoint := range endpoints { - if confirmedV2 && endpoint.Version == registry.APIVersion1 { - logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) - continue - } - - // TODO: reuse contexts - repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaheader, authConfig, "pull") - if err != nil { - logrus.Debugf("pull.go: error in NewV2Repository: %v", err) - return nil, err - } - if !confirmedV2 { - logrus.Debugf("pull.go: !confirmedV2") - return nil, ErrUnsupportedRegistry - } - logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) - break - } - - tag := DefaultTag - if ref, ok := ref.(reference.NamedTagged); 
ok { - tag = ref.Tag() - } - - // tags := repository.Tags(context.Background()) - // desc, err := tags.Get(context.Background(), tag) - // if err != nil { - // return nil, err - // } - // - msv, err := repository.Manifests(context.Background()) - if err != nil { - logrus.Debugf("pull.go: error in repository.Manifests: %v", err) - return nil, err - } - manifest, err := msv.Get(context.Background(), "", distribution.WithTag(tag)) - if err != nil { - // TODO: change 401 to 404 - logrus.Debugf("pull.go: error in msv.Get(): %v", err) - return nil, err - } - - _, pl, err := manifest.Payload() - if err != nil { - logrus.Debugf("pull.go: error in manifest.Payload(): %v", err) - return nil, err - } - var m schema2.Manifest - if err := json.Unmarshal(pl, &m); err != nil { - logrus.Debugf("pull.go: error in json.Unmarshal(): %v", err) - return nil, err - } - if m.Config.MediaType != MediaTypeConfig { - return nil, ErrUnsupportedMediaType - } - - pd := &pullData{ - repository: repository, - manifest: m, - } - - logrus.Debugf("manifest: %s", pl) - return pd, nil -} - -// WritePullData extracts manifest and rootfs to the disk. -func WritePullData(pd PullData, dest string, extract bool) error { - config, err := pd.Config() - if err != nil { - return err - } - var p types.Plugin - if err := json.Unmarshal(config, &p); err != nil { - return err - } - logrus.Debugf("%#v", p) - - if err := os.MkdirAll(dest, 0700); err != nil { - return err - } - - if extract { - if err := ioutil.WriteFile(filepath.Join(dest, "manifest.json"), config, 0600); err != nil { - return err - } - - if err := os.MkdirAll(filepath.Join(dest, "rootfs"), 0700); err != nil { - return err - } - } - - for i := 0; ; i++ { - l, err := pd.Layer() - if err == io.EOF { - break - } - if err != nil { - return err - } - - if !extract { - f, err := os.Create(filepath.Join(dest, fmt.Sprintf("layer%d.tar", i))) - if err != nil { - l.Close() - return err - } - io.Copy(f, l) - l.Close() - f.Close() - continue - } - - if _, err := archive.ApplyLayer(filepath.Join(dest, "rootfs"), l); err != nil { - return err - } - - } - return nil -} diff --git a/plugin/distribution/push.go b/plugin/distribution/push.go deleted file mode 100644 index 27e717af4..000000000 --- a/plugin/distribution/push.go +++ /dev/null @@ -1,135 +0,0 @@ -// +build experimental - -package distribution - -import ( - "crypto/sha256" - "io" - "net/http" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema2" - dockerdist "github.com/docker/docker/distribution" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// Push pushes a plugin to a registry. 
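The PullData contract above is consumed by reading the config once and calling Layer until it reports io.EOF, which is the same pattern WritePullData follows further down. A self-contained sketch with a fake implementation:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// fakePullData satisfies the removed PullData contract: one config blob
// plus layers served in order until io.EOF.
type fakePullData struct {
	config string
	layers []string
	index  int
}

func (pd *fakePullData) Config() ([]byte, error) { return []byte(pd.config), nil }

func (pd *fakePullData) Layer() (io.ReadCloser, error) {
	if pd.index >= len(pd.layers) {
		return nil, io.EOF
	}
	l := ioutil.NopCloser(strings.NewReader(pd.layers[pd.index]))
	pd.index++
	return l, nil
}

func main() {
	pd := &fakePullData{config: `{"name":"sample"}`, layers: []string{"layer0", "layer1"}}

	config, err := pd.Config()
	if err != nil {
		panic(err)
	}
	fmt.Printf("config: %s\n", config)

	// Drain layers until io.EOF, the termination signal Layer uses.
	for i := 0; ; i++ {
		l, err := pd.Layer()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		n, _ := io.Copy(ioutil.Discard, l)
		l.Close()
		fmt.Printf("layer %d: %d bytes\n", i, n)
	}
}
```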
-func Push(name string, rs registry.Service, metaHeader http.Header, authConfig *types.AuthConfig, config io.ReadCloser, layers io.ReadCloser) (digest.Digest, error) { - ref, err := reference.ParseNamed(name) - if err != nil { - return "", err - } - - repoInfo, err := rs.ResolveRepository(ref) - if err != nil { - return "", err - } - - if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil { - return "", err - } - - endpoints, err := rs.LookupPushEndpoints(repoInfo.Hostname()) - if err != nil { - return "", err - } - - var confirmedV2 bool - var repository distribution.Repository - for _, endpoint := range endpoints { - if confirmedV2 && endpoint.Version == registry.APIVersion1 { - logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) - continue - } - repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaHeader, authConfig, "push", "pull") - if err != nil { - return "", err - } - if !confirmedV2 { - return "", ErrUnsupportedRegistry - } - logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) - // This means that we found an endpoint. and we are ready to push - break - } - - // Returns a reference to the repository's blob service. - blobs := repository.Blobs(context.Background()) - - // Descriptor = {mediaType, size, digest} - var descs []distribution.Descriptor - - for i, f := range []io.ReadCloser{config, layers} { - bw, err := blobs.Create(context.Background()) - if err != nil { - logrus.Debugf("Error in blobs.Create: %v", err) - return "", err - } - h := sha256.New() - r := io.TeeReader(f, h) - _, err = io.Copy(bw, r) - if err != nil { - f.Close() - logrus.Debugf("Error in io.Copy: %v", err) - return "", err - } - f.Close() - mt := MediaTypeLayer - if i == 0 { - mt = MediaTypeConfig - } - // Commit completes the write process to the BlobService. - // The descriptor arg to Commit is called the "provisional" descriptor and - // used for validation. - // The returned descriptor should be the one used. Its called the "Canonical" - // descriptor. - desc, err := bw.Commit(context.Background(), distribution.Descriptor{ - MediaType: mt, - // XXX: What about the Size? - Digest: digest.NewDigest("sha256", h), - }) - if err != nil { - logrus.Debugf("Error in bw.Commit: %v", err) - return "", err - } - // The canonical descriptor is set the mediatype again, just in case. - // Don't touch the digest or the size here. - desc.MediaType = mt - logrus.Debugf("pushed blob: %s %s", desc.MediaType, desc.Digest) - descs = append(descs, desc) - } - - // XXX: schema2.Versioned needs a MediaType as well. 
- // "application/vnd.docker.distribution.manifest.v2+json" - m, err := schema2.FromStruct(schema2.Manifest{Versioned: schema2.SchemaVersion, Config: descs[0], Layers: descs[1:]}) - if err != nil { - logrus.Debugf("error in schema2.FromStruct: %v", err) - return "", err - } - - msv, err := repository.Manifests(context.Background()) - if err != nil { - logrus.Debugf("error in repository.Manifests: %v", err) - return "", err - } - - _, pl, err := m.Payload() - if err != nil { - logrus.Debugf("error in m.Payload: %v", err) - return "", err - } - - logrus.Debugf("Pushed manifest: %s", pl) - - tag := DefaultTag - if tagged, ok := ref.(reference.NamedTagged); ok { - tag = tagged.Tag() - } - - return msv.Put(context.Background(), m, distribution.WithTag(tag)) -} diff --git a/plugin/distribution/types.go b/plugin/distribution/types.go deleted file mode 100644 index 0b1fd7aea..000000000 --- a/plugin/distribution/types.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build experimental - -package distribution - -import "errors" - -// ErrUnsupportedRegistry indicates that the registry does not support v2 protocol -var ErrUnsupportedRegistry = errors.New("only V2 repositories are supported for plugin distribution") - -// ErrUnsupportedMediaType indicates we are pulling content that's not a plugin -var ErrUnsupportedMediaType = errors.New("content is not a plugin") - -// Plugin related media types -const ( - MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" - MediaTypeConfig = "application/vnd.docker.plugin.v0+json" - MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" - DefaultTag = "latest" -) diff --git a/plugin/interface.go b/plugin/interface.go deleted file mode 100644 index 4a3ed64df..000000000 --- a/plugin/interface.go +++ /dev/null @@ -1,9 +0,0 @@ -package plugin - -import "github.com/docker/docker/pkg/plugins" - -// Plugin represents a plugin. It is used to abstract from an older plugin architecture (in pkg/plugins). -type Plugin interface { - Client() *plugins.Client - Name() string -} diff --git a/plugin/legacy.go b/plugin/legacy.go deleted file mode 100644 index 8ea4c0da9..000000000 --- a/plugin/legacy.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !experimental - -package plugin - -import "github.com/docker/docker/pkg/plugins" - -// FindWithCapability returns a list of plugins matching the given capability. -func FindWithCapability(capability string) ([]Plugin, error) { - pl, err := plugins.GetAll(capability) - if err != nil { - return nil, err - } - result := make([]Plugin, len(pl)) - for i, p := range pl { - result[i] = p - } - return result, nil -} - -// LookupWithCapability returns a plugin matching the given name and capability. -func LookupWithCapability(name, capability string) (Plugin, error) { - return plugins.Get(name, capability) -} diff --git a/profiles/apparmor/apparmor.go b/profiles/apparmor/apparmor.go deleted file mode 100644 index 51dfa5cf9..000000000 --- a/profiles/apparmor/apparmor.go +++ /dev/null @@ -1,115 +0,0 @@ -// +build linux - -package apparmor - -import ( - "bufio" - "io" - "os" - "path" - "strings" - - "github.com/docker/docker/pkg/aaparser" - "github.com/docker/docker/utils/templates" -) - -var ( - // profileDirectory is the file store for apparmor profiles and macros. - profileDirectory = "/etc/apparmor.d" - // defaultProfilePath is the default path for the apparmor profile to be saved. - defaultProfilePath = path.Join(profileDirectory, "docker") -) - -// profileData holds information about the given profile for generation. 
-type profileData struct { - // Name is profile name. - Name string - // Imports defines the apparmor functions to import, before defining the profile. - Imports []string - // InnerImports defines the apparmor functions to import in the profile. - InnerImports []string - // Version is the {major, minor, patch} version of apparmor_parser as a single number. - Version int -} - -// generateDefault creates an apparmor profile from ProfileData. -func (p *profileData) generateDefault(out io.Writer) error { - compiled, err := templates.NewParse("apparmor_profile", baseTemplate) - if err != nil { - return err - } - - if macroExists("tunables/global") { - p.Imports = append(p.Imports, "#include ") - } else { - p.Imports = append(p.Imports, "@{PROC}=/proc/") - } - - if macroExists("abstractions/base") { - p.InnerImports = append(p.InnerImports, "#include ") - } - - ver, err := aaparser.GetVersion() - if err != nil { - return err - } - p.Version = ver - - if err := compiled.Execute(out, p); err != nil { - return err - } - return nil -} - -// macrosExists checks if the passed macro exists. -func macroExists(m string) bool { - _, err := os.Stat(path.Join(profileDirectory, m)) - return err == nil -} - -// InstallDefault generates a default profile and installs it in the -// ProfileDirectory with `apparmor_parser`. -func InstallDefault(name string) error { - // Make sure the path where they want to save the profile exists - if err := os.MkdirAll(profileDirectory, 0755); err != nil { - return err - } - - p := profileData{ - Name: name, - } - - f, err := os.OpenFile(defaultProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err - } - if err := p.generateDefault(f); err != nil { - f.Close() - return err - } - f.Close() - - if err := aaparser.LoadProfile(defaultProfilePath); err != nil { - return err - } - - return nil -} - -// IsLoaded checks if a passed profile has been loaded into the kernel. -func IsLoaded(name string) error { - file, err := os.Open("/sys/kernel/security/apparmor/profiles") - if err != nil { - return err - } - r := bufio.NewReader(file) - for { - p, err := r.ReadString('\n') - if err != nil { - return err - } - if strings.HasPrefix(p, name+" ") { - return nil - } - } -} diff --git a/profiles/apparmor/template.go b/profiles/apparmor/template.go deleted file mode 100644 index ada33bf0f..000000000 --- a/profiles/apparmor/template.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build linux - -package apparmor - -// baseTemplate defines the default apparmor profile for containers. 
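InstallDefault and IsLoaded above are the package's two entry points: the first renders the template below to /etc/apparmor.d/docker and loads it with apparmor_parser, the second scans the kernel's loaded-profile list. A minimal sketch, assuming root privileges, an AppArmor-enabled kernel, and the conventional "docker-default" profile name:

```go
// +build linux

package main

import (
	"fmt"

	"github.com/docker/docker/profiles/apparmor"
)

func main() {
	// Generate and load the default profile under the given name.
	if err := apparmor.InstallDefault("docker-default"); err != nil {
		panic(err)
	}
	// Confirm the kernel now reports the profile as loaded.
	if err := apparmor.IsLoaded("docker-default"); err != nil {
		fmt.Println("profile not loaded:", err)
		return
	}
	fmt.Println("docker-default is loaded")
}
```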
-const baseTemplate = ` -{{range $value := .Imports}} -{{$value}} -{{end}} - -profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { -{{range $value := .InnerImports}} - {{$value}} -{{end}} - - network, - capability, - file, - umount, - - deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir) - # deny write to files not in /proc//** or /proc/sys/** - deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w, - deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel) - deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/ - deny @{PROC}/sysrq-trigger rwklx, - deny @{PROC}/mem rwklx, - deny @{PROC}/kmem rwklx, - deny @{PROC}/kcore rwklx, - - deny mount, - - deny /sys/[^f]*/** wklx, - deny /sys/f[^s]*/** wklx, - deny /sys/fs/[^c]*/** wklx, - deny /sys/fs/c[^g]*/** wklx, - deny /sys/fs/cg[^r]*/** wklx, - deny /sys/firmware/efi/efivars/** rwklx, - deny /sys/kernel/security/** rwklx, - -{{if ge .Version 208095}} - # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container - ptrace (trace,read) peer=docker-default, -{{end}} -} -` diff --git a/profiles/seccomp/default.json b/profiles/seccomp/default.json deleted file mode 100755 index 40af6ad3b..000000000 --- a/profiles/seccomp/default.json +++ /dev/null @@ -1,1593 +0,0 @@ -{ - "defaultAction": "SCMP_ACT_ERRNO", - "architectures": [ - "SCMP_ARCH_X86_64", - "SCMP_ARCH_X86", - "SCMP_ARCH_X32" - ], - "syscalls": [ - { - "name": "accept", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "accept4", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "access", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "alarm", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "bind", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "brk", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "capget", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "capset", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "chdir", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "chmod", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "chown", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "chown32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "clock_getres", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "clock_gettime", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "clock_nanosleep", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "close", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "connect", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "copy_file_range", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "creat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "dup", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "dup2", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "dup3", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "epoll_create", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "epoll_create1", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "epoll_ctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "epoll_ctl_old", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "epoll_pwait", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": 
"epoll_wait", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "epoll_wait_old", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "eventfd", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "eventfd2", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "execve", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "execveat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "exit", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "exit_group", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "faccessat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fadvise64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fadvise64_64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fallocate", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fanotify_mark", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchdir", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchmod", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchmodat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchown", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchown32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchownat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fcntl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fcntl64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fdatasync", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fgetxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "flistxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "flock", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fork", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fremovexattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fsetxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fstat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fstat64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fstatat64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fstatfs", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fstatfs64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fsync", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ftruncate", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ftruncate64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "futex", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "futimesat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getcpu", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getcwd", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getdents", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getdents64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getegid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getegid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "geteuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "geteuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getgid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": 
"getgroups", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getgroups32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getitimer", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getpeername", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getpgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getpgrp", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getpid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getppid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getpriority", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getrandom", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getresgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getresgid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getresuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getresuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getrlimit", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "get_robust_list", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getrusage", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getsid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getsockname", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getsockopt", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "get_thread_area", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "gettid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "gettimeofday", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "inotify_add_watch", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "inotify_init", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "inotify_init1", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "inotify_rm_watch", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "io_cancel", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ioctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "io_destroy", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "io_getevents", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ioprio_get", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ioprio_set", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "io_setup", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "io_submit", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ipc", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "kill", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lchown", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lchown32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lgetxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "link", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "linkat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "listen", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "listxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "llistxattr", - 
"action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "_llseek", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lremovexattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lseek", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lsetxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lstat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lstat64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "madvise", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "memfd_create", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mincore", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mkdir", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mkdirat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mknod", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mknodat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mlock", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mlock2", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mlockall", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mmap", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mmap2", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mprotect", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mq_getsetattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mq_notify", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mq_open", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mq_timedreceive", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mq_timedsend", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mq_unlink", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mremap", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "msgctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "msgget", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "msgrcv", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "msgsnd", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "msync", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "munlock", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "munlockall", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "munmap", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "nanosleep", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "newfstatat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "_newselect", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "open", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "openat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "pause", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "personality", - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 0, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ] - }, - { - "name": "personality", - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 8, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ] - }, - { - "name": "personality", - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 4294967295, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ] - }, - { - "name": "pipe", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "pipe2", - 
"action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "poll", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ppoll", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "prctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "pread64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "preadv", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "prlimit64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "pselect6", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "pwrite64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "pwritev", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "read", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "readahead", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "readlink", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "readlinkat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "readv", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "recv", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "recvfrom", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "recvmmsg", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "recvmsg", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "remap_file_pages", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "removexattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rename", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "renameat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "renameat2", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "restart_syscall", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rmdir", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigaction", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigpending", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigprocmask", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigqueueinfo", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigreturn", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigsuspend", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigtimedwait", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_tgsigqueueinfo", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_getaffinity", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_getattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_getparam", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_get_priority_max", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_get_priority_min", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_getscheduler", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_rr_get_interval", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_setaffinity", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_setattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_setparam", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_setscheduler", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_yield", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "seccomp", - 
"action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "select", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "semctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "semget", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "semop", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "semtimedop", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "send", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sendfile", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sendfile64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sendmmsg", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sendmsg", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sendto", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setfsgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setfsgid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setfsuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setfsuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setgid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setgroups", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setgroups32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setitimer", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setpgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setpriority", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setregid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setregid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setresgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setresgid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setresuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setresuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setreuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setreuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setrlimit", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "set_robust_list", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setsid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setsockopt", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "set_thread_area", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "set_tid_address", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "shmat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "shmctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "shmdt", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "shmget", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "shutdown", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sigaltstack", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "signalfd", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "signalfd4", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - 
"name": "sigreturn", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "socket", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "socketcall", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "socketpair", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "splice", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "stat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "stat64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "statfs", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "statfs64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "symlink", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "symlinkat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sync", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sync_file_range", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "syncfs", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sysinfo", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "syslog", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "tee", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "tgkill", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "time", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timer_create", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timer_delete", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timerfd_create", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timerfd_gettime", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timerfd_settime", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timer_getoverrun", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timer_gettime", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timer_settime", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "times", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "tkill", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "truncate", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "truncate64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ugetrlimit", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "umask", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "uname", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "unlink", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "unlinkat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "utime", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "utimensat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "utimes", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "vfork", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "vmsplice", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "wait4", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "waitid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "waitpid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "write", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "writev", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "arch_prctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "modify_ldt", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - 
"name": "chroot", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "clone", - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 2080505856, - "valueTwo": 0, - "op": "SCMP_CMP_MASKED_EQ" - } - ] - } - ] -} \ No newline at end of file diff --git a/profiles/seccomp/fixtures/example.json b/profiles/seccomp/fixtures/example.json deleted file mode 100755 index 674ca50fd..000000000 --- a/profiles/seccomp/fixtures/example.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "defaultAction": "SCMP_ACT_ERRNO", - "syscalls": [ - { - "name": "clone", - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 2080505856, - "valueTwo": 0, - "op": "SCMP_CMP_MASKED_EQ" - } - ] - }, - { - "name": "open", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "close", - "action": "SCMP_ACT_ALLOW", - "args": [] - } - ] -} diff --git a/profiles/seccomp/generate.go b/profiles/seccomp/generate.go deleted file mode 100644 index 059370bff..000000000 --- a/profiles/seccomp/generate.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build ignore - -package main - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/oci" - "github.com/docker/docker/profiles/seccomp" -) - -// saves the default seccomp profile as a json file so people can use it as a -// base for their own custom profiles -func main() { - wd, err := os.Getwd() - if err != nil { - panic(err) - } - f := filepath.Join(wd, "default.json") - - rs := oci.DefaultSpec() - - // write the default profile to the file - b, err := json.MarshalIndent(seccomp.DefaultProfile(&rs), "", "\t") - if err != nil { - panic(err) - } - - if err := ioutil.WriteFile(f, b, 0644); err != nil { - panic(err) - } -} diff --git a/profiles/seccomp/seccomp.go b/profiles/seccomp/seccomp.go deleted file mode 100644 index 7a58e2c52..000000000 --- a/profiles/seccomp/seccomp.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build linux - -package seccomp - -import ( - "encoding/json" - "fmt" - - "github.com/docker/engine-api/types" - "github.com/opencontainers/specs/specs-go" -) - -//go:generate go run -tags 'seccomp' generate.go - -// GetDefaultProfile returns the default seccomp profile. -func GetDefaultProfile(rs *specs.Spec) (*specs.Seccomp, error) { - return setupSeccomp(DefaultProfile(rs)) -} - -// LoadProfile takes a file path and decodes the seccomp profile. 
-func LoadProfile(body string) (*specs.Seccomp, error) { - var config types.Seccomp - if err := json.Unmarshal([]byte(body), &config); err != nil { - return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) - } - - return setupSeccomp(&config) -} - -func setupSeccomp(config *types.Seccomp) (newConfig *specs.Seccomp, err error) { - if config == nil { - return nil, nil - } - - // No default action specified, no syscalls listed, assume seccomp disabled - if config.DefaultAction == "" && len(config.Syscalls) == 0 { - return nil, nil - } - - newConfig = &specs.Seccomp{} - - // if config.Architectures == 0 then libseccomp will figure out the architecture to use - if len(config.Architectures) > 0 { - for _, arch := range config.Architectures { - newConfig.Architectures = append(newConfig.Architectures, specs.Arch(arch)) - } - } - - newConfig.DefaultAction = specs.Action(config.DefaultAction) - - // Loop through all syscall blocks and convert them to libcontainer format - for _, call := range config.Syscalls { - newCall := specs.Syscall{ - Name: call.Name, - Action: specs.Action(call.Action), - } - - // Loop through all the arguments of the syscall and convert them - for _, arg := range call.Args { - newArg := specs.Arg{ - Index: arg.Index, - Value: arg.Value, - ValueTwo: arg.ValueTwo, - Op: specs.Operator(arg.Op), - } - - newCall.Args = append(newCall.Args, newArg) - } - - newConfig.Syscalls = append(newConfig.Syscalls, newCall) - } - - return newConfig, nil -} diff --git a/profiles/seccomp/seccomp_default.go b/profiles/seccomp/seccomp_default.go deleted file mode 100644 index 9e8d47f92..000000000 --- a/profiles/seccomp/seccomp_default.go +++ /dev/null @@ -1,1879 +0,0 @@ -// +build linux,seccomp - -package seccomp - -import ( - "syscall" - - "github.com/docker/engine-api/types" - "github.com/opencontainers/specs/specs-go" - libseccomp "github.com/seccomp/libseccomp-golang" -) - -func arches() []types.Arch { - var native, err = libseccomp.GetNativeArch() - if err != nil { - return []types.Arch{} - } - var a = native.String() - switch a { - case "amd64": - return []types.Arch{types.ArchX86_64, types.ArchX86, types.ArchX32} - case "arm64": - return []types.Arch{types.ArchARM, types.ArchAARCH64} - case "mips64": - return []types.Arch{types.ArchMIPS, types.ArchMIPS64, types.ArchMIPS64N32} - case "mips64n32": - return []types.Arch{types.ArchMIPS, types.ArchMIPS64, types.ArchMIPS64N32} - case "mipsel64": - return []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64, types.ArchMIPSEL64N32} - case "mipsel64n32": - return []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64, types.ArchMIPSEL64N32} - case "s390x": - return []types.Arch{types.ArchS390, types.ArchS390X} - default: - return []types.Arch{} - } -} - -// DefaultProfile defines the whitelist for the default seccomp profile. 
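Where generate.go above serializes the raw types.Seccomp form of the whitelist, a caller could instead ask for the converted runtime-spec form via GetDefaultProfile; a minimal sketch, assuming a Linux build with the seccomp build tag and oci.DefaultSpec from the same tree:

    package main

    import (
        "fmt"

        "github.com/docker/docker/oci"
        "github.com/docker/docker/profiles/seccomp"
    )

    func main() {
        // The spec's capability list decides which capability-gated
        // syscalls (reboot, chroot, mount, ...) end up in the whitelist.
        rs := oci.DefaultSpec()
        profile, err := seccomp.GetDefaultProfile(&rs)
        if err != nil {
            panic(err)
        }
        if profile == nil {
            // Built without the seccomp tag: the stub DefaultProfile returns nil.
            fmt.Println("seccomp support not compiled in")
            return
        }
        fmt.Printf("%d syscalls allowed, everything else gets %s\n",
            len(profile.Syscalls), profile.DefaultAction)
    }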
-func DefaultProfile(rs *specs.Spec) *types.Seccomp { - - syscalls := []*types.Syscall{ - { - Name: "accept", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "accept4", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "access", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "alarm", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "bind", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "brk", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "capget", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "capset", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "chdir", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "chmod", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "chown", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "chown32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - - { - Name: "clock_getres", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "clock_gettime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "clock_nanosleep", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "close", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "connect", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "copy_file_range", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "creat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "dup", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "dup2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "dup3", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_create", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_create1", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_ctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_ctl_old", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_pwait", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_wait", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_wait_old", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "eventfd", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "eventfd2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "execve", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "execveat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "exit", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "exit_group", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "faccessat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fadvise64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fadvise64_64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fallocate", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fanotify_mark", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fchdir", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fchmod", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fchmodat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fchown", - Action: types.ActAllow, - Args: 
[]*types.Arg{}, - }, - { - Name: "fchown32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fchownat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fcntl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fcntl64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fdatasync", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fgetxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "flistxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "flock", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fork", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fremovexattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fsetxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fstat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fstat64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fstatat64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fstatfs", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fstatfs64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fsync", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ftruncate", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ftruncate64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "futex", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "futimesat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getcpu", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getcwd", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getdents", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getdents64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getegid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getegid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "geteuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "geteuid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getgid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getgroups", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getgroups32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getitimer", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getpeername", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getpgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getpgrp", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getpid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getppid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getpriority", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getrandom", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getresgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getresgid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getresuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getresuid32", - Action: types.ActAllow, - 
Args: []*types.Arg{}, - }, - { - Name: "getrlimit", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "get_robust_list", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getrusage", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getsid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getsockname", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getsockopt", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "get_thread_area", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "gettid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "gettimeofday", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getuid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "inotify_add_watch", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "inotify_init", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "inotify_init1", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "inotify_rm_watch", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "io_cancel", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ioctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "io_destroy", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "io_getevents", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ioprio_get", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ioprio_set", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "io_setup", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "io_submit", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ipc", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "kill", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lchown", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lchown32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lgetxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "link", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "linkat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "listen", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "listxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "llistxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "_llseek", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lremovexattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lseek", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lsetxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lstat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lstat64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "madvise", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "memfd_create", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mincore", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mkdir", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: 
"mkdirat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mknod", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mknodat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mlock", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mlock2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mlockall", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mmap", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mmap2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mprotect", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mq_getsetattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mq_notify", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mq_open", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mq_timedreceive", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mq_timedsend", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mq_unlink", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mremap", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "msgctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "msgget", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "msgrcv", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "msgsnd", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "msync", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "munlock", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "munlockall", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "munmap", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "nanosleep", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "newfstatat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "_newselect", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "open", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "openat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "pause", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "personality", - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0x0, - Op: types.OpEqualTo, - }, - }, - }, - { - Name: "personality", - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0x0008, - Op: types.OpEqualTo, - }, - }, - }, - { - Name: "personality", - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0xffffffff, - Op: types.OpEqualTo, - }, - }, - }, - { - Name: "pipe", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "pipe2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "poll", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ppoll", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "prctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "pread64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "preadv", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "prlimit64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "pselect6", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "pwrite64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { 
- Name: "pwritev", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "read", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "readahead", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "readlink", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "readlinkat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "readv", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "recv", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "recvfrom", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "recvmmsg", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "recvmsg", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "remap_file_pages", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "removexattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rename", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "renameat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "renameat2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "restart_syscall", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rmdir", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigaction", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigpending", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigprocmask", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigqueueinfo", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigreturn", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigsuspend", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigtimedwait", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_tgsigqueueinfo", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_getaffinity", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_getattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_getparam", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_get_priority_max", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_get_priority_min", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_getscheduler", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_rr_get_interval", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_setaffinity", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_setattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_setparam", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_setscheduler", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_yield", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "seccomp", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "select", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "semctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "semget", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "semop", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "semtimedop", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: 
"send", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sendfile", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sendfile64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sendmmsg", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sendmsg", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sendto", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setfsgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setfsgid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setfsuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setfsuid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setgid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setgroups", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setgroups32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setitimer", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setpgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setpriority", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setregid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setregid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setresgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setresgid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setresuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setresuid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setreuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setreuid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setrlimit", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "set_robust_list", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setsid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setsockopt", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "set_thread_area", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "set_tid_address", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setuid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "shmat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "shmctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "shmdt", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "shmget", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "shutdown", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sigaltstack", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "signalfd", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "signalfd4", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sigreturn", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "socket", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "socketcall", - Action: types.ActAllow, - Args: 
[]*types.Arg{}, - }, - { - Name: "socketpair", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "splice", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "stat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "stat64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "statfs", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "statfs64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "symlink", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "symlinkat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sync", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sync_file_range", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "syncfs", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sysinfo", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "syslog", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "tee", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "tgkill", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "time", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timer_create", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timer_delete", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timerfd_create", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timerfd_gettime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timerfd_settime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timer_getoverrun", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timer_gettime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timer_settime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "times", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "tkill", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "truncate", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "truncate64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ugetrlimit", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "umask", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "uname", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "unlink", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "unlinkat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "utime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "utimensat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "utimes", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "vfork", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "vmsplice", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "wait4", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "waitid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "waitpid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "write", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "writev", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - } - - var sysCloneFlagsIndex uint - var arch string - var native, err = libseccomp.GetNativeArch() - if err == nil { - arch = 
native.String() - } - switch arch { - case "arm", "arm64": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "breakpoint", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "cacheflush", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "set_tls", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "amd64", "x32": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "arch_prctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - fallthrough - case "x86": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "modify_ldt", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "s390", "s390x": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "s390_pci_mmio_read", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "s390_pci_mmio_write", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "s390_runtime_instr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - /* Flags parameter of the clone syscall is the 2nd on s390 */ - sysCloneFlagsIndex = 1 - } - - capSysAdmin := false - - var cap string - for _, cap = range rs.Process.Capabilities { - switch cap { - case "CAP_DAC_READ_SEARCH": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "name_to_handle_at", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "open_by_handle_at", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_ADMIN": - capSysAdmin = true - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "bpf", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "clone", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fanotify_init", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lookup_dcookie", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mount", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "perf_event_open", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setdomainname", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sethostname", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setns", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "umount", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "umount2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "unshare", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_BOOT": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "reboot", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_CHROOT": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "chroot", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_MODULE": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "delete_module", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "init_module", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "finit_module", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "query_module", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_PACCT": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "acct", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) 
- case "CAP_SYS_PTRACE": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "kcmp", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "process_vm_readv", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "process_vm_writev", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ptrace", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_RAWIO": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "iopl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ioperm", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_TIME": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "settimeofday", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "stime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "adjtimex", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_TTY_CONFIG": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "vhangup", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - } - } - - if !capSysAdmin { - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "clone", - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: sysCloneFlagsIndex, - Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, - ValueTwo: 0, - Op: types.OpMaskedEqual, - }, - }, - }, - }...) - } - - return &types.Seccomp{ - DefaultAction: types.ActErrno, - Architectures: arches(), - Syscalls: syscalls, - } -} diff --git a/profiles/seccomp/seccomp_test.go b/profiles/seccomp/seccomp_test.go deleted file mode 100644 index 2c9929e92..000000000 --- a/profiles/seccomp/seccomp_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build linux - -package seccomp - -import ( - "io/ioutil" - "testing" -) - -func TestLoadProfile(t *testing.T) { - f, err := ioutil.ReadFile("fixtures/example.json") - if err != nil { - t.Fatal(err) - } - if _, err := LoadProfile(string(f)); err != nil { - t.Fatal(err) - } -} - -func TestLoadDefaultProfile(t *testing.T) { - f, err := ioutil.ReadFile("default.json") - if err != nil { - t.Fatal(err) - } - if _, err := LoadProfile(string(f)); err != nil { - t.Fatal(err) - } -} diff --git a/profiles/seccomp/seccomp_unsupported.go b/profiles/seccomp/seccomp_unsupported.go deleted file mode 100644 index ec7399cd0..000000000 --- a/profiles/seccomp/seccomp_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build linux,!seccomp - -package seccomp - -import ( - "github.com/docker/engine-api/types" - "github.com/opencontainers/specs/specs-go" -) - -// DefaultProfile returns a nil pointer on unsupported systems. 
-func DefaultProfile(rs *specs.Spec) *types.Seccomp { - return nil -} diff --git a/reference/reference.go b/reference/reference.go deleted file mode 100644 index b22961b39..000000000 --- a/reference/reference.go +++ /dev/null @@ -1,211 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/distribution/digest" - distreference "github.com/docker/distribution/reference" - "github.com/docker/docker/image/v1" -) - -const ( - // DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified - DefaultTag = "latest" - // DefaultHostname is the default built-in hostname - DefaultHostname = "docker.io" - // LegacyDefaultHostname is automatically converted to DefaultHostname - LegacyDefaultHostname = "index.docker.io" - // DefaultRepoPrefix is the prefix used for default repositories in default host - DefaultRepoPrefix = "library/" -) - -// Named is an object with a full name -type Named interface { - // Name returns normalized repository name, like "ubuntu". - Name() string - // String returns full reference, like "ubuntu@sha256:abcdef..." - String() string - // FullName returns full repository name with hostname, like "docker.io/library/ubuntu" - FullName() string - // Hostname returns hostname for the reference, like "docker.io" - Hostname() string - // RemoteName returns the repository component of the full name, like "library/ubuntu" - RemoteName() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Canonical reference is an object with a fully unique -// name including a name with hostname and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name, otherwise an error is -// returned. -// If an error was encountered it is returned, along with a nil Reference. -func ParseNamed(s string) (Named, error) { - named, err := distreference.ParseNamed(s) - if err != nil { - return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag: %s", s, err) - } - r, err := WithName(named.Name()) - if err != nil { - return nil, err - } - if canonical, isCanonical := named.(distreference.Canonical); isCanonical { - return WithDigest(r, canonical.Digest()) - } - if tagged, isTagged := named.(distreference.NamedTagged); isTagged { - return WithTag(r, tagged.Tag()) - } - return r, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - name, err := normalize(name) - if err != nil { - return nil, err - } - if err := validateName(name); err != nil { - return nil, err - } - r, err := distreference.WithName(name) - if err != nil { - return nil, err - } - return &namedRef{r}, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - r, err := distreference.WithTag(name, tag) - if err != nil { - return nil, err - } - return &taggedRef{namedRef{r}}, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. 
-func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - r, err := distreference.WithDigest(name, digest) - if err != nil { - return nil, err - } - return &canonicalRef{namedRef{r}}, nil -} - -type namedRef struct { - distreference.Named -} -type taggedRef struct { - namedRef -} -type canonicalRef struct { - namedRef -} - -func (r *namedRef) FullName() string { - hostname, remoteName := splitHostname(r.Name()) - return hostname + "/" + remoteName -} -func (r *namedRef) Hostname() string { - hostname, _ := splitHostname(r.Name()) - return hostname -} -func (r *namedRef) RemoteName() string { - _, remoteName := splitHostname(r.Name()) - return remoteName -} -func (r *taggedRef) Tag() string { - return r.namedRef.Named.(distreference.NamedTagged).Tag() -} -func (r *canonicalRef) Digest() digest.Digest { - return r.namedRef.Named.(distreference.Canonical).Digest() -} - -// WithDefaultTag adds a default tag to a reference if it only has a repo name. -func WithDefaultTag(ref Named) Named { - if IsNameOnly(ref) { - ref, _ = WithTag(ref, DefaultTag) - } - return ref -} - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// ParseIDOrReference parses string for an image ID or a reference. ID can be -// without a default prefix. -func ParseIDOrReference(idOrRef string) (digest.Digest, Named, error) { - if err := v1.ValidateID(idOrRef); err == nil { - idOrRef = "sha256:" + idOrRef - } - if dgst, err := digest.ParseDigest(idOrRef); err == nil { - return dgst, nil, nil - } - ref, err := ParseNamed(idOrRef) - return "", ref, err -} - -// splitHostname splits a repository name to hostname and remotename string. -// If no valid hostname is found, the default hostname is used. Repository name -// needs to be already validated before. -func splitHostname(name string) (hostname, remoteName string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - hostname, remoteName = DefaultHostname, name - } else { - hostname, remoteName = name[:i], name[i+1:] - } - if hostname == LegacyDefaultHostname { - hostname = DefaultHostname - } - if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') { - remoteName = DefaultRepoPrefix + remoteName - } - return -} - -// normalize returns a repository name in its normalized form, meaning it -// will not contain default hostname nor library/ prefix for official images. 
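Concretely, parsing and normalization combine as follows; a minimal sketch, assuming the github.com/docker/docker/reference import path, whose expected outputs follow from splitHostname above and normalize below:

    package main

    import (
        "fmt"

        "github.com/docker/docker/reference"
    )

    func main() {
        ref, err := reference.ParseNamed("ubuntu:14.04")
        if err != nil {
            panic(err)
        }
        fmt.Println(ref.Name())       // ubuntu
        fmt.Println(ref.String())     // ubuntu:14.04
        fmt.Println(ref.FullName())   // docker.io/library/ubuntu
        fmt.Println(ref.Hostname())   // docker.io
        fmt.Println(ref.RemoteName()) // library/ubuntu
    }

The same round trip is exercised table-driven in TestParseRepositoryInfo below.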
-func normalize(name string) (string, error) { - host, remoteName := splitHostname(name) - if strings.ToLower(remoteName) != remoteName { - return "", errors.New("invalid reference format: repository name must be lowercase") - } - if host == DefaultHostname { - if strings.HasPrefix(remoteName, DefaultRepoPrefix) { - return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil - } - return remoteName, nil - } - return name, nil -} - -func validateName(name string) error { - if err := v1.ValidateID(name); err == nil { - return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) - } - return nil -} diff --git a/reference/reference_test.go b/reference/reference_test.go deleted file mode 100644 index ff35ba3da..000000000 --- a/reference/reference_test.go +++ /dev/null @@ -1,275 +0,0 @@ -package reference - -import ( - "testing" - - "github.com/docker/distribution/digest" -) - -func TestValidateReferenceName(t *testing.T) { - validRepoNames := []string{ - "docker/docker", - "library/debian", - "debian", - "docker.io/docker/docker", - "docker.io/library/debian", - "docker.io/debian", - "index.docker.io/docker/docker", - "index.docker.io/library/debian", - "index.docker.io/debian", - "127.0.0.1:5000/docker/docker", - "127.0.0.1:5000/library/debian", - "127.0.0.1:5000/debian", - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - } - invalidRepoNames := []string{ - "https://github.com/docker/docker", - "docker/Docker", - "-docker", - "-docker/docker", - "-docker.io/docker/docker", - "docker///docker", - "docker.io/docker/Docker", - "docker.io/docker///docker", - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - } - - for _, name := range invalidRepoNames { - _, err := ParseNamed(name) - if err == nil { - t.Fatalf("Expected invalid repo name for %q", name) - } - } - - for _, name := range validRepoNames { - _, err := ParseNamed(name) - if err != nil { - t.Fatalf("Error parsing repo name %s, got: %q", name, err) - } - } -} - -func TestValidateRemoteName(t *testing.T) { - validRepositoryNames := []string{ - // Sanity check. - "docker/docker", - - // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - - // Allow embedded hyphens. - "docker-rules/docker", - - // Allow multiple hyphens as well. - "docker---rules/docker", - - //Username doc and image name docker being tested. - "doc/docker", - - // single character names are now allowed. - "d/docker", - "jess/t", - - // Consecutive underscores. - "dock__er/docker", - } - for _, repositoryName := range validRepositoryNames { - _, err := ParseNamed(repositoryName) - if err != nil { - t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) - } - } - - invalidRepositoryNames := []string{ - // Disallow capital letters. - "docker/Docker", - - // Only allow one slash. - "docker///docker", - - // Disallow 64-character hexadecimal. - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - - // Disallow leading and trailing hyphens in namespace. - "-docker/docker", - "docker-/docker", - "-docker-/docker", - - // Don't allow underscores everywhere (as opposed to hyphens). - "____/____", - - "_docker/_docker", - - // Disallow consecutive periods. - "dock..er/docker", - "dock_.er/docker", - "dock-.er/docker", - - // No repository. 
- "docker/", - - //namespace too long - "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", - } - for _, repositoryName := range invalidRepositoryNames { - if _, err := ParseNamed(repositoryName); err == nil { - t.Errorf("Repository name should be invalid: %v", repositoryName) - } - } -} - -func TestParseRepositoryInfo(t *testing.T) { - type tcase struct { - RemoteName, NormalizedName, FullName, AmbiguousName, Hostname string - } - - tcases := []tcase{ - { - RemoteName: "fooo/bar", - NormalizedName: "fooo/bar", - FullName: "docker.io/fooo/bar", - AmbiguousName: "index.docker.io/fooo/bar", - Hostname: "docker.io", - }, - { - RemoteName: "library/ubuntu", - NormalizedName: "ubuntu", - FullName: "docker.io/library/ubuntu", - AmbiguousName: "library/ubuntu", - Hostname: "docker.io", - }, - { - RemoteName: "nonlibrary/ubuntu", - NormalizedName: "nonlibrary/ubuntu", - FullName: "docker.io/nonlibrary/ubuntu", - AmbiguousName: "", - Hostname: "docker.io", - }, - { - RemoteName: "other/library", - NormalizedName: "other/library", - FullName: "docker.io/other/library", - AmbiguousName: "", - Hostname: "docker.io", - }, - { - RemoteName: "private/moonbase", - NormalizedName: "127.0.0.1:8000/private/moonbase", - FullName: "127.0.0.1:8000/private/moonbase", - AmbiguousName: "", - Hostname: "127.0.0.1:8000", - }, - { - RemoteName: "privatebase", - NormalizedName: "127.0.0.1:8000/privatebase", - FullName: "127.0.0.1:8000/privatebase", - AmbiguousName: "", - Hostname: "127.0.0.1:8000", - }, - { - RemoteName: "private/moonbase", - NormalizedName: "example.com/private/moonbase", - FullName: "example.com/private/moonbase", - AmbiguousName: "", - Hostname: "example.com", - }, - { - RemoteName: "privatebase", - NormalizedName: "example.com/privatebase", - FullName: "example.com/privatebase", - AmbiguousName: "", - Hostname: "example.com", - }, - { - RemoteName: "private/moonbase", - NormalizedName: "example.com:8000/private/moonbase", - FullName: "example.com:8000/private/moonbase", - AmbiguousName: "", - Hostname: "example.com:8000", - }, - { - RemoteName: "privatebasee", - NormalizedName: "example.com:8000/privatebasee", - FullName: "example.com:8000/privatebasee", - AmbiguousName: "", - Hostname: "example.com:8000", - }, - { - RemoteName: "library/ubuntu-12.04-base", - NormalizedName: "ubuntu-12.04-base", - FullName: "docker.io/library/ubuntu-12.04-base", - AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", - Hostname: "docker.io", - }, - } - - for _, tcase := range tcases { - refStrings := []string{tcase.NormalizedName, tcase.FullName} - if tcase.AmbiguousName != "" { - refStrings = append(refStrings, tcase.AmbiguousName) - } - - var refs []Named - for _, r := range refStrings { - named, err := ParseNamed(r) - if err != nil { - t.Fatal(err) - } - refs = append(refs, named) - named, err = WithName(r) - if err != nil { - t.Fatal(err) - } - refs = append(refs, named) - } - - for _, r := range refs { - if expected, actual := tcase.NormalizedName, r.Name(); expected != actual { - t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.FullName, r.FullName(); expected != actual { - t.Fatalf("Invalid normalized reference for %q. 
Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.Hostname, r.Hostname(); expected != actual { - t.Fatalf("Invalid hostname for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.RemoteName, r.RemoteName(); expected != actual { - t.Fatalf("Invalid remoteName for %q. Expected %q, got %q", r, expected, actual) - } - - } - } -} - -func TestParseReferenceWithTagAndDigest(t *testing.T) { - ref, err := ParseNamed("busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa") - if err != nil { - t.Fatal(err) - } - if _, isTagged := ref.(NamedTagged); isTagged { - t.Fatalf("Reference from %q should not support tag", ref) - } - if _, isCanonical := ref.(Canonical); !isCanonical { - t.Fatalf("Reference from %q should support digest", ref) - } - if expected, actual := "busybox@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa", ref.String(); actual != expected { - t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) - } -} - -func TestInvalidReferenceComponents(t *testing.T) { - if _, err := WithName("-foo"); err == nil { - t.Fatal("Expected WithName to detect invalid name") - } - ref, err := WithName("busybox") - if err != nil { - t.Fatal(err) - } - if _, err := WithTag(ref, "-foo"); err == nil { - t.Fatal("Expected WithTag to detect invalid tag") - } - if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { - t.Fatal("Expected WithDigest to detect invalid digest") - } -} diff --git a/reference/store.go b/reference/store.go deleted file mode 100644 index fb72fff90..000000000 --- a/reference/store.go +++ /dev/null @@ -1,287 +0,0 @@ -package reference - -import ( - "encoding/json" - "errors" - "fmt" - "os" - "path/filepath" - "sort" - "sync" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/ioutils" -) - -var ( - // ErrDoesNotExist is returned if a reference is not found in the - // store. - ErrDoesNotExist = errors.New("reference does not exist") -) - -// An Association is a tuple associating a reference with an image ID. -type Association struct { - Ref Named - ImageID image.ID -} - -// Store provides the set of methods which can operate on a tag store. -type Store interface { - References(id image.ID) []Named - ReferencesByName(ref Named) []Association - AddTag(ref Named, id image.ID, force bool) error - AddDigest(ref Canonical, id image.ID, force bool) error - Delete(ref Named) (bool, error) - Get(ref Named) (image.ID, error) -} - -type store struct { - mu sync.RWMutex - // jsonPath is the path to the file where the serialized tag data is - // stored. - jsonPath string - // Repositories is a map of repositories, indexed by name. - Repositories map[string]repository - // referencesByIDCache is a cache of references indexed by ID, to speed - // up References. - referencesByIDCache map[image.ID]map[string]Named -} - -// Repository maps tags to image IDs. The key is a stringified Reference, -// including the repository name.
-type repository map[string]image.ID - -type lexicalRefs []Named - -func (a lexicalRefs) Len() int { return len(a) } -func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a lexicalRefs) Less(i, j int) bool { return a[i].String() < a[j].String() } - -type lexicalAssociations []Association - -func (a lexicalAssociations) Len() int { return len(a) } -func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a lexicalAssociations) Less(i, j int) bool { return a[i].Ref.String() < a[j].Ref.String() } - -// NewReferenceStore creates a new reference store, tied to a file path where -// the set of references are serialized in JSON format. -func NewReferenceStore(jsonPath string) (Store, error) { - abspath, err := filepath.Abs(jsonPath) - if err != nil { - return nil, err - } - - store := &store{ - jsonPath: abspath, - Repositories: make(map[string]repository), - referencesByIDCache: make(map[image.ID]map[string]Named), - } - // Load the json file if it exists, otherwise create it. - if err := store.reload(); os.IsNotExist(err) { - if err := store.save(); err != nil { - return nil, err - } - } else if err != nil { - return nil, err - } - return store, nil -} - -// AddTag adds a tag reference to the store. If force is set to true, existing -// references can be overwritten. This only works for tags, not digests. -func (store *store) AddTag(ref Named, id image.ID, force bool) error { - if _, isCanonical := ref.(Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - return store.addReference(WithDefaultTag(ref), id, force) -} - -// AddDigest adds a digest reference to the store. -func (store *store) AddDigest(ref Canonical, id image.ID, force bool) error { - return store.addReference(ref, id, force) -} - -func (store *store) addReference(ref Named, id image.ID, force bool) error { - if ref.Name() == string(digest.Canonical) { - return errors.New("refusing to create an ambiguous tag using digest algorithm as name") - } - - store.mu.Lock() - defer store.mu.Unlock() - - repository, exists := store.Repositories[ref.Name()] - if !exists || repository == nil { - repository = make(map[string]image.ID) - store.Repositories[ref.Name()] = repository - } - - refStr := ref.String() - oldID, exists := repository[refStr] - - if exists { - // force only works for tags - if digested, isDigest := ref.(Canonical); isDigest { - return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) - } - - if !force { - return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String()) - } - - if store.referencesByIDCache[oldID] != nil { - delete(store.referencesByIDCache[oldID], refStr) - if len(store.referencesByIDCache[oldID]) == 0 { - delete(store.referencesByIDCache, oldID) - } - } - } - - repository[refStr] = id - if store.referencesByIDCache[id] == nil { - store.referencesByIDCache[id] = make(map[string]Named) - } - store.referencesByIDCache[id][refStr] = ref - - return store.save() -} - -// Delete deletes a reference from the store. It returns true if a deletion -// happened, or false otherwise. 
-func (store *store) Delete(ref Named) (bool, error) { - ref = WithDefaultTag(ref) - - store.mu.Lock() - defer store.mu.Unlock() - - repoName := ref.Name() - - repository, exists := store.Repositories[repoName] - if !exists { - return false, ErrDoesNotExist - } - - refStr := ref.String() - if id, exists := repository[refStr]; exists { - delete(repository, refStr) - if len(repository) == 0 { - delete(store.Repositories, repoName) - } - if store.referencesByIDCache[id] != nil { - delete(store.referencesByIDCache[id], refStr) - if len(store.referencesByIDCache[id]) == 0 { - delete(store.referencesByIDCache, id) - } - } - return true, store.save() - } - - return false, ErrDoesNotExist -} - -// Get retrieves an item from the store by reference. -func (store *store) Get(ref Named) (image.ID, error) { - ref = WithDefaultTag(ref) - - store.mu.RLock() - defer store.mu.RUnlock() - - repository, exists := store.Repositories[ref.Name()] - if !exists || repository == nil { - return "", ErrDoesNotExist - } - - id, exists := repository[ref.String()] - if !exists { - return "", ErrDoesNotExist - } - - return id, nil -} - -// References returns a slice of references to the given image ID. The slice -// will be nil if there are no references to this image ID. -func (store *store) References(id image.ID) []Named { - store.mu.RLock() - defer store.mu.RUnlock() - - // Convert the internal map to an array for two reasons: - // 1) We must not return a mutable reference to the internal map - // 2) It would be ugly to expose the extraneous map keys to callers. - - var references []Named - for _, ref := range store.referencesByIDCache[id] { - references = append(references, ref) - } - - sort.Sort(lexicalRefs(references)) - - return references -} - -// ReferencesByName returns the references for a given repository name. -// If there are no references known for this repository name, -// ReferencesByName returns nil.
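Putting the pieces together, a minimal sketch of typical store usage; the import paths and the throwaway file location are assumptions, and the image ID is just a syntactically valid sha256 digest:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "path/filepath"

        "github.com/docker/docker/image"
        "github.com/docker/docker/reference"
    )

    func main() {
        dir, err := ioutil.TempDir("", "refstore")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dir)

        // NewReferenceStore creates the JSON file on first use.
        store, err := reference.NewReferenceStore(filepath.Join(dir, "repositories.json"))
        if err != nil {
            panic(err)
        }

        ref, err := reference.ParseNamed("busybox:latest")
        if err != nil {
            panic(err)
        }
        id := image.ID("sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c")
        if err := store.AddTag(ref, id, false); err != nil {
            panic(err)
        }

        got, err := store.Get(ref)
        if err != nil {
            panic(err)
        }
        fmt.Println(got == id) // true
    }

The on-disk result is the flat Repositories map shown in marshalledSaveLoadTestCases below.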
-func (store *store) ReferencesByName(ref Named) []Association { - store.mu.RLock() - defer store.mu.RUnlock() - - repository, exists := store.Repositories[ref.Name()] - if !exists { - return nil - } - - var associations []Association - for refStr, refID := range repository { - ref, err := ParseNamed(refStr) - if err != nil { - // Should never happen - return nil - } - associations = append(associations, - Association{ - Ref: ref, - ImageID: refID, - }) - } - - sort.Sort(lexicalAssociations(associations)) - - return associations -} - -func (store *store) save() error { - // Store the json - jsonData, err := json.Marshal(store) - if err != nil { - return err - } - return ioutils.AtomicWriteFile(store.jsonPath, jsonData, 0600) -} - -func (store *store) reload() error { - f, err := os.Open(store.jsonPath) - if err != nil { - return err - } - defer f.Close() - if err := json.NewDecoder(f).Decode(&store); err != nil { - return err - } - - for _, repository := range store.Repositories { - for refStr, refID := range repository { - ref, err := ParseNamed(refStr) - if err != nil { - // Should never happen - continue - } - if store.referencesByIDCache[refID] == nil { - store.referencesByIDCache[refID] = make(map[string]Named) - } - store.referencesByIDCache[refID][refStr] = ref - } - } - - return nil -} diff --git a/reference/store_test.go b/reference/store_test.go deleted file mode 100644 index a877c55f5..000000000 --- a/reference/store_test.go +++ /dev/null @@ -1,356 +0,0 @@ -package reference - -import ( - "bytes" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/docker/docker/image" -) - -var ( - saveLoadTestCases = map[string]image.ID{ - "registry:5000/foobar:HEAD": "sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6", - "registry:5000/foobar:alternate": "sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793", - "registry:5000/foobar:latest": "sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b", - "registry:5000/foobar:master": "sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc", - "jess/hollywood:latest": "sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe", - "registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6": "sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c", - "busybox:latest": "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", - } - - marshalledSaveLoadTestCases = []byte(`{"Repositories":{"busybox":{"busybox:latest":"sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"},"jess/hollywood":{"jess/hollywood:latest":"sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe"},"registry":{"registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6":"sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c"},"registry:5000/foobar":{"registry:5000/foobar:HEAD":"sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6","registry:5000/foobar:alternate":"sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793","registry:5000/foobar:latest":"sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b","registry:5000/foobar:master":"sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc"}}}`) -) - -func TestLoad(t *testing.T) { - jsonFile, err := ioutil.TempFile("", "tag-store-test") - if err != nil { - t.Fatalf("error creating temp file: %v", err) - } - defer 
os.RemoveAll(jsonFile.Name()) - - // Write canned json to the temp file - _, err = jsonFile.Write(marshalledSaveLoadTestCases) - if err != nil { - t.Fatalf("error writing to temp file: %v", err) - } - jsonFile.Close() - - store, err := NewReferenceStore(jsonFile.Name()) - if err != nil { - t.Fatalf("error creating tag store: %v", err) - } - - for refStr, expectedID := range saveLoadTestCases { - ref, err := ParseNamed(refStr) - if err != nil { - t.Fatalf("failed to parse reference: %v", err) - } - id, err := store.Get(ref) - if err != nil { - t.Fatalf("could not find reference %s: %v", refStr, err) - } - if id != expectedID { - t.Fatalf("expected %s - got %s", expectedID, id) - } - } -} - -func TestSave(t *testing.T) { - jsonFile, err := ioutil.TempFile("", "tag-store-test") - if err != nil { - t.Fatalf("error creating temp file: %v", err) - } - _, err = jsonFile.Write([]byte(`{}`)) - jsonFile.Close() - defer os.RemoveAll(jsonFile.Name()) - - store, err := NewReferenceStore(jsonFile.Name()) - if err != nil { - t.Fatalf("error creating tag store: %v", err) - } - - for refStr, id := range saveLoadTestCases { - ref, err := ParseNamed(refStr) - if err != nil { - t.Fatalf("failed to parse reference: %v", err) - } - if canonical, ok := ref.(Canonical); ok { - err = store.AddDigest(canonical, id, false) - if err != nil { - t.Fatalf("could not add digest reference %s: %v", refStr, err) - } - } else { - err = store.AddTag(ref, id, false) - if err != nil { - t.Fatalf("could not add reference %s: %v", refStr, err) - } - } - } - - jsonBytes, err := ioutil.ReadFile(jsonFile.Name()) - if err != nil { - t.Fatalf("could not read json file: %v", err) - } - - if !bytes.Equal(jsonBytes, marshalledSaveLoadTestCases) { - t.Fatalf("save output did not match expectations\nexpected:\n%s\ngot:\n%s", marshalledSaveLoadTestCases, jsonBytes) - } -} - -func TestAddDeleteGet(t *testing.T) { - jsonFile, err := ioutil.TempFile("", "tag-store-test") - if err != nil { - t.Fatalf("error creating temp file: %v", err) - } - _, err = jsonFile.Write([]byte(`{}`)) - jsonFile.Close() - defer os.RemoveAll(jsonFile.Name()) - - store, err := NewReferenceStore(jsonFile.Name()) - if err != nil { - t.Fatalf("error creating tag store: %v", err) - } - - testImageID1 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9c") - testImageID2 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9d") - testImageID3 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9e") - - // Try adding a reference with no tag or digest - nameOnly, err := WithName("username/repo") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - if err = store.AddTag(nameOnly, testImageID1, false); err != nil { - t.Fatalf("error adding to store: %v", err) - } - - // Add a few references - ref1, err := ParseNamed("username/repo1:latest") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - if err = store.AddTag(ref1, testImageID1, false); err != nil { - t.Fatalf("error adding to store: %v", err) - } - - ref2, err := ParseNamed("username/repo1:old") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - if err = store.AddTag(ref2, testImageID2, false); err != nil { - t.Fatalf("error adding to store: %v", err) - } - - ref3, err := ParseNamed("username/repo1:alias") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - if err = store.AddTag(ref3, testImageID1, false); err != nil { - t.Fatalf("error adding to 
store: %v", err)
-	}
-
-	ref4, err := ParseNamed("username/repo2:latest")
-	if err != nil {
-		t.Fatalf("could not parse reference: %v", err)
-	}
-	if err = store.AddTag(ref4, testImageID2, false); err != nil {
-		t.Fatalf("error adding to store: %v", err)
-	}
-
-	ref5, err := ParseNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c")
-	if err != nil {
-		t.Fatalf("could not parse reference: %v", err)
-	}
-	if err = store.AddDigest(ref5.(Canonical), testImageID2, false); err != nil {
-		t.Fatalf("error adding to store: %v", err)
-	}
-
-	// Attempt to overwrite with force == false
-	if err = store.AddTag(ref4, testImageID3, false); err == nil || !strings.HasPrefix(err.Error(), "Conflict:") {
-		t.Fatalf("did not get expected error on overwrite attempt - got %v", err)
-	}
-	// Repeat to overwrite with force == true
-	if err = store.AddTag(ref4, testImageID3, true); err != nil {
-		t.Fatalf("failed to force tag overwrite: %v", err)
-	}
-
-	// Check references so far
-	id, err := store.Get(nameOnly)
-	if err != nil {
-		t.Fatalf("Get returned error: %v", err)
-	}
-	if id != testImageID1 {
-		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String())
-	}
-
-	id, err = store.Get(ref1)
-	if err != nil {
-		t.Fatalf("Get returned error: %v", err)
-	}
-	if id != testImageID1 {
-		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String())
-	}
-
-	id, err = store.Get(ref2)
-	if err != nil {
-		t.Fatalf("Get returned error: %v", err)
-	}
-	if id != testImageID2 {
-		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String())
-	}
-
-	id, err = store.Get(ref3)
-	if err != nil {
-		t.Fatalf("Get returned error: %v", err)
-	}
-	if id != testImageID1 {
-		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String())
-	}
-
-	id, err = store.Get(ref4)
-	if err != nil {
-		t.Fatalf("Get returned error: %v", err)
-	}
-	if id != testImageID3 {
-		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String())
-	}
-
-	id, err = store.Get(ref5)
-	if err != nil {
-		t.Fatalf("Get returned error: %v", err)
-	}
-	if id != testImageID2 {
-		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String())
-	}
-
-	// Get should return ErrDoesNotExist for a nonexistent repo
-	nonExistRepo, err := ParseNamed("username/nonexistrepo:latest")
-	if err != nil {
-		t.Fatalf("could not parse reference: %v", err)
-	}
-	if _, err = store.Get(nonExistRepo); err != ErrDoesNotExist {
-		t.Fatal("Expected ErrDoesNotExist from Get")
-	}
-
-	// Get should return ErrDoesNotExist for a nonexistent tag
-	nonExistTag, err := ParseNamed("username/repo1:nonexist")
-	if err != nil {
-		t.Fatalf("could not parse reference: %v", err)
-	}
-	if _, err = store.Get(nonExistTag); err != ErrDoesNotExist {
-		t.Fatal("Expected ErrDoesNotExist from Get")
-	}
-
-	// Check References
-	refs := store.References(testImageID1)
-	if len(refs) != 3 {
-		t.Fatal("unexpected number of references")
-	}
-	// Looking for the references in this order verifies that they are
-	// returned lexically sorted.
- if refs[0].String() != ref3.String() { - t.Fatalf("unexpected reference: %v", refs[0].String()) - } - if refs[1].String() != ref1.String() { - t.Fatalf("unexpected reference: %v", refs[1].String()) - } - if refs[2].String() != nameOnly.String()+":latest" { - t.Fatalf("unexpected reference: %v", refs[2].String()) - } - - // Check ReferencesByName - repoName, err := WithName("username/repo1") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - associations := store.ReferencesByName(repoName) - if len(associations) != 3 { - t.Fatal("unexpected number of associations") - } - // Looking for the associations in this order verifies that they are - // returned lexically sorted. - if associations[0].Ref.String() != ref3.String() { - t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) - } - if associations[0].ImageID != testImageID1 { - t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) - } - if associations[1].Ref.String() != ref1.String() { - t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) - } - if associations[1].ImageID != testImageID1 { - t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) - } - if associations[2].Ref.String() != ref2.String() { - t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) - } - if associations[2].ImageID != testImageID2 { - t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) - } - - // Delete should return ErrDoesNotExist for a nonexistent repo - if _, err = store.Delete(nonExistRepo); err != ErrDoesNotExist { - t.Fatal("Expected ErrDoesNotExist from Delete") - } - - // Delete should return ErrDoesNotExist for a nonexistent tag - if _, err = store.Delete(nonExistTag); err != ErrDoesNotExist { - t.Fatal("Expected ErrDoesNotExist from Delete") - } - - // Delete a few references - if deleted, err := store.Delete(ref1); err != nil || deleted != true { - t.Fatal("Delete failed") - } - if _, err := store.Get(ref1); err != ErrDoesNotExist { - t.Fatal("Expected ErrDoesNotExist from Get") - } - if deleted, err := store.Delete(ref5); err != nil || deleted != true { - t.Fatal("Delete failed") - } - if _, err := store.Get(ref5); err != ErrDoesNotExist { - t.Fatal("Expected ErrDoesNotExist from Get") - } - if deleted, err := store.Delete(nameOnly); err != nil || deleted != true { - t.Fatal("Delete failed") - } - if _, err := store.Get(nameOnly); err != ErrDoesNotExist { - t.Fatal("Expected ErrDoesNotExist from Get") - } -} - -func TestInvalidTags(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "tag-store-test") - defer os.RemoveAll(tmpDir) - - store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json")) - if err != nil { - t.Fatalf("error creating tag store: %v", err) - } - id := image.ID("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6") - - // sha256 as repo name - ref, err := ParseNamed("sha256:abc") - if err != nil { - t.Fatal(err) - } - err = store.AddTag(ref, id, true) - if err == nil { - t.Fatalf("expected setting tag %q to fail", ref) - } - - // setting digest as a tag - ref, err = ParseNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6") - if err != nil { - t.Fatal(err) - } - err = store.AddTag(ref, id, true) - if err == nil { - t.Fatalf("expected setting digest %q to fail", ref) - } - -} diff --git a/registry/auth.go b/registry/auth.go deleted file mode 100644 index 0b5257182..000000000 --- a/registry/auth.go +++ /dev/null @@ -1,300 +0,0 @@ -package registry - -import ( - 
"fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" -) - -const ( - // AuthClientID is used the ClientID used for the token server - AuthClientID = "docker" -) - -// loginV1 tries to register/login to the v1 registry server. -func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) { - registryEndpoint, err := apiEndpoint.ToV1Endpoint(userAgent, nil) - if err != nil { - return "", "", err - } - - serverAddress := registryEndpoint.String() - - logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress) - - if serverAddress == "" { - return "", "", fmt.Errorf("Server Error: Server Address not set.") - } - - loginAgainstOfficialIndex := serverAddress == IndexServer - - req, err := http.NewRequest("GET", serverAddress+"users/", nil) - if err != nil { - return "", "", err - } - req.SetBasicAuth(authConfig.Username, authConfig.Password) - resp, err := registryEndpoint.client.Do(req) - if err != nil { - // fallback when request could not be completed - return "", "", fallbackError{ - err: err, - } - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", "", err - } - if resp.StatusCode == http.StatusOK { - return "Login Succeeded", "", nil - } else if resp.StatusCode == http.StatusUnauthorized { - if loginAgainstOfficialIndex { - return "", "", fmt.Errorf("Wrong login/password, please try again. Haven't got a Docker ID? Create one at https://hub.docker.com") - } - return "", "", fmt.Errorf("Wrong login/password, please try again") - } else if resp.StatusCode == http.StatusForbidden { - if loginAgainstOfficialIndex { - return "", "", fmt.Errorf("Login: Account is not active. Please check your e-mail for a confirmation link.") - } - // *TODO: Use registry configuration to determine what this says, if anything? - return "", "", fmt.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) - } else if resp.StatusCode == http.StatusInternalServerError { // Issue #14326 - logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body) - return "", "", fmt.Errorf("Internal Server Error") - } - return "", "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, - resp.StatusCode, resp.Header) -} - -type loginCredentialStore struct { - authConfig *types.AuthConfig -} - -func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { - return lcs.authConfig.Username, lcs.authConfig.Password -} - -func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { - return lcs.authConfig.IdentityToken -} - -func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { - lcs.authConfig.IdentityToken = token -} - -type staticCredentialStore struct { - auth *types.AuthConfig -} - -// NewStaticCredentialStore returns a credential store -// which always returns the same credential values. 
-func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore { - return staticCredentialStore{ - auth: auth, - } -} - -func (scs staticCredentialStore) Basic(*url.URL) (string, string) { - if scs.auth == nil { - return "", "" - } - return scs.auth.Username, scs.auth.Password -} - -func (scs staticCredentialStore) RefreshToken(*url.URL, string) string { - if scs.auth == nil { - return "" - } - return scs.auth.IdentityToken -} - -func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) { -} - -type fallbackError struct { - err error -} - -func (err fallbackError) Error() string { - return err.err.Error() -} - -// loginV2 tries to login to the v2 registry server. The given registry -// endpoint will be pinged to get authorization challenges. These challenges -// will be used to authenticate against the registry to validate credentials. -func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) { - logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/") - - modifiers := DockerHeaders(userAgent, nil) - authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...) - - credentialAuthConfig := *authConfig - creds := loginCredentialStore{ - authConfig: &credentialAuthConfig, - } - - loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) - if err != nil { - return "", "", err - } - - endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" - req, err := http.NewRequest("GET", endpointStr, nil) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err - } - - resp, err := loginClient.Do(req) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - // TODO(dmcgowan): Attempt to further interpret result, status code and error code string - err := fmt.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err - } - - return "Login Succeeded", credentialAuthConfig.IdentityToken, nil - -} - -func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) { - challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return nil, foundV2, err - } - - tokenHandlerOptions := auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - OfflineAccess: true, - ClientID: AuthClientID, - Scopes: scopes, - } - tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) - tr := transport.NewTransport(authTransport, modifiers...) 
-
-	return &http.Client{
-		Transport: tr,
-		Timeout:   15 * time.Second,
-	}, foundV2, nil
-
-}
-
-// ResolveAuthConfig matches an auth configuration to a server address or a URL
-func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig {
-	configKey := GetAuthConfigKey(index)
-	// First try the happy case
-	if c, found := authConfigs[configKey]; found || index.Official {
-		return c
-	}
-
-	convertToHostname := func(url string) string {
-		stripped := url
-		if strings.HasPrefix(url, "http://") {
-			stripped = strings.Replace(url, "http://", "", 1)
-		} else if strings.HasPrefix(url, "https://") {
-			stripped = strings.Replace(url, "https://", "", 1)
-		}
-
-		nameParts := strings.SplitN(stripped, "/", 2)
-
-		return nameParts[0]
-	}
-
-	// Maybe they have a legacy config file; we will iterate the keys,
-	// converting them to the new format and testing each one.
-	for registry, ac := range authConfigs {
-		if configKey == convertToHostname(registry) {
-			return ac
-		}
-	}
-
-	// When all else fails, return an empty auth config
-	return types.AuthConfig{}
-}
-
-// PingResponseError is used when the response from a ping
-// was received but invalid.
-type PingResponseError struct {
-	Err error
-}
-
-func (err PingResponseError) Error() string {
-	return err.Err.Error()
-}
-
-// PingV2Registry attempts to ping a v2 registry and on success returns a
-// challenge manager for the supported authentication types and
-// whether v2 was confirmed by the response. If a response is received but
-// cannot be interpreted, a PingResponseError will be returned.
-func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (auth.ChallengeManager, bool, error) {
-	var (
-		foundV2   = false
-		v2Version = auth.APIVersion{
-			Type:    "registry",
-			Version: "2.0",
-		}
-	)
-
-	pingClient := &http.Client{
-		Transport: transport,
-		Timeout:   15 * time.Second,
-	}
-	endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/"
-	req, err := http.NewRequest("GET", endpointStr, nil)
-	if err != nil {
-		return nil, false, err
-	}
-	resp, err := pingClient.Do(req)
-	if err != nil {
-		return nil, false, err
-	}
-	defer resp.Body.Close()
-
-	versions := auth.APIVersions(resp, DefaultRegistryVersionHeader)
-	for _, pingVersion := range versions {
-		if pingVersion == v2Version {
-			// The version header indicates we're definitely
-			// talking to a v2 registry. So don't allow future
-			// fallbacks to the v1 protocol.
- - foundV2 = true - break - } - } - - challengeManager := auth.NewSimpleChallengeManager() - if err := challengeManager.AddResponse(resp); err != nil { - return nil, foundV2, PingResponseError{ - Err: err, - } - } - - return challengeManager, foundV2, nil -} diff --git a/registry/auth_test.go b/registry/auth_test.go deleted file mode 100644 index eedee44ef..000000000 --- a/registry/auth_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package registry - -import ( - "testing" - - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" -) - -func buildAuthConfigs() map[string]types.AuthConfig { - authConfigs := map[string]types.AuthConfig{} - - for _, registry := range []string{"testIndex", IndexServer} { - authConfigs[registry] = types.AuthConfig{ - Username: "docker-user", - Password: "docker-pass", - } - } - - return authConfigs -} - -func TestSameAuthDataPostSave(t *testing.T) { - authConfigs := buildAuthConfigs() - authConfig := authConfigs["testIndex"] - if authConfig.Username != "docker-user" { - t.Fail() - } - if authConfig.Password != "docker-pass" { - t.Fail() - } - if authConfig.Auth != "" { - t.Fail() - } -} - -func TestResolveAuthConfigIndexServer(t *testing.T) { - authConfigs := buildAuthConfigs() - indexConfig := authConfigs[IndexServer] - - officialIndex := ®istrytypes.IndexInfo{ - Official: true, - } - privateIndex := ®istrytypes.IndexInfo{ - Official: false, - } - - resolved := ResolveAuthConfig(authConfigs, officialIndex) - assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServer") - - resolved = ResolveAuthConfig(authConfigs, privateIndex) - assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServer") -} - -func TestResolveAuthConfigFullURL(t *testing.T) { - authConfigs := buildAuthConfigs() - - registryAuth := types.AuthConfig{ - Username: "foo-user", - Password: "foo-pass", - } - localAuth := types.AuthConfig{ - Username: "bar-user", - Password: "bar-pass", - } - officialAuth := types.AuthConfig{ - Username: "baz-user", - Password: "baz-pass", - } - authConfigs[IndexServer] = officialAuth - - expectedAuths := map[string]types.AuthConfig{ - "registry.example.com": registryAuth, - "localhost:8000": localAuth, - "registry.com": localAuth, - } - - validRegistries := map[string][]string{ - "registry.example.com": { - "https://registry.example.com/v1/", - "http://registry.example.com/v1/", - "registry.example.com", - "registry.example.com/v1/", - }, - "localhost:8000": { - "https://localhost:8000/v1/", - "http://localhost:8000/v1/", - "localhost:8000", - "localhost:8000/v1/", - }, - "registry.com": { - "https://registry.com/v1/", - "http://registry.com/v1/", - "registry.com", - "registry.com/v1/", - }, - } - - for configKey, registries := range validRegistries { - configured, ok := expectedAuths[configKey] - if !ok { - t.Fail() - } - index := ®istrytypes.IndexInfo{ - Name: configKey, - } - for _, registry := range registries { - authConfigs[registry] = configured - resolved := ResolveAuthConfig(authConfigs, index) - if resolved.Username != configured.Username || resolved.Password != configured.Password { - t.Errorf("%s -> %v != %v\n", registry, resolved, configured) - } - delete(authConfigs, registry) - resolved = ResolveAuthConfig(authConfigs, index) - if resolved.Username == configured.Username || resolved.Password == configured.Password { - t.Errorf("%s -> %v == %v\n", registry, resolved, configured) - } - } - } -} diff --git a/registry/config.go b/registry/config.go 
deleted file mode 100644 index 51302d110..000000000 --- a/registry/config.go +++ /dev/null @@ -1,274 +0,0 @@ -package registry - -import ( - "errors" - "fmt" - "net" - "net/url" - "strings" - - "github.com/docker/docker/opts" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/reference" - registrytypes "github.com/docker/engine-api/types/registry" -) - -// ServiceOptions holds command line options. -type ServiceOptions struct { - Mirrors []string `json:"registry-mirrors,omitempty"` - InsecureRegistries []string `json:"insecure-registries,omitempty"` - - // V2Only controls access to legacy registries. If it is set to true via the - // command line flag the daemon will not attempt to contact v1 legacy registries - V2Only bool `json:"disable-legacy-registry,omitempty"` -} - -// serviceConfig holds daemon configuration for the registry service. -type serviceConfig struct { - registrytypes.ServiceConfig - V2Only bool -} - -var ( - // DefaultNamespace is the default namespace - DefaultNamespace = "docker.io" - // DefaultRegistryVersionHeader is the name of the default HTTP header - // that carries Registry version info - DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" - - // IndexServer is the v1 registry server used for user auth + account creation - IndexServer = DefaultV1Registry.String() + "/v1/" - // IndexName is the name of the index - IndexName = "docker.io" - - // NotaryServer is the endpoint serving the Notary trust server - NotaryServer = "https://notary.docker.io" - - // DefaultV1Registry is the URI of the default v1 registry - DefaultV1Registry = &url.URL{ - Scheme: "https", - Host: "index.docker.io", - } - - // DefaultV2Registry is the URI of the default v2 registry - DefaultV2Registry = &url.URL{ - Scheme: "https", - Host: "registry-1.docker.io", - } -) - -var ( - // ErrInvalidRepositoryName is an error returned if the repository name did - // not have the correct form - ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") - - emptyServiceConfig = newServiceConfig(ServiceOptions{}) -) - -// for mocking in unit tests -var lookupIP = net.LookupIP - -// InstallCliFlags adds command-line options to the top-level flag parser for -// the current process. -func (options *ServiceOptions) InstallCliFlags(cmd *flag.FlagSet, usageFn func(string) string) { - mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, ValidateMirror) - cmd.Var(mirrors, []string{"-registry-mirror"}, usageFn("Preferred Docker registry mirror")) - - insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, ValidateIndexName) - cmd.Var(insecureRegistries, []string{"-insecure-registry"}, usageFn("Enable insecure registry communication")) - - cmd.BoolVar(&options.V2Only, []string{"-disable-legacy-registry"}, false, usageFn("Do not contact legacy registries")) -} - -// newServiceConfig returns a new instance of ServiceConfig -func newServiceConfig(options ServiceOptions) *serviceConfig { - // Localhost is by default considered as an insecure registry - // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). - // - // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change - // daemon flags on boot2docker? 
- options.InsecureRegistries = append(options.InsecureRegistries, "127.0.0.0/8") - - config := &serviceConfig{ - ServiceConfig: registrytypes.ServiceConfig{ - InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), - IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), - // Hack: Bypass setting the mirrors to IndexConfigs since they are going away - // and Mirrors are only for the official registry anyways. - Mirrors: options.Mirrors, - }, - V2Only: options.V2Only, - } - // Split --insecure-registry into CIDR and registry-specific settings. - for _, r := range options.InsecureRegistries { - // Check if CIDR was passed to --insecure-registry - _, ipnet, err := net.ParseCIDR(r) - if err == nil { - // Valid CIDR. - config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, (*registrytypes.NetIPNet)(ipnet)) - } else { - // Assume `host:port` if not CIDR. - config.IndexConfigs[r] = ®istrytypes.IndexInfo{ - Name: r, - Mirrors: make([]string, 0), - Secure: false, - Official: false, - } - } - } - - // Configure public registry. - config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ - Name: IndexName, - Mirrors: config.Mirrors, - Secure: true, - Official: true, - } - - return config -} - -// isSecureIndex returns false if the provided indexName is part of the list of insecure registries -// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. -// -// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. -// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered -// insecure. -// -// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name -// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained -// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element -// of insecureRegistries. -func isSecureIndex(config *serviceConfig, indexName string) bool { - // Check for configured index, first. This is needed in case isSecureIndex - // is called from anything besides newIndexInfo, in order to honor per-index configurations. - if index, ok := config.IndexConfigs[indexName]; ok { - return index.Secure - } - - host, _, err := net.SplitHostPort(indexName) - if err != nil { - // assume indexName is of the form `host` without the port and go on. - host = indexName - } - - addrs, err := lookupIP(host) - if err != nil { - ip := net.ParseIP(host) - if ip != nil { - addrs = []net.IP{ip} - } - - // if ip == nil, then `host` is neither an IP nor it could be looked up, - // either because the index is unreachable, or because the index is behind an HTTP proxy. - // So, len(addrs) == 0 and we're not aborting. - } - - // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. 
- for _, addr := range addrs { - for _, ipnet := range config.InsecureRegistryCIDRs { - // check if the addr falls in the subnet - if (*net.IPNet)(ipnet).Contains(addr) { - return false - } - } - } - - return true -} - -// ValidateMirror validates an HTTP(S) registry mirror -func ValidateMirror(val string) (string, error) { - uri, err := url.Parse(val) - if err != nil { - return "", fmt.Errorf("%s is not a valid URI", val) - } - - if uri.Scheme != "http" && uri.Scheme != "https" { - return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme) - } - - if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { - return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI") - } - - return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil -} - -// ValidateIndexName validates an index name. -func ValidateIndexName(val string) (string, error) { - if val == reference.LegacyDefaultHostname { - val = reference.DefaultHostname - } - if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { - return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val) - } - return val, nil -} - -func validateNoScheme(reposName string) error { - if strings.Contains(reposName, "://") { - // It cannot contain a scheme! - return ErrInvalidRepositoryName - } - return nil -} - -// newIndexInfo returns IndexInfo configuration from indexName -func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) { - var err error - indexName, err = ValidateIndexName(indexName) - if err != nil { - return nil, err - } - - // Return any configured index info, first. - if index, ok := config.IndexConfigs[indexName]; ok { - return index, nil - } - - // Construct a non-configured index info. - index := ®istrytypes.IndexInfo{ - Name: indexName, - Mirrors: make([]string, 0), - Official: false, - } - index.Secure = isSecureIndex(config, indexName) - return index, nil -} - -// GetAuthConfigKey special-cases using the full index address of the official -// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. -func GetAuthConfigKey(index *registrytypes.IndexInfo) string { - if index.Official { - return IndexServer - } - return index.Name -} - -// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo -func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { - index, err := newIndexInfo(config, name.Hostname()) - if err != nil { - return nil, err - } - official := !strings.ContainsRune(name.Name(), '/') - return &RepositoryInfo{name, index, official}, nil -} - -// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but -// lacks registry configuration. -func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { - return newRepositoryInfo(emptyServiceConfig, reposName) -} - -// ParseSearchIndexInfo will use repository name to get back an indexInfo. 
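The two exported validators above are what the daemon runs over --registry-mirror and --insecure-registry values before they land in serviceConfig. A few worked examples as a same-package sketch (the expectations mirror config_test.go further down; "fmt" is assumed imported). ParseSearchIndexInfo, whose definition follows, builds on the same newIndexInfo path.

    // Sketch only (same package): expected validator behavior.
    func exampleValidation() {
    	m, err := ValidateMirror("https://mirror-1.com")
    	fmt.Println(m, err) // "https://mirror-1.com/" <nil>: host kept, path normalized

    	_, err = ValidateMirror("https://mirror-1.com/v1/")
    	fmt.Println(err != nil) // true: a mirror must carry no path, query, or fragment

    	name, _ := ValidateIndexName("index.docker.io")
    	fmt.Println(name) // "docker.io": the legacy hostname maps to the default one
    }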
-func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { - indexName, _ := splitReposSearchTerm(reposName) - - indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) - if err != nil { - return nil, err - } - return indexInfo, nil -} diff --git a/registry/config_test.go b/registry/config_test.go deleted file mode 100644 index 25578a7f2..000000000 --- a/registry/config_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package registry - -import ( - "testing" -) - -func TestValidateMirror(t *testing.T) { - valid := []string{ - "http://mirror-1.com", - "https://mirror-1.com", - "http://localhost", - "https://localhost", - "http://localhost:5000", - "https://localhost:5000", - "http://127.0.0.1", - "https://127.0.0.1", - "http://127.0.0.1:5000", - "https://127.0.0.1:5000", - } - - invalid := []string{ - "!invalid!://%as%", - "ftp://mirror-1.com", - "http://mirror-1.com/", - "http://mirror-1.com/?q=foo", - "http://mirror-1.com/v1/", - "http://mirror-1.com/v1/?q=foo", - "http://mirror-1.com/v1/?q=foo#frag", - "http://mirror-1.com?q=foo", - "https://mirror-1.com#frag", - "https://mirror-1.com/", - "https://mirror-1.com/#frag", - "https://mirror-1.com/v1/", - "https://mirror-1.com/v1/#", - "https://mirror-1.com?q", - } - - for _, address := range valid { - if ret, err := ValidateMirror(address); err != nil || ret == "" { - t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) - } - } - - for _, address := range invalid { - if ret, err := ValidateMirror(address); err == nil || ret != "" { - t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) - } - } -} diff --git a/registry/config_unix.go b/registry/config_unix.go deleted file mode 100644 index b81d24933..000000000 --- a/registry/config_unix.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !windows - -package registry - -var ( - // CertsDir is the directory where certificates are stored - CertsDir = "/etc/docker/certs.d" -) - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:/index.docker.io/v1. Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return s -} diff --git a/registry/config_windows.go b/registry/config_windows.go deleted file mode 100644 index 82bc4afea..000000000 --- a/registry/config_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package registry - -import ( - "os" - "path/filepath" - "strings" -) - -// CertsDir is the directory where certificates are stored -var CertsDir = os.Getenv("programdata") + `\docker\certs.d` - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:\index.docker.io\v1. 
Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return filepath.FromSlash(strings.Replace(s, ":", "", -1)) -} diff --git a/registry/endpoint_test.go b/registry/endpoint_test.go deleted file mode 100644 index 8451d3f67..000000000 --- a/registry/endpoint_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package registry - -import ( - "net/http" - "net/http/httptest" - "net/url" - "testing" -) - -func TestEndpointParse(t *testing.T) { - testData := []struct { - str string - expected string - }{ - {IndexServer, IndexServer}, - {"http://0.0.0.0:5000/v1/", "http://0.0.0.0:5000/v1/"}, - {"http://0.0.0.0:5000", "http://0.0.0.0:5000/v1/"}, - {"0.0.0.0:5000", "https://0.0.0.0:5000/v1/"}, - {"http://0.0.0.0:5000/nonversion/", "http://0.0.0.0:5000/nonversion/v1/"}, - {"http://0.0.0.0:5000/v0/", "http://0.0.0.0:5000/v0/v1/"}, - } - for _, td := range testData { - e, err := newV1EndpointFromStr(td.str, nil, "", nil) - if err != nil { - t.Errorf("%q: %s", td.str, err) - } - if e == nil { - t.Logf("something's fishy, endpoint for %q is nil", td.str) - continue - } - if e.String() != td.expected { - t.Errorf("expected %q, got %q", td.expected, e.String()) - } - } -} - -func TestEndpointParseInvalid(t *testing.T) { - testData := []string{ - "http://0.0.0.0:5000/v2/", - } - for _, td := range testData { - e, err := newV1EndpointFromStr(td, nil, "", nil) - if err == nil { - t.Errorf("expected error parsing %q: parsed as %q", td, e) - } - } -} - -// Ensure that a registry endpoint that responds with a 401 only is determined -// to be a valid v1 registry endpoint -func TestValidateEndpoint(t *testing.T) { - requireBasicAuthHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("WWW-Authenticate", `Basic realm="localhost"`) - w.WriteHeader(http.StatusUnauthorized) - }) - - // Make a test server which should validate as a v1 server. - testServer := httptest.NewServer(requireBasicAuthHandler) - defer testServer.Close() - - testServerURL, err := url.Parse(testServer.URL) - if err != nil { - t.Fatal(err) - } - - testEndpoint := V1Endpoint{ - URL: testServerURL, - client: HTTPClient(NewTransport(nil)), - } - - if err = validateEndpoint(&testEndpoint); err != nil { - t.Fatal(err) - } - - if testEndpoint.URL.Scheme != "http" { - t.Fatalf("expecting to validate endpoint as http, got url %s", testEndpoint.String()) - } -} diff --git a/registry/endpoint_v1.go b/registry/endpoint_v1.go deleted file mode 100644 index fd81972c7..000000000 --- a/registry/endpoint_v1.go +++ /dev/null @@ -1,198 +0,0 @@ -package registry - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/transport" - registrytypes "github.com/docker/engine-api/types/registry" -) - -// V1Endpoint stores basic information about a V1 registry endpoint. -type V1Endpoint struct { - client *http.Client - URL *url.URL - IsSecure bool -} - -// NewV1Endpoint parses the given address to return a registry endpoint. 
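NewV1Endpoint, defined next, is the exported entry point into this file: it derives TLS settings from the index, normalizes the address through newV1EndpointFromStr (the rules TestEndpointParse above encodes), and then pings the result, falling back from HTTPS to HTTP only for indexes marked insecure. A hypothetical call against a private registry (the host name is invented for illustration, and the ping means the registry must actually be reachable):

    // Sketch only: resolving a V1 endpoint for a hypothetical private registry.
    package main

    import (
    	"fmt"

    	"github.com/docker/docker/registry"
    	registrytypes "github.com/docker/engine-api/types/registry"
    )

    func main() {
    	index := &registrytypes.IndexInfo{
    		Name:   "myregistry:5000", // hypothetical host:port
    		Secure: false,             // permits the HTTPS -> HTTP fallback
    	}
    	endpoint, err := registry.NewV1Endpoint(index, "docker/1.12 example", nil)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(endpoint.String()) // e.g. "http://myregistry:5000/v1/"
    }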
-func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - tlsConfig, err := newTLSConfig(index.Name, index.Secure) - if err != nil { - return nil, err - } - - endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) - if err != nil { - return nil, err - } - - if err := validateEndpoint(endpoint); err != nil { - return nil, err - } - - return endpoint, nil -} - -func validateEndpoint(endpoint *V1Endpoint) error { - logrus.Debugf("pinging registry endpoint %s", endpoint) - - // Try HTTPS ping to registry - endpoint.URL.Scheme = "https" - if _, err := endpoint.Ping(); err != nil { - if endpoint.IsSecure { - // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` - // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. - return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) - } - - // If registry is insecure and HTTPS failed, fallback to HTTP. - logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) - endpoint.URL.Scheme = "http" - - var err2 error - if _, err2 = endpoint.Ping(); err2 == nil { - return nil - } - - return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) - } - - return nil -} - -func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - endpoint := &V1Endpoint{ - IsSecure: (tlsConfig == nil || !tlsConfig.InsecureSkipVerify), - URL: new(url.URL), - } - - *endpoint.URL = address - - // TODO(tiborvass): make sure a ConnectTimeout transport is used - tr := NewTransport(tlsConfig) - endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...)) - return endpoint, nil -} - -// trimV1Address trims the version off the address and returns the -// trimmed address or an error if there is a non-V1 version. 
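trimV1Address, defined next, only strips an explicit trailing v1 path component; any other recognized API version is an error, and unrecognized segments pass through untouched, which is why TestEndpointParse above expects .../nonversion/ and .../v0/ addresses to gain a /v1/ suffix rather than lose anything. A same-package sketch of the rules, with invented hosts ("fmt" assumed imported):

    // Sketch only (same package): the trimming rules.
    func exampleTrimV1Address() {
    	for _, addr := range []string{
    		"https://host:5000/v1/", // -> "https://host:5000"
    		"https://host:5000",     // -> "https://host:5000" (nothing to trim)
    		"https://host:5000/v0/", // -> "https://host:5000/v0" (unknown segment kept)
    	} {
    		trimmed, _ := trimV1Address(addr)
    		fmt.Println(trimmed)
    	}
    	_, err := trimV1Address("https://host:5000/v2/")
    	fmt.Println(err != nil) // true: an explicit non-V1 version is rejected
    }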
-func trimV1Address(address string) (string, error) { - var ( - chunks []string - apiVersionStr string - ) - - if strings.HasSuffix(address, "/") { - address = address[:len(address)-1] - } - - chunks = strings.Split(address, "/") - apiVersionStr = chunks[len(chunks)-1] - if apiVersionStr == "v1" { - return strings.Join(chunks[:len(chunks)-1], "/"), nil - } - - for k, v := range apiVersions { - if k != APIVersion1 && apiVersionStr == v { - return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr) - } - } - - return address, nil -} - -func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { - address = "https://" + address - } - - address, err := trimV1Address(address) - if err != nil { - return nil, err - } - - uri, err := url.Parse(address) - if err != nil { - return nil, err - } - - endpoint, err := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) - if err != nil { - return nil, err - } - - return endpoint, nil -} - -// Get the formatted URL for the root of this registry Endpoint -func (e *V1Endpoint) String() string { - return e.URL.String() + "/v1/" -} - -// Path returns a formatted string for the URL -// of this endpoint with the given path appended. -func (e *V1Endpoint) Path(path string) string { - return e.URL.String() + "/v1/" + path -} - -// Ping returns a PingResult which indicates whether the registry is standalone or not. -func (e *V1Endpoint) Ping() (PingResult, error) { - logrus.Debugf("attempting v1 ping for registry endpoint %s", e) - - if e.String() == IndexServer { - // Skip the check, we know this one is valid - // (and we never want to fallback to http in case of error) - return PingResult{Standalone: false}, nil - } - - req, err := http.NewRequest("GET", e.Path("_ping"), nil) - if err != nil { - return PingResult{Standalone: false}, err - } - - resp, err := e.client.Do(req) - if err != nil { - return PingResult{Standalone: false}, err - } - - defer resp.Body.Close() - - jsonString, err := ioutil.ReadAll(resp.Body) - if err != nil { - return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) - } - - // If the header is absent, we assume true for compatibility with earlier - // versions of the registry. default to true - info := PingResult{ - Standalone: true, - } - if err := json.Unmarshal(jsonString, &info); err != nil { - logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) - // don't stop here. Just assume sane defaults - } - if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { - logrus.Debugf("Registry version header: '%s'", hdr) - info.Version = hdr - } - logrus.Debugf("PingResult.Version: %q", info.Version) - - standalone := resp.Header.Get("X-Docker-Registry-Standalone") - logrus.Debugf("Registry standalone header: '%s'", standalone) - // Accepted values are "true" (case-insensitive) and "1". 
- if strings.EqualFold(standalone, "true") || standalone == "1" { - info.Standalone = true - } else if len(standalone) > 0 { - // there is a header set, and it is not "true" or "1", so assume fails - info.Standalone = false - } - logrus.Debugf("PingResult.Standalone: %t", info.Standalone) - return info, nil -} diff --git a/registry/reference.go b/registry/reference.go deleted file mode 100644 index e15f83eee..000000000 --- a/registry/reference.go +++ /dev/null @@ -1,68 +0,0 @@ -package registry - -import ( - "strings" - - "github.com/docker/distribution/digest" -) - -// Reference represents a tag or digest within a repository -type Reference interface { - // HasDigest returns whether the reference has a verifiable - // content addressable reference which may be considered secure. - HasDigest() bool - - // ImageName returns an image name for the given repository - ImageName(string) string - - // Returns a string representation of the reference - String() string -} - -type tagReference struct { - tag string -} - -func (tr tagReference) HasDigest() bool { - return false -} - -func (tr tagReference) ImageName(repo string) string { - return repo + ":" + tr.tag -} - -func (tr tagReference) String() string { - return tr.tag -} - -type digestReference struct { - digest digest.Digest -} - -func (dr digestReference) HasDigest() bool { - return true -} - -func (dr digestReference) ImageName(repo string) string { - return repo + "@" + dr.String() -} - -func (dr digestReference) String() string { - return dr.digest.String() -} - -// ParseReference parses a reference into either a digest or tag reference -func ParseReference(ref string) Reference { - if strings.Contains(ref, ":") { - dgst, err := digest.ParseDigest(ref) - if err == nil { - return digestReference{digest: dgst} - } - } - return tagReference{tag: ref} -} - -// DigestReference creates a digest reference using a digest -func DigestReference(dgst digest.Digest) Reference { - return digestReference{digest: dgst} -} diff --git a/registry/registry.go b/registry/registry.go deleted file mode 100644 index 973bff9f9..000000000 --- a/registry/registry.go +++ /dev/null @@ -1,190 +0,0 @@ -// Package registry contains client primitives to interact with a remote Docker registry. 
-package registry - -import ( - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" -) - -var ( - // ErrAlreadyExists is an error returned if an image being pushed - // already exists on the remote side - ErrAlreadyExists = errors.New("Image already exists") -) - -func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { - // PreferredServerCipherSuites should have no effect - tlsConfig := tlsconfig.ServerDefault - - tlsConfig.InsecureSkipVerify = !isSecure - - if isSecure && CertsDir != "" { - hostDir := filepath.Join(CertsDir, cleanPath(hostname)) - logrus.Debugf("hostDir: %s", hostDir) - if err := ReadCertsDirectory(&tlsConfig, hostDir); err != nil { - return nil, err - } - } - - return &tlsConfig, nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - -// ReadCertsDirectory reads the directory for TLS certificates -// including roots and certificate pairs and updates the -// provided TLS configuration. -func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { - fs, err := ioutil.ReadDir(directory) - if err != nil && !os.IsNotExist(err) { - return err - } - - for _, f := range fs { - if strings.HasSuffix(f.Name(), ".crt") { - if tlsConfig.RootCAs == nil { - // TODO(dmcgowan): Copy system pool - tlsConfig.RootCAs = x509.NewCertPool() - } - logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) - data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) - if err != nil { - return err - } - tlsConfig.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, keyName) { - return fmt.Errorf("Missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt.", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) - if err != nil { - return err - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, certName) { - return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) - } - } - } - - return nil -} - -// DockerHeaders returns request modifiers with a User-Agent and metaHeaders -func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier { - modifiers := []transport.RequestModifier{} - if userAgent != "" { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ - "User-Agent": []string{userAgent}, - })) - } - if metaHeaders != nil { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) - } - return modifiers -} - -// HTTPClient returns an HTTP client structure which uses the given transport -// and contains the necessary headers for redirected requests -func HTTPClient(transport http.RoundTripper) *http.Client { - return &http.Client{ - Transport: transport, - CheckRedirect: addRequiredHeadersToRedirectedRequests, - } -} - -func trustedLocation(req *http.Request) bool { - var ( - trusteds = []string{"docker.com", "docker.io"} - hostname = strings.SplitN(req.Host, ":", 2)[0] - ) - if req.URL.Scheme != "https" { - return false - } - - for _, trusted := range trusteds { - if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { - return true - } - } - return false -} - -// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers -// for redirected requests -func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { - if via != nil && via[0] != nil { - if trustedLocation(req) && trustedLocation(via[0]) { - req.Header = via[0].Header - return nil - } - for k, v := range via[0].Header { - if k != "Authorization" { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - } - } - return nil -} - -// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the -// default TLS configuration. 
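NewTransport (which follows), DockerHeaders, and HTTPClient above were the building blocks for every outbound registry connection: a TLS-aware transport, User-Agent and meta-header request modifiers layered on via the distribution transport package, and a client whose redirect hook re-adds headers only for trusted docker.com/docker.io hosts. A sketch of how they composed (the user agent string is invented for illustration):

    // Sketch only: composing the helpers above into a registry HTTP client.
    package main

    import (
    	"fmt"

    	"github.com/docker/distribution/registry/client/transport"
    	"github.com/docker/docker/registry"
    )

    func main() {
    	base := registry.NewTransport(nil) // nil -> default TLS configuration

    	// Attach User-Agent (and any meta headers) as request modifiers.
    	modifiers := registry.DockerHeaders("docker/1.12 example", nil)
    	rt := transport.NewTransport(base, modifiers...)

    	// Wrap in a client that re-adds headers across trusted redirects.
    	client := registry.HTTPClient(rt)

    	resp, err := client.Get("https://registry-1.docker.io/v2/")
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	fmt.Println(resp.Status) // typically "401 Unauthorized" with an auth challenge
    }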
-func NewTransport(tlsConfig *tls.Config) *http.Transport { - if tlsConfig == nil { - var cfg = tlsconfig.ServerDefault - tlsConfig = &cfg - } - - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: direct.Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - - proxyDialer, err := sockets.DialerFromEnvironment(direct) - if err == nil { - base.Dial = proxyDialer.Dial - } - return base -} diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go deleted file mode 100644 index 828f48fc9..000000000 --- a/registry/registry_mock_test.go +++ /dev/null @@ -1,476 +0,0 @@ -package registry - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "net/url" - "strconv" - "strings" - "testing" - "time" - - "github.com/docker/docker/reference" - registrytypes "github.com/docker/engine-api/types/registry" - "github.com/gorilla/mux" - - "github.com/Sirupsen/logrus" -) - -var ( - testHTTPServer *httptest.Server - testHTTPSServer *httptest.Server - testLayers = map[string]map[string]string{ - "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { - "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", - "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", - "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, - "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, - "Tty":false,"OpenStdin":false,"StdinOnce":false, - "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, - "VolumesFrom":"","Entrypoint":null},"Size":424242}`, - "checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", - "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", - "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, - "layer": string([]byte{ - 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, - 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, - 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, - 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, - 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, - 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, - 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, - 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, - 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, - 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, - 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, - }), - }, - "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { - "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", - "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", - "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", - "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, - 
"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, - "Tty":false,"OpenStdin":false,"StdinOnce":false, - "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, - "VolumesFrom":"","Entrypoint":null},"Size":424242}`, - "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", - "checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", - "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", - "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, - "layer": string([]byte{ - 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, - 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, - 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, - 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, - 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, - 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, - 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, - 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, - 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, - 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, - 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, - }), - }, - } - testRepositories = map[string]map[string]string{ - "foo42/bar": { - "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", - "test": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", - }, - } - mockHosts = map[string][]net.IP{ - "": {net.ParseIP("0.0.0.0")}, - "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, - "example.com": {net.ParseIP("42.42.42.42")}, - "other.com": {net.ParseIP("43.43.43.43")}, - } -) - -func init() { - r := mux.NewRouter() - - // /v1/ - r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") - r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") - r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") - r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") - r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") - r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") - r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") - r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") - r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") - r.HandleFunc("/v1/search", handlerSearch).Methods("GET") - - // /v2/ - r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") - - testHTTPServer = httptest.NewServer(handlerAccessLog(r)) - testHTTPSServer = httptest.NewTLSServer(handlerAccessLog(r)) - - // override net.LookupIP - lookupIP = func(host string) ([]net.IP, error) { - if host == "127.0.0.1" { - // I believe in future Go versions this will fail, so let's fix it later - return net.LookupIP(host) - } - for h, addrs := range mockHosts { - if host == h { - return addrs, nil - } - for _, addr := range addrs { - if addr.String() == host 
{ - return []net.IP{addr}, nil - } - } - } - return nil, errors.New("lookup: no such host") - } -} - -func handlerAccessLog(handler http.Handler) http.Handler { - logHandler := func(w http.ResponseWriter, r *http.Request) { - logrus.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) - handler.ServeHTTP(w, r) - } - return http.HandlerFunc(logHandler) -} - -func makeURL(req string) string { - return testHTTPServer.URL + req -} - -func makeHTTPSURL(req string) string { - return testHTTPSServer.URL + req -} - -func makeIndex(req string) *registrytypes.IndexInfo { - index := &registrytypes.IndexInfo{ - Name: makeURL(req), - } - return index -} - -func makeHTTPSIndex(req string) *registrytypes.IndexInfo { - index := &registrytypes.IndexInfo{ - Name: makeHTTPSURL(req), - } - return index -} - -func makePublicIndex() *registrytypes.IndexInfo { - index := &registrytypes.IndexInfo{ - Name: IndexServer, - Secure: true, - Official: true, - } - return index -} - -func makeServiceConfig(mirrors []string, insecureRegistries []string) *serviceConfig { - options := ServiceOptions{ - Mirrors: mirrors, - InsecureRegistries: insecureRegistries, - } - - return newServiceConfig(options) -} - -func writeHeaders(w http.ResponseWriter) { - h := w.Header() - h.Add("Server", "docker-tests/mock") - h.Add("Expires", "-1") - h.Add("Content-Type", "application/json") - h.Add("Pragma", "no-cache") - h.Add("Cache-Control", "no-cache") - h.Add("X-Docker-Registry-Version", "0.0.0") - h.Add("X-Docker-Registry-Config", "mock") -} - -func writeResponse(w http.ResponseWriter, message interface{}, code int) { - writeHeaders(w) - w.WriteHeader(code) - body, err := json.Marshal(message) - if err != nil { - io.WriteString(w, err.Error()) - return - } - w.Write(body) -} - -func readJSON(r *http.Request, dest interface{}) error { - body, err := ioutil.ReadAll(r.Body) - if err != nil { - return err - } - return json.Unmarshal(body, dest) -} - -func apiError(w http.ResponseWriter, message string, code int) { - body := map[string]string{ - "error": message, - } - writeResponse(w, body, code) -} - -func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { - if a == b { - return - } - if len(message) == 0 { - message = fmt.Sprintf("%v != %v", a, b) - } - t.Fatal(message) -} - -func assertNotEqual(t *testing.T, a interface{}, b interface{}, message string) { - if a != b { - return - } - if len(message) == 0 { - message = fmt.Sprintf("%v == %v", a, b) - } - t.Fatal(message) -} - -// Similar to assertEqual, but does not stop test -func checkEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { - if a == b { - return - } - message := fmt.Sprintf("%v != %v", a, b) - if len(messagePrefix) != 0 { - message = messagePrefix + ": " + message - } - t.Error(message) -} - -// Similar to assertNotEqual, but does not stop test -func checkNotEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { - if a != b { - return - } - message := fmt.Sprintf("%v == %v", a, b) - if len(messagePrefix) != 0 { - message = messagePrefix + ": " + message - } - t.Error(message) -} - -func requiresAuth(w http.ResponseWriter, r *http.Request) bool { - writeCookie := func() { - value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) - cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} - http.SetCookie(w, cookie) - //FIXME(sam): this should be sent only on Index routes - value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) - w.Header().Add("X-Docker-Token", value) - } - if len(r.Cookies()) > 0
{ - writeCookie() - return true - } - if len(r.Header.Get("Authorization")) > 0 { - writeCookie() - return true - } - w.Header().Add("WWW-Authenticate", "token") - apiError(w, "Wrong auth", 401) - return false -} - -func handlerGetPing(w http.ResponseWriter, r *http.Request) { - writeResponse(w, true, 200) -} - -func handlerGetImage(w http.ResponseWriter, r *http.Request) { - if !requiresAuth(w, r) { - return - } - vars := mux.Vars(r) - layer, exists := testLayers[vars["image_id"]] - if !exists { - http.NotFound(w, r) - return - } - writeHeaders(w) - layerSize := len(layer["layer"]) - w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize)) - io.WriteString(w, layer[vars["action"]]) -} - -func handlerPutImage(w http.ResponseWriter, r *http.Request) { - if !requiresAuth(w, r) { - return - } - vars := mux.Vars(r) - imageID := vars["image_id"] - action := vars["action"] - layer, exists := testLayers[imageID] - if !exists { - if action != "json" { - http.NotFound(w, r) - return - } - layer = make(map[string]string) - testLayers[imageID] = layer - } - if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { - if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { - apiError(w, "Wrong checksum", 400) - return - } - } - body, err := ioutil.ReadAll(r.Body) - if err != nil { - apiError(w, fmt.Sprintf("Error: %s", err), 500) - return - } - layer[action] = string(body) - writeResponse(w, true, 200) -} - -func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) { - if !requiresAuth(w, r) { - return - } - repositoryName, err := reference.WithName(mux.Vars(r)["repository"]) - if err != nil { - apiError(w, "Could not parse repository", 400) - return - } - tags, exists := testRepositories[repositoryName.String()] - if !exists { - apiError(w, "Repository not found", 404) - return - } - if r.Method == "DELETE" { - delete(testRepositories, repositoryName.String()) - writeResponse(w, true, 200) - return - } - writeResponse(w, tags, 200) -} - -func handlerGetTag(w http.ResponseWriter, r *http.Request) { - if !requiresAuth(w, r) { - return - } - vars := mux.Vars(r) - repositoryName, err := reference.WithName(vars["repository"]) - if err != nil { - apiError(w, "Could not parse repository", 400) - return - } - tagName := vars["tag"] - tags, exists := testRepositories[repositoryName.String()] - if !exists { - apiError(w, "Repository not found", 404) - return - } - tag, exists := tags[tagName] - if !exists { - apiError(w, "Tag not found", 404) - return - } - writeResponse(w, tag, 200) -} - -func handlerPutTag(w http.ResponseWriter, r *http.Request) { - if !requiresAuth(w, r) { - return - } - vars := mux.Vars(r) - repositoryName, err := reference.WithName(vars["repository"]) - if err != nil { - apiError(w, "Could not parse repository", 400) - return - } - tagName := vars["tag"] - tags, exists := testRepositories[repositoryName.String()] - if !exists { - tags = make(map[string]string) - testRepositories[repositoryName.String()] = tags - } - tagValue := "" - readJSON(r, &tagValue) // json.Unmarshal needs a pointer to populate tagValue - tags[tagName] = tagValue - writeResponse(w, true, 200) -} - -func handlerUsers(w http.ResponseWriter, r *http.Request) { - code := 200 - if r.Method == "POST" { - code = 201 - } else if r.Method == "PUT" { - code = 204 - } - writeResponse(w, "", code) -} - -func handlerImages(w http.ResponseWriter, r *http.Request) { - u, _ := url.Parse(testHTTPServer.URL) - w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) - w.Header().Add("X-Docker-Token",
fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) - if r.Method == "PUT" { - if strings.HasSuffix(r.URL.Path, "images") { - writeResponse(w, "", 204) - return - } - writeResponse(w, "", 200) - return - } - if r.Method == "DELETE" { - writeResponse(w, "", 204) - return - } - images := []map[string]string{} - for imageID, layer := range testLayers { - image := make(map[string]string) - image["id"] = imageID - image["checksum"] = layer["checksum_tarsum"] - image["Tag"] = "latest" - images = append(images, image) - } - writeResponse(w, images, 200) -} - -func handlerAuth(w http.ResponseWriter, r *http.Request) { - writeResponse(w, "OK", 200) -} - -func handlerSearch(w http.ResponseWriter, r *http.Request) { - result := &registrytypes.SearchResults{ - Query: "fakequery", - NumResults: 1, - Results: []registrytypes.SearchResult{{Name: "fakeimage", StarCount: 42}}, - } - writeResponse(w, result, 200) -} - -func TestPing(t *testing.T) { - res, err := http.Get(makeURL("/v1/_ping")) - if err != nil { - t.Fatal(err) - } - assertEqual(t, res.StatusCode, 200, "") - assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", - "This is not a Mocked Registry") -} - -/* Uncomment this to test the mocked registry locally with curl - * WARNING: Don't push to the repos while this is uncommented; it'll block the tests - * -func TestWait(t *testing.T) { - logrus.Println("Test HTTP server ready and waiting:", testHTTPServer.URL) - c := make(chan int) - <-c -} - -//*/ diff --git a/registry/registry_test.go b/registry/registry_test.go deleted file mode 100644 index 9927af32d..000000000 --- a/registry/registry_test.go +++ /dev/null @@ -1,873 +0,0 @@ -package registry - -import ( - "fmt" - "net/http" - "net/http/httputil" - "net/url" - "strings" - "testing" - - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" -) - -var ( - token = []string{"fake-token"} -) - -const ( - imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" - REPO = "foo42/bar" -) - -func spawnTestRegistrySession(t *testing.T) *Session { - authConfig := &types.AuthConfig{} - endpoint, err := NewV1Endpoint(makeIndex("/v1/"), "", nil) - if err != nil { - t.Fatal(err) - } - userAgent := "docker test client" - var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log} - tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(userAgent, nil)...) - client := HTTPClient(tr) - r, err := NewSession(client, authConfig, endpoint) - if err != nil { - t.Fatal(err) - } - // In a normal scenario for the v1 registry, the client should send an `X-Docker-Token: true` - // header while authenticating, in order to retrieve a token that can later be used to - // perform authenticated actions. - // - // The mock v1 registry does not support that (TODO(tiborvass): support it); instead, - // it considers any request carrying the header `X-Docker-Token: fake-token` authenticated. - // - // Because we know that the client's transport is an `*authTransport` we simply cast it, - // in order to set the internal cached token to the fake token, and thus send that fake token - // on every subsequent request.
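// Editor's note, not part of the original patch: a minimal sketch of the
// transport behavior the comment above relies on. Once a token is cached, it
// is replayed as `Authorization: Token <value>` on every request that does not
// already carry an Authorization header. The type and field names below are
// hypothetical; cloneRequest is the helper defined in registry/session.go
// further down in this patch.
//
//	type tokenReplayTransport struct {
//		base  http.RoundTripper
//		token []string // cached token, like authTransport.token
//	}
//
//	func (t *tokenReplayTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
//		req := cloneRequest(orig) // never mutate the caller's request
//		if req.Header.Get("Authorization") == "" && len(t.token) > 0 {
//			req.Header.Set("Authorization", "Token "+strings.Join(t.token, ","))
//		}
//		return t.base.RoundTrip(req)
//	}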
- r.client.Transport.(*authTransport).token = token - return r -} - -func TestPingRegistryEndpoint(t *testing.T) { - testPing := func(index *registrytypes.IndexInfo, expectedStandalone bool, assertMessage string) { - ep, err := NewV1Endpoint(index, "", nil) - if err != nil { - t.Fatal(err) - } - regInfo, err := ep.Ping() - if err != nil { - t.Fatal(err) - } - - assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage) - } - - testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") - testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)") - testPing(makePublicIndex(), false, "Expected standalone to be false for public index") -} - -func TestEndpoint(t *testing.T) { - // Simple wrapper to fail test if err != nil - expandEndpoint := func(index *registrytypes.IndexInfo) *V1Endpoint { - endpoint, err := NewV1Endpoint(index, "", nil) - if err != nil { - t.Fatal(err) - } - return endpoint - } - - assertInsecureIndex := func(index *registrytypes.IndexInfo) { - index.Secure = true - _, err := NewV1Endpoint(index, "", nil) - assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index") - assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index") - index.Secure = false - } - - assertSecureIndex := func(index *registrytypes.IndexInfo) { - index.Secure = true - _, err := NewV1Endpoint(index, "", nil) - assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index") - assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index") - index.Secure = false - } - - index := &registrytypes.IndexInfo{} - index.Name = makeURL("/v1/") - endpoint := expandEndpoint(index) - assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) - assertInsecureIndex(index) - - index.Name = makeURL("") - endpoint = expandEndpoint(index) - assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") - assertInsecureIndex(index) - - httpURL := makeURL("") - index.Name = strings.SplitN(httpURL, "://", 2)[1] - endpoint = expandEndpoint(index) - assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") - assertInsecureIndex(index) - - index.Name = makeHTTPSURL("/v1/") - endpoint = expandEndpoint(index) - assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) - assertSecureIndex(index) - - index.Name = makeHTTPSURL("") - endpoint = expandEndpoint(index) - assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") - assertSecureIndex(index) - - httpsURL := makeHTTPSURL("") - index.Name = strings.SplitN(httpsURL, "://", 2)[1] - endpoint = expandEndpoint(index) - assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/") - assertSecureIndex(index) - - badEndpoints := []string{ - "http://127.0.0.1/v1/", - "https://127.0.0.1/v1/", - "http://127.0.0.1", - "https://127.0.0.1", - "127.0.0.1", - } - for _, address := range badEndpoints { - index.Name = address - _, err := NewV1Endpoint(index, "", nil) - checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint") - } -} - -func TestGetRemoteHistory(t *testing.T) { - r := spawnTestRegistrySession(t) - hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/")) - if err != nil {
- t.Fatal(err) - } - assertEqual(t, len(hist), 2, "Expected 2 images in history") - assertEqual(t, hist[0], imageID, "Expected "+imageID+" as first ancestry") - assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", - "Unexpected second ancestry") -} - -func TestLookupRemoteImage(t *testing.T) { - r := spawnTestRegistrySession(t) - err := r.LookupRemoteImage(imageID, makeURL("/v1/")) - assertEqual(t, err, nil, "Expected error of remote lookup to be nil") - if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil { - t.Fatal("Expected error of remote lookup to not be nil") - } -} - -func TestGetRemoteImageJSON(t *testing.T) { - r := spawnTestRegistrySession(t) - json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/")) - if err != nil { - t.Fatal(err) - } - assertEqual(t, size, int64(154), "Expected size 154") - if len(json) == 0 { - t.Fatal("Expected non-empty json") - } - - _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/")) - if err == nil { - t.Fatal("Expected image not found error") - } -} - -func TestGetRemoteImageLayer(t *testing.T) { - r := spawnTestRegistrySession(t) - data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0) - if err != nil { - t.Fatal(err) - } - if data == nil { - t.Fatal("Expected non-nil data result") - } - - _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0) - if err == nil { - t.Fatal("Expected image not found error") - } -} - -func TestGetRemoteTag(t *testing.T) { - r := spawnTestRegistrySession(t) - repoRef, err := reference.ParseNamed(REPO) - if err != nil { - t.Fatal(err) - } - tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, repoRef, "test") - if err != nil { - t.Fatal(err) - } - assertEqual(t, tag, imageID, "Expected tag test to map to "+imageID) - - bazRef, err := reference.ParseNamed("foo42/baz") - if err != nil { - t.Fatal(err) - } - _, err = r.GetRemoteTag([]string{makeURL("/v1/")}, bazRef, "foo") - if err != ErrRepoNotFound { - t.Fatal("Expected ErrRepoNotFound error when fetching tag for bogus repo") - } -} - -func TestGetRemoteTags(t *testing.T) { - r := spawnTestRegistrySession(t) - repoRef, err := reference.ParseNamed(REPO) - if err != nil { - t.Fatal(err) - } - tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, repoRef) - if err != nil { - t.Fatal(err) - } - assertEqual(t, len(tags), 2, "Expected two tags") - assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID) - assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID) - - bazRef, err := reference.ParseNamed("foo42/baz") - if err != nil { - t.Fatal(err) - } - _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, bazRef) - if err != ErrRepoNotFound { - t.Fatal("Expected ErrRepoNotFound error when fetching tags for bogus repo") - } -} - -func TestGetRepositoryData(t *testing.T) { - r := spawnTestRegistrySession(t) - parsedURL, err := url.Parse(makeURL("/v1/")) - if err != nil { - t.Fatal(err) - } - host := "http://" + parsedURL.Host + "/v1/" - repoRef, err := reference.ParseNamed(REPO) - if err != nil { - t.Fatal(err) - } - data, err := r.GetRepositoryData(repoRef) - if err != nil { - t.Fatal(err) - } - assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList") - assertEqual(t, len(data.Endpoints), 2, - fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints))) - assertEqual(t, data.Endpoints[0], host, - fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0])) - assertEqual(t,
data.Endpoints[1], "http://test.example.com/v1/", - fmt.Sprintf("Expected second endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1])) - -} - -func TestPushImageJSONRegistry(t *testing.T) { - r := spawnTestRegistrySession(t) - imgData := &ImgData{ - ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", - Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", - } - - err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/")) - if err != nil { - t.Fatal(err) - } -} - -func TestPushImageLayerRegistry(t *testing.T) { - r := spawnTestRegistrySession(t) - layer := strings.NewReader("") - _, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{}) - if err != nil { - t.Fatal(err) - } -} - -func TestParseRepositoryInfo(t *testing.T) { - type staticRepositoryInfo struct { - Index *registrytypes.IndexInfo - RemoteName string - CanonicalName string - LocalName string - Official bool - } - - expectedRepoInfos := map[string]staticRepositoryInfo{ - "fooo/bar": { - Index: &registrytypes.IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "fooo/bar", - LocalName: "fooo/bar", - CanonicalName: "docker.io/fooo/bar", - Official: false, - }, - "library/ubuntu": { - Index: &registrytypes.IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "library/ubuntu", - LocalName: "ubuntu", - CanonicalName: "docker.io/library/ubuntu", - Official: true, - }, - "nonlibrary/ubuntu": { - Index: &registrytypes.IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "nonlibrary/ubuntu", - LocalName: "nonlibrary/ubuntu", - CanonicalName: "docker.io/nonlibrary/ubuntu", - Official: false, - }, - "ubuntu": { - Index: &registrytypes.IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "library/ubuntu", - LocalName: "ubuntu", - CanonicalName: "docker.io/library/ubuntu", - Official: true, - }, - "other/library": { - Index: &registrytypes.IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "other/library", - LocalName: "other/library", - CanonicalName: "docker.io/other/library", - Official: false, - }, - "127.0.0.1:8000/private/moonbase": { - Index: &registrytypes.IndexInfo{ - Name: "127.0.0.1:8000", - Official: false, - }, - RemoteName: "private/moonbase", - LocalName: "127.0.0.1:8000/private/moonbase", - CanonicalName: "127.0.0.1:8000/private/moonbase", - Official: false, - }, - "127.0.0.1:8000/privatebase": { - Index: &registrytypes.IndexInfo{ - Name: "127.0.0.1:8000", - Official: false, - }, - RemoteName: "privatebase", - LocalName: "127.0.0.1:8000/privatebase", - CanonicalName: "127.0.0.1:8000/privatebase", - Official: false, - }, - "localhost:8000/private/moonbase": { - Index: &registrytypes.IndexInfo{ - Name: "localhost:8000", - Official: false, - }, - RemoteName: "private/moonbase", - LocalName: "localhost:8000/private/moonbase", - CanonicalName: "localhost:8000/private/moonbase", - Official: false, - }, - "localhost:8000/privatebase": { - Index: &registrytypes.IndexInfo{ - Name: "localhost:8000", - Official: false, - }, - RemoteName: "privatebase", - LocalName: "localhost:8000/privatebase", - CanonicalName: "localhost:8000/privatebase", - Official: false, - }, - "example.com/private/moonbase": { - Index: &registrytypes.IndexInfo{ - Name: "example.com", - Official: false, - }, - RemoteName: "private/moonbase", - LocalName: "example.com/private/moonbase", - CanonicalName: "example.com/private/moonbase", - Official: false, - }, - "example.com/privatebase": { - Index:
&registrytypes.IndexInfo{ - Name: "example.com", - Official: false, - }, - RemoteName: "privatebase", - LocalName: "example.com/privatebase", - CanonicalName: "example.com/privatebase", - Official: false, - }, - "example.com:8000/private/moonbase": { - Index: &registrytypes.IndexInfo{ - Name: "example.com:8000", - Official: false, - }, - RemoteName: "private/moonbase", - LocalName: "example.com:8000/private/moonbase", - CanonicalName: "example.com:8000/private/moonbase", - Official: false, - }, - "example.com:8000/privatebase": { - Index: &registrytypes.IndexInfo{ - Name: "example.com:8000", - Official: false, - }, - RemoteName: "privatebase", - LocalName: "example.com:8000/privatebase", - CanonicalName: "example.com:8000/privatebase", - Official: false, - }, - "localhost/private/moonbase": { - Index: &registrytypes.IndexInfo{ - Name: "localhost", - Official: false, - }, - RemoteName: "private/moonbase", - LocalName: "localhost/private/moonbase", - CanonicalName: "localhost/private/moonbase", - Official: false, - }, - "localhost/privatebase": { - Index: &registrytypes.IndexInfo{ - Name: "localhost", - Official: false, - }, - RemoteName: "privatebase", - LocalName: "localhost/privatebase", - CanonicalName: "localhost/privatebase", - Official: false, - }, - IndexName + "/public/moonbase": { - Index: &registrytypes.IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "public/moonbase", - LocalName: "public/moonbase", - CanonicalName: "docker.io/public/moonbase", - Official: false, - }, - "index." + IndexName + "/public/moonbase": { - Index: &registrytypes.IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "public/moonbase", - LocalName: "public/moonbase", - CanonicalName: "docker.io/public/moonbase", - Official: false, - }, - "ubuntu-12.04-base": { - Index: &registrytypes.IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "library/ubuntu-12.04-base", - LocalName: "ubuntu-12.04-base", - CanonicalName: "docker.io/library/ubuntu-12.04-base", - Official: true, - }, - IndexName + "/ubuntu-12.04-base": { - Index: &registrytypes.IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "library/ubuntu-12.04-base", - LocalName: "ubuntu-12.04-base", - CanonicalName: "docker.io/library/ubuntu-12.04-base", - Official: true, - }, - "index."
+ IndexName + "/ubuntu-12.04-base": { - Index: &registrytypes.IndexInfo{ - Name: IndexName, - Official: true, - }, - RemoteName: "library/ubuntu-12.04-base", - LocalName: "ubuntu-12.04-base", - CanonicalName: "docker.io/library/ubuntu-12.04-base", - Official: true, - }, - } - - for reposName, expectedRepoInfo := range expectedRepoInfos { - named, err := reference.WithName(reposName) - if err != nil { - t.Error(err) - } - - repoInfo, err := ParseRepositoryInfo(named) - if err != nil { - t.Error(err) - } else { - checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) - checkEqual(t, repoInfo.RemoteName(), expectedRepoInfo.RemoteName, reposName) - checkEqual(t, repoInfo.Name(), expectedRepoInfo.LocalName, reposName) - checkEqual(t, repoInfo.FullName(), expectedRepoInfo.CanonicalName, reposName) - checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName) - checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) - } - } -} - -func TestNewIndexInfo(t *testing.T) { - testIndexInfo := func(config *serviceConfig, expectedIndexInfos map[string]*registrytypes.IndexInfo) { - for indexName, expectedIndexInfo := range expectedIndexInfos { - index, err := newIndexInfo(config, indexName) - if err != nil { - t.Fatal(err) - } else { - checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name") - checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official") - checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure") - checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors") - } - } - } - - config := newServiceConfig(ServiceOptions{}) - noMirrors := []string{} - expectedIndexInfos := map[string]*registrytypes.IndexInfo{ - IndexName: { - Name: IndexName, - Official: true, - Secure: true, - Mirrors: noMirrors, - }, - "index." + IndexName: { - Name: IndexName, - Official: true, - Secure: true, - Mirrors: noMirrors, - }, - "example.com": { - Name: "example.com", - Official: false, - Secure: true, - Mirrors: noMirrors, - }, - "127.0.0.1:5000": { - Name: "127.0.0.1:5000", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - } - testIndexInfo(config, expectedIndexInfos) - - publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"} - config = makeServiceConfig(publicMirrors, []string{"example.com"}) - - expectedIndexInfos = map[string]*registrytypes.IndexInfo{ - IndexName: { - Name: IndexName, - Official: true, - Secure: true, - Mirrors: publicMirrors, - }, - "index."
+ IndexName: { - Name: IndexName, - Official: true, - Secure: true, - Mirrors: publicMirrors, - }, - "example.com": { - Name: "example.com", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "example.com:5000": { - Name: "example.com:5000", - Official: false, - Secure: true, - Mirrors: noMirrors, - }, - "127.0.0.1": { - Name: "127.0.0.1", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "127.0.0.1:5000": { - Name: "127.0.0.1:5000", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "other.com": { - Name: "other.com", - Official: false, - Secure: true, - Mirrors: noMirrors, - }, - } - testIndexInfo(config, expectedIndexInfos) - - config = makeServiceConfig(nil, []string{"42.42.0.0/16"}) - expectedIndexInfos = map[string]*registrytypes.IndexInfo{ - "example.com": { - Name: "example.com", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "example.com:5000": { - Name: "example.com:5000", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "127.0.0.1": { - Name: "127.0.0.1", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "127.0.0.1:5000": { - Name: "127.0.0.1:5000", - Official: false, - Secure: false, - Mirrors: noMirrors, - }, - "other.com": { - Name: "other.com", - Official: false, - Secure: true, - Mirrors: noMirrors, - }, - } - testIndexInfo(config, expectedIndexInfos) -} - -func TestMirrorEndpointLookup(t *testing.T) { - containsMirror := func(endpoints []APIEndpoint) bool { - for _, pe := range endpoints { - if pe.URL.Host == "my.mirror" { - return true - } - } - return false - } - s := DefaultService{config: makeServiceConfig([]string{"my.mirror"}, nil)} - - imageName, err := reference.WithName(IndexName + "/test/image") - if err != nil { - t.Error(err) - } - pushAPIEndpoints, err := s.LookupPushEndpoints(imageName.Hostname()) - if err != nil { - t.Fatal(err) - } - if containsMirror(pushAPIEndpoints) { - t.Fatal("Push endpoint should not contain mirror") - } - - pullAPIEndpoints, err := s.LookupPullEndpoints(imageName.Hostname()) - if err != nil { - t.Fatal(err) - } - if !containsMirror(pullAPIEndpoints) { - t.Fatal("Pull endpoint should contain mirror") - } -} - -func TestPushRegistryTag(t *testing.T) { - r := spawnTestRegistrySession(t) - repoRef, err := reference.ParseNamed(REPO) - if err != nil { - t.Fatal(err) - } - err = r.PushRegistryTag(repoRef, imageID, "stable", makeURL("/v1/")) - if err != nil { - t.Fatal(err) - } -} - -func TestPushImageJSONIndex(t *testing.T) { - r := spawnTestRegistrySession(t) - imgData := []*ImgData{ - { - ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", - Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", - }, - { - ID: "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", - Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", - }, - } - repoRef, err := reference.ParseNamed(REPO) - if err != nil { - t.Fatal(err) - } - repoData, err := r.PushImageJSONIndex(repoRef, imgData, false, nil) - if err != nil { - t.Fatal(err) - } - if repoData == nil { - t.Fatal("Expected RepositoryData object") - } - repoData, err = r.PushImageJSONIndex(repoRef, imgData, true, []string{r.indexEndpoint.String()}) - if err != nil { - t.Fatal(err) - } - if repoData == nil { - t.Fatal("Expected RepositoryData object") - } -} - -func TestSearchRepositories(t *testing.T) { - r := spawnTestRegistrySession(t) - results, err := r.SearchRepositories("fakequery", 25) - if err != nil { - t.Fatal(err) 
- } - if results == nil { - t.Fatal("Expected non-nil SearchResults object") - } - assertEqual(t, results.NumResults, 1, "Expected 1 search result") - assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query") - assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars") -} - -func TestTrustedLocation(t *testing.T) { - for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { - req, _ := http.NewRequest("GET", url, nil) - if trustedLocation(req) == true { - t.Fatalf("'%s' shouldn't be detected as a trusted location", url) - } - } - - for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { - req, _ := http.NewRequest("GET", url, nil) - if trustedLocation(req) == false { - t.Fatalf("'%s' should be detected as a trusted location", url) - } - } -} - -func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { - for _, urls := range [][]string{ - {"http://docker.io", "https://docker.com"}, - {"https://foo.docker.io:7777", "http://bar.docker.com"}, - {"https://foo.docker.io", "https://example.com"}, - } { - reqFrom, _ := http.NewRequest("GET", urls[0], nil) - reqFrom.Header.Add("Content-Type", "application/json") - reqFrom.Header.Add("Authorization", "super_secret") - reqTo, _ := http.NewRequest("GET", urls[1], nil) - - addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) - - if len(reqTo.Header) != 1 { - t.Fatalf("Expected 1 header, got %d", len(reqTo.Header)) - } - - if reqTo.Header.Get("Content-Type") != "application/json" { - t.Fatal("'Content-Type' should be 'application/json'") - } - - if reqTo.Header.Get("Authorization") != "" { - t.Fatal("'Authorization' should be empty") - } - } - - for _, urls := range [][]string{ - {"https://docker.io", "https://docker.com"}, - {"https://foo.docker.io:7777", "https://bar.docker.com"}, - } { - reqFrom, _ := http.NewRequest("GET", urls[0], nil) - reqFrom.Header.Add("Content-Type", "application/json") - reqFrom.Header.Add("Authorization", "super_secret") - reqTo, _ := http.NewRequest("GET", urls[1], nil) - - addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) - - if len(reqTo.Header) != 2 { - t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header)) - } - - if reqTo.Header.Get("Content-Type") != "application/json" { - t.Fatal("'Content-Type' should be 'application/json'") - } - - if reqTo.Header.Get("Authorization") != "super_secret" { - t.Fatal("'Authorization' should be 'super_secret'") - } - } -} - -func TestIsSecureIndex(t *testing.T) { - tests := []struct { - addr string - insecureRegistries []string - expected bool - }{ - {IndexName, nil, true}, - {"example.com", []string{}, true}, - {"example.com", []string{"example.com"}, false}, - {"localhost", []string{"localhost:5000"}, false}, - {"localhost:5000", []string{"localhost:5000"}, false}, - {"localhost", []string{"example.com"}, false}, - {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, - {"localhost", nil, false}, - {"localhost:5000", nil, false}, - {"127.0.0.1", nil, false}, - {"localhost", []string{"example.com"}, false}, - {"127.0.0.1", []string{"example.com"}, false}, - {"example.com", nil, true}, - {"example.com", []string{"example.com"}, false}, - {"127.0.0.1", []string{"example.com"}, false}, - {"127.0.0.1:5000", []string{"example.com"}, false}, - {"example.com:5000", []string{"42.42.0.0/16"}, false}, - {"example.com", []string{"42.42.0.0/16"}, false}, - {"example.com:5000",
[]string{"42.42.42.42/8"}, false}, - {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, - {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, - {"invalid.domain.com", []string{"42.42.0.0/16"}, true}, - {"invalid.domain.com", []string{"invalid.domain.com"}, false}, - {"invalid.domain.com:5000", []string{"invalid.domain.com"}, true}, - {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, - } - for _, tt := range tests { - config := makeServiceConfig(nil, tt.insecureRegistries) - if sec := isSecureIndex(config, tt.addr); sec != tt.expected { - t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) - } - } -} - -type debugTransport struct { - http.RoundTripper - log func(...interface{}) -} - -func (tr debugTransport) RoundTrip(req *http.Request) (*http.Response, error) { - dump, err := httputil.DumpRequestOut(req, false) - if err != nil { - tr.log("could not dump request") - } - tr.log(string(dump)) - resp, err := tr.RoundTripper.RoundTrip(req) - if err != nil { - return nil, err - } - dump, err = httputil.DumpResponse(resp, false) - if err != nil { - tr.log("could not dump response") - } - tr.log(string(dump)) - return resp, err -} diff --git a/registry/service.go b/registry/service.go deleted file mode 100644 index dbc16284f..000000000 --- a/registry/service.go +++ /dev/null @@ -1,260 +0,0 @@ -package registry - -import ( - "crypto/tls" - "fmt" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" -) - -const ( - // DefaultSearchLimit is the default value for maximum number of returned search results. - DefaultSearchLimit = 25 -) - -// Service is the interface defining what a registry service should implement. -type Service interface { - Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) - LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) - LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) - ResolveRepository(name reference.Named) (*RepositoryInfo, error) - ResolveIndex(name string) (*registrytypes.IndexInfo, error) - Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) - ServiceConfig() *registrytypes.ServiceConfig - TLSConfig(hostname string) (*tls.Config, error) -} - -// DefaultService is a registry service. It tracks configuration data such as a list -// of mirrors. -type DefaultService struct { - config *serviceConfig -} - -// NewService returns a new instance of DefaultService ready to be -// installed into an engine. -func NewService(options ServiceOptions) *DefaultService { - return &DefaultService{ - config: newServiceConfig(options), - } -} - -// ServiceConfig returns the public registry service configuration. -func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { - return &s.config.ServiceConfig -} - -// Auth contacts the public registry with the provided credentials, -// and returns OK if authentication was successful. -// It can be used to verify the validity of a client's credentials. 
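// Editor's usage sketch, not in the original source; the values below are
// hypothetical:
//
//	svc := NewService(ServiceOptions{})
//	status, token, err := svc.Auth(context.Background(),
//		&types.AuthConfig{Username: "user", Password: "s3cret", ServerAddress: "registry.example.com"},
//		"engine-test")
//
// On success, status is a human-readable message from the endpoint and token is
// an identity token when the endpoint issues one. As the implementation below
// shows, an empty ServerAddress falls back to IndexServer, and a missing scheme
// defaults to https://.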
-func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) { - // TODO Use ctx when searching for repositories - serverAddress := authConfig.ServerAddress - if serverAddress == "" { - serverAddress = IndexServer - } - if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { - serverAddress = "https://" + serverAddress - } - u, err := url.Parse(serverAddress) - if err != nil { - return "", "", fmt.Errorf("unable to parse server address: %v", err) - } - - endpoints, err := s.LookupPushEndpoints(u.Host) - if err != nil { - return "", "", err - } - - for _, endpoint := range endpoints { - login := loginV2 - if endpoint.Version == APIVersion1 { - login = loginV1 - } - - status, token, err = login(authConfig, endpoint, userAgent) - if err == nil { - return - } - if fErr, ok := err.(fallbackError); ok { - err = fErr.err - logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err) - continue - } - return "", "", err - } - - return "", "", err -} - -// splitReposSearchTerm breaks a search term into an index name and remote name -func splitReposSearchTerm(reposName string) (string, string) { - nameParts := strings.SplitN(reposName, "/", 2) - var indexName, remoteName string - if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && - !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { - // This is a repository on the default Docker Index ('docker.io'), - // e.g. samalba/hipache or ubuntu - indexName = IndexName - remoteName = reposName - } else { - indexName = nameParts[0] - remoteName = nameParts[1] - } - return indexName, remoteName -} - -// Search queries the public registry for images matching the specified -// search terms, and returns the results. -func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { - // TODO Use ctx when searching for repositories - if err := validateNoScheme(term); err != nil { - return nil, err - } - - indexName, remoteName := splitReposSearchTerm(term) - - index, err := newIndexInfo(s.config, indexName) - if err != nil { - return nil, err - } - - // TODO: Search multiple indexes.
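// Editor's illustration, not in the original source (inputs are hypothetical):
// splitReposSearchTerm above routes a term to the official index unless its
// first path component looks like a hostname, i.e. contains "." or ":" or is
// exactly "localhost":
//
//	splitReposSearchTerm("busybox")                      // -> ("docker.io", "busybox")
//	splitReposSearchTerm("samalba/hipache")              // -> ("docker.io", "samalba/hipache")
//	splitReposSearchTerm("localhost/team/app")           // -> ("localhost", "team/app")
//	splitReposSearchTerm("registry.local:5000/team/app") // -> ("registry.local:5000", "team/app")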
- endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers)) - if err != nil { - return nil, err - } - - var client *http.Client - if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { - creds := NewStaticCredentialStore(authConfig) - scopes := []auth.Scope{ - auth.RegistryScope{ - Name: "catalog", - Actions: []string{"search"}, - }, - } - - modifiers := DockerHeaders(userAgent, nil) - v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) - if err != nil { - if fErr, ok := err.(fallbackError); ok { - logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err) - } else { - return nil, err - } - } else if foundV2 { - // Copy non transport http client features - v2Client.Timeout = endpoint.client.Timeout - v2Client.CheckRedirect = endpoint.client.CheckRedirect - v2Client.Jar = endpoint.client.Jar - - logrus.Debugf("using v2 client for search to %s", endpoint.URL) - client = v2Client - } - } - - if client == nil { - client = endpoint.client - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - } - - r := newSession(client, authConfig, endpoint) - - if index.Official { - localName := remoteName - if strings.HasPrefix(localName, "library/") { - // If pull "library/foo", it's stored locally under "foo" - localName = strings.SplitN(localName, "/", 2)[1] - } - - return r.SearchRepositories(localName, limit) - } - return r.SearchRepositories(remoteName, limit) -} - -// ResolveRepository splits a repository name into its components -// and configuration of the associated registry. -func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { - return newRepositoryInfo(s.config, name) -} - -// ResolveIndex takes indexName and returns index info -func (s *DefaultService) ResolveIndex(name string) (*registrytypes.IndexInfo, error) { - return newIndexInfo(s.config, name) -} - -// APIEndpoint represents a remote API endpoint -type APIEndpoint struct { - Mirror bool - URL *url.URL - Version APIVersion - Official bool - TrimHostname bool - TLSConfig *tls.Config -} - -// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint -func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) -} - -// TLSConfig constructs a client TLS configuration based on server defaults -func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { - return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) -} - -func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { - return s.TLSConfig(mirrorURL.Host) -} - -// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference. -// It gives preference to v2 endpoints over v1, mirrors over the actual -// registry, and HTTPS over plain HTTP. -func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - return s.lookupEndpoints(hostname) -} - -// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference. -// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. -// Mirrors are not included. 
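// Editor's illustration, not in the original source: with a hypothetical
// config holding one mirror and V2Only unset, the two lookups differ only in
// the mirror entries, and both keep v2 endpoints ahead of the v1 legacy ones.
//
//	LookupPullEndpoints("docker.io") // -> [mirror (v2, Mirror: true), official v2 registry, official v1 registry]
//	LookupPushEndpoints("docker.io") // -> [official v2 registry, official v1 registry]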
-func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - allEndpoints, err := s.lookupEndpoints(hostname) - if err == nil { - for _, endpoint := range allEndpoints { - if !endpoint.Mirror { - endpoints = append(endpoints, endpoint) - } - } - } - return endpoints, err -} - -func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - endpoints, err = s.lookupV2Endpoints(hostname) - if err != nil { - return nil, err - } - - if s.config.V2Only { - return endpoints, nil - } - - legacyEndpoints, err := s.lookupV1Endpoints(hostname) - if err != nil { - return nil, err - } - endpoints = append(endpoints, legacyEndpoints...) - - return endpoints, nil -} diff --git a/registry/service_v1.go b/registry/service_v1.go deleted file mode 100644 index 5d7e89891..000000000 --- a/registry/service_v1.go +++ /dev/null @@ -1,53 +0,0 @@ -package registry - -import ( - "net/url" - - "github.com/docker/go-connections/tlsconfig" -) - -func (s *DefaultService) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) { - var cfg = tlsconfig.ServerDefault - tlsConfig := &cfg - if hostname == DefaultNamespace { - endpoints = append(endpoints, APIEndpoint{ - URL: DefaultV1Registry, - Version: APIVersion1, - Official: true, - TrimHostname: true, - TLSConfig: tlsConfig, - }) - return endpoints, nil - } - - tlsConfig, err = s.TLSConfig(hostname) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: &url.URL{ - Scheme: "https", - Host: hostname, - }, - Version: APIVersion1, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = append(endpoints, APIEndpoint{ // or this - URL: &url.URL{ - Scheme: "http", - Host: hostname, - }, - Version: APIVersion1, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - return endpoints, nil -} diff --git a/registry/service_v2.go b/registry/service_v2.go deleted file mode 100644 index 5e62f8ff8..000000000 --- a/registry/service_v2.go +++ /dev/null @@ -1,79 +0,0 @@ -package registry - -import ( - "net/url" - "strings" - - "github.com/docker/go-connections/tlsconfig" -) - -func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { - var cfg = tlsconfig.ServerDefault - tlsConfig := &cfg - if hostname == DefaultNamespace || hostname == DefaultV1Registry.Host { - // v2 mirrors - for _, mirror := range s.config.Mirrors { - if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { - mirror = "https://" + mirror - } - mirrorURL, err := url.Parse(mirror) - if err != nil { - return nil, err - } - mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) - if err != nil { - return nil, err - } - endpoints = append(endpoints, APIEndpoint{ - URL: mirrorURL, - // guess mirrors are v2 - Version: APIVersion2, - Mirror: true, - TrimHostname: true, - TLSConfig: mirrorTLSConfig, - }) - } - // v2 registry - endpoints = append(endpoints, APIEndpoint{ - URL: DefaultV2Registry, - Version: APIVersion2, - Official: true, - TrimHostname: true, - TLSConfig: tlsConfig, - }) - - return endpoints, nil - } - - tlsConfig, err = s.TLSConfig(hostname) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: &url.URL{ - Scheme: "https", - Host: hostname, - }, - Version: APIVersion2, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = 
append(endpoints, APIEndpoint{ - URL: &url.URL{ - Scheme: "http", - Host: hostname, - }, - Version: APIVersion2, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - - return endpoints, nil -} diff --git a/registry/session.go b/registry/session.go deleted file mode 100644 index d48b9e8d2..000000000 --- a/registry/session.go +++ /dev/null @@ -1,783 +0,0 @@ -package registry - -import ( - "bytes" - "crypto/sha256" - "errors" - "sync" - // this is required for some certificates - _ "crypto/sha512" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/cookiejar" - "net/url" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" -) - -var ( - // ErrRepoNotFound is returned if the repository didn't exist on the - // remote side - ErrRepoNotFound = errors.New("Repository not found") -) - -// A Session is used to communicate with a V1 registry -type Session struct { - indexEndpoint *V1Endpoint - client *http.Client - // TODO(tiborvass): remove authConfig - authConfig *types.AuthConfig - id string -} - -type authTransport struct { - http.RoundTripper - *types.AuthConfig - - alwaysSetBasicAuth bool - token []string - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) -// -// For private v1 registries, set alwaysSetBasicAuth to true. -// -// For the official v1 registry, if there isn't already an Authorization header in the request, -// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. -// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing -// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent -// requests. -// -// If the server sends a token without the client having requested it, it is ignored. -// -// This RoundTripper also has a CancelRequest method important for correct timeout handling. -func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { - if base == nil { - base = http.DefaultTransport - } - return &authTransport{ - RoundTripper: base, - AuthConfig: authConfig, - alwaysSetBasicAuth: alwaysSetBasicAuth, - modReq: make(map[*http.Request]*http.Request), - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - - return r2 -} - -// RoundTrip changes an HTTP request's headers to add the necessary -// authentication-related headers -func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { - // Authorization should not be set on 302 redirect for untrusted locations. 
- // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. - // As the authorization logic is currently implemented in RoundTrip, - // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. - // This is safe as Docker doesn't set Referrer in other scenarios. - if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { - return tr.RoundTripper.RoundTrip(orig) - } - - req := cloneRequest(orig) - tr.mu.Lock() - tr.modReq[orig] = req - tr.mu.Unlock() - - if tr.alwaysSetBasicAuth { - if tr.AuthConfig == nil { - return nil, errors.New("unexpected error: empty auth config") - } - req.SetBasicAuth(tr.Username, tr.Password) - return tr.RoundTripper.RoundTrip(req) - } - - // Don't override - if req.Header.Get("Authorization") == "" { - if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { - req.SetBasicAuth(tr.Username, tr.Password) - } else if len(tr.token) > 0 { - req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) - } - } - resp, err := tr.RoundTripper.RoundTrip(req) - if err != nil { - delete(tr.modReq, orig) - return nil, err - } - if len(resp.Header["X-Docker-Token"]) > 0 { - tr.token = resp.Header["X-Docker-Token"] - } - resp.Body = &ioutils.OnEOFReader{ - Rc: resp.Body, - Fn: func() { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - }, - } - return resp, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (tr *authTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := tr.RoundTripper.(canceler); ok { - tr.mu.Lock() - modReq := tr.modReq[req] - delete(tr.modReq, req) - tr.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error { - var alwaysSetBasicAuth bool - - // If we're working with a standalone private registry over HTTPS, send Basic Auth headers - // alongside all our requests. - if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { - info, err := endpoint.Ping() - if err != nil { - return err - } - if info.Standalone && authConfig != nil { - logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) - alwaysSetBasicAuth = true - } - } - - // Annotate the transport unconditionally so that v2 can - // properly fallback on v1 when an image is not found. - client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) - - jar, err := cookiejar.New(nil) - if err != nil { - return errors.New("cookiejar.New is not supposed to return an error") - } - client.Jar = jar - - return nil -} - -func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session { - return &Session{ - authConfig: authConfig, - client: client, - indexEndpoint: endpoint, - id: stringid.GenerateRandomID(), - } -} - -// NewSession creates a new session -// TODO(tiborvass): remove authConfig param once registry client v2 is vendored -func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) { - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - - return newSession(client, authConfig, endpoint), nil -} - -// ID returns this registry session's ID. -func (r *Session) ID() string { - return r.id -} - -// GetRemoteHistory retrieves the history of a given image from the registry. 
-// It returns a list of the parent's JSON files (including the requested image). -func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { - res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != 200 { - if res.StatusCode == 401 { - return nil, errcode.ErrorCodeUnauthorized.WithArgs() - } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) - } - - var history []string - if err := json.NewDecoder(res.Body).Decode(&history); err != nil { - return nil, fmt.Errorf("Error while reading the http response: %v", err) - } - - logrus.Debugf("Ancestry: %v", history) - return history, nil -} - -// LookupRemoteImage checks if an image exists in the registry -func (r *Session) LookupRemoteImage(imgID, registry string) error { - res, err := r.client.Get(registry + "images/" + imgID + "/json") - if err != nil { - return err - } - res.Body.Close() - if res.StatusCode != 200 { - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) - } - return nil -} - -// GetRemoteImageJSON retrieves an image's JSON metadata from the registry. -func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { - res, err := r.client.Get(registry + "images/" + imgID + "/json") - if err != nil { - return nil, -1, fmt.Errorf("Failed to download json: %s", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) - } - // if the size header is not present, then set it to '-1' - imageSize := int64(-1) - if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { - imageSize, err = strconv.ParseInt(hdr, 10, 64) - if err != nil { - return nil, -1, err - } - } - - jsonString, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) - } - return jsonString, imageSize, nil -} - -// GetRemoteImageLayer retrieves an image layer from the registry -func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { - var ( - statusCode = 0 - res *http.Response - err error - imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) - ) - - req, err := http.NewRequest("GET", imageURL, nil) - if err != nil { - return nil, fmt.Errorf("Error while getting from the server: %v", err) - } - statusCode = 0 - res, err = r.client.Do(req) - if err != nil { - logrus.Debugf("Error contacting registry %s: %v", registry, err) - // the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515 - if res != nil { - if res.Body != nil { - res.Body.Close() - } - statusCode = res.StatusCode - } - return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", - statusCode, imgID) - } - - if res.StatusCode != 200 { - res.Body.Close() - return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", - res.StatusCode, imgID) - } - - if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { - logrus.Debug("server supports resume") - return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil - } - logrus.Debug("server doesn't support resume") - return res.Body, nil -} - -// GetRemoteTag retrieves the tag named in the askedTag argument from the given -// repository. 
It queries each of the registries supplied in the registries - argument, and returns data from the first one that answers the query - successfully. -func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { - repository := repositoryRef.RemoteName() - - if strings.Count(repository, "/") == 0 { - // This will be removed once the registry supports auto-resolution on - // the "library" namespace - repository = "library/" + repository - } - for _, host := range registries { - endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) - res, err := r.client.Get(endpoint) - if err != nil { - return "", err - } - - logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) - defer res.Body.Close() - - if res.StatusCode == 404 { - return "", ErrRepoNotFound - } - if res.StatusCode != 200 { - continue - } - - var tagID string - if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { - return "", err - } - return tagID, nil - } - return "", fmt.Errorf("Could not reach any registry endpoint") -} - -// GetRemoteTags retrieves all tags from the given repository. It queries each -// of the registries supplied in the registries argument, and returns data from -// the first one that answers the query successfully. It returns a map with -// tag names as the keys and image IDs as the values. -func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { - repository := repositoryRef.RemoteName() - - if strings.Count(repository, "/") == 0 { - // This will be removed once the registry supports auto-resolution on - // the "library" namespace - repository = "library/" + repository - } - for _, host := range registries { - endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) - res, err := r.client.Get(endpoint) - if err != nil { - return nil, err - } - - logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) - defer res.Body.Close() - - if res.StatusCode == 404 { - return nil, ErrRepoNotFound - } - if res.StatusCode != 200 { - continue - } - - result := make(map[string]string) - if err := json.NewDecoder(res.Body).Decode(&result); err != nil { - return nil, err - } - return result, nil - } - return nil, fmt.Errorf("Could not reach any registry endpoint") -} - -func buildEndpointsList(headers []string, indexEp string) ([]string, error) { - var endpoints []string - parsedURL, err := url.Parse(indexEp) - if err != nil { - return nil, err - } - var urlScheme = parsedURL.Scheme - // The registry's URL scheme has to match the Index's - for _, ep := range headers { - epList := strings.Split(ep, ",") - for _, epListElement := range epList { - endpoints = append( - endpoints, - fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) - } - } - return endpoints, nil -} - -// GetRepositoryData returns lists of images and endpoints for the repository -func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { - repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), name.RemoteName()) - - logrus.Debugf("[registry] Calling GET %s", repositoryTarget) - - req, err := http.NewRequest("GET", repositoryTarget, nil) - if err != nil { - return nil, err - } - // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests - req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - // check if
the error is because of i/o timeout - // and return a non-obtuse error message for users - // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" - // was a top search on the docker user forum - if isTimeout(err) { - return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) - } - return nil, fmt.Errorf("Error while pulling image: %v", err) - } - defer res.Body.Close() - if res.StatusCode == 401 { - return nil, errcode.ErrorCodeUnauthorized.WithArgs() - } - // TODO: Right now we're ignoring checksums in the response body. - // In the future, we need to use them to check image validity. - if res.StatusCode == 404 { - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) - } else if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - logrus.Debugf("Error reading response body: %s", err) - } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, name.RemoteName(), errBody), res) - } - - var endpoints []string - if res.Header.Get("X-Docker-Endpoints") != "" { - endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) - if err != nil { - return nil, err - } - } else { - // Assume the endpoint is on the same host - endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) - } - - remoteChecksums := []*ImgData{} - if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { - return nil, err - } - - // Forge a better object from the retrieved data - imgsData := make(map[string]*ImgData, len(remoteChecksums)) - for _, elem := range remoteChecksums { - imgsData[elem.ID] = elem - } - - return &RepositoryData{ - ImgList: imgsData, - Endpoints: endpoints, - }, nil -} - -// PushImageChecksumRegistry uploads checksums for an image -func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { - u := registry + "images/" + imgData.ID + "/checksum" - - logrus.Debugf("[registry] Calling PUT %s", u) - - req, err := http.NewRequest("PUT", u, nil) - if err != nil { - return err - } - req.Header.Set("X-Docker-Checksum", imgData.Checksum) - req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) - - res, err := r.client.Do(req) - if err != nil { - return fmt.Errorf("Failed to upload metadata: %v", err) - } - defer res.Body.Close() - if len(res.Cookies()) > 0 { - r.client.Jar.SetCookies(req.URL, res.Cookies()) - } - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) - } - var jsonBody map[string]string - if err := json.Unmarshal(errBody, &jsonBody); err != nil { - errBody = []byte(err.Error()) - } else if jsonBody["error"] == "Image already exists" { - return ErrAlreadyExists - } - return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) - } - return nil -} - -// PushImageJSONRegistry pushes JSON metadata for a local image to the registry -func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { - - u := registry + "images/" + imgData.ID + "/json" - - logrus.Debugf("[registry] Calling PUT %s", u) - - req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) - 
if err != nil { - return err - } - req.Header.Add("Content-type", "application/json") - - res, err := r.client.Do(req) - if err != nil { - return fmt.Errorf("Failed to upload metadata: %s", err) - } - defer res.Body.Close() - if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { - return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) - } - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) - } - var jsonBody map[string]string - if err := json.Unmarshal(errBody, &jsonBody); err != nil { - errBody = []byte(err.Error()) - } else if jsonBody["error"] == "Image already exists" { - return ErrAlreadyExists - } - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) - } - return nil -} - -// PushImageLayerRegistry sends the checksum of an image layer to the registry -func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { - u := registry + "images/" + imgID + "/layer" - - logrus.Debugf("[registry] Calling PUT %s", u) - - tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) - if err != nil { - return "", "", err - } - h := sha256.New() - h.Write(jsonRaw) - h.Write([]byte{'\n'}) - checksumLayer := io.TeeReader(tarsumLayer, h) - - req, err := http.NewRequest("PUT", u, checksumLayer) - if err != nil { - return "", "", err - } - req.Header.Add("Content-Type", "application/octet-stream") - req.ContentLength = -1 - req.TransferEncoding = []string{"chunked"} - res, err := r.client.Do(req) - if err != nil { - return "", "", fmt.Errorf("Failed to upload layer: %v", err) - } - if rc, ok := layer.(io.Closer); ok { - if err := rc.Close(); err != nil { - return "", "", err - } - } - defer res.Body.Close() - - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) - } - return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) - } - - checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) - return tarsumLayer.Sum(jsonRaw), checksumPayload, nil -} - -// PushRegistryTag pushes a tag on the registry. 
-// Remote has the format '<user>/<repo>' -func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error { - // "jsonify" the string - revision = "\"" + revision + "\"" - path := fmt.Sprintf("repositories/%s/tags/%s", remote.RemoteName(), tag) - - req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision)) - if err != nil { - return err - } - req.Header.Add("Content-type", "application/json") - req.ContentLength = int64(len(revision)) - res, err := r.client.Do(req) - if err != nil { - return err - } - res.Body.Close() - if res.StatusCode != 200 && res.StatusCode != 201 { - return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote.RemoteName()), res) - } - return nil -} - -// PushImageJSONIndex uploads an image list to the repository -func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { - cleanImgList := []*ImgData{} - if validate { - for _, elem := range imgList { - if elem.Checksum != "" { - cleanImgList = append(cleanImgList, elem) - } - } - } else { - cleanImgList = imgList - } - - imgListJSON, err := json.Marshal(cleanImgList) - if err != nil { - return nil, err - } - var suffix string - if validate { - suffix = "images" - } - u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), remote.RemoteName(), suffix) - logrus.Debugf("[registry] PUT %s", u) - logrus.Debugf("Image list pushed to index:\n%s", imgListJSON) - headers := map[string][]string{ - "Content-type": {"application/json"}, - // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests - "X-Docker-Token": {"true"}, - } - if validate { - headers["X-Docker-Endpoints"] = regs - } - - // Redirect if necessary - var res *http.Response - for { - if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil { - return nil, err - } - if !shouldRedirect(res) { - break - } - res.Body.Close() - u = res.Header.Get("Location") - logrus.Debugf("Redirected to %s", u) - } - defer res.Body.Close() - - if res.StatusCode == 401 { - return nil, errcode.ErrorCodeUnauthorized.WithArgs() - } - - var tokens, endpoints []string - if !validate { - if res.StatusCode != 200 && res.StatusCode != 201 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - logrus.Debugf("Error reading response body: %s", err) - } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) - } - tokens = res.Header["X-Docker-Token"] - logrus.Debugf("Auth token: %v", tokens) - - if res.Header.Get("X-Docker-Endpoints") == "" { - return nil, fmt.Errorf("Index response didn't contain any endpoints") - } - endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) - if err != nil { - return nil, err - } - } else { - if res.StatusCode != 204 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - logrus.Debugf("Error reading response body: %s", err) - } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) - } - } - - return &RepositoryData{ - Endpoints: endpoints, - }, nil -} - -func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) { - req, err := http.NewRequest("PUT", u, bytes.NewReader(body))
- if err != nil { - return nil, err - } - req.ContentLength = int64(len(body)) - for k, v := range headers { - req.Header[k] = v - } - response, err := r.client.Do(req) - if err != nil { - return nil, err - } - return response, nil -} - -func shouldRedirect(response *http.Response) bool { - return response.StatusCode >= 300 && response.StatusCode < 400 -} - -// SearchRepositories performs a search against the remote repository -func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { - if limit < 1 || limit > 100 { - return nil, fmt.Errorf("Limit %d is outside the range of [1, 100]", limit) - } - logrus.Debugf("Index server: %s", r.indexEndpoint) - u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, fmt.Errorf("Error while getting from the server: %v", err) - } - // Have the AuthTransport send authentication, when logged in. - req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != 200 { - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) - } - result := new(registrytypes.SearchResults) - return result, json.NewDecoder(res.Body).Decode(result) -} - -// GetAuthConfig returns the authentication settings for a session -// TODO(tiborvass): remove this once registry client v2 is vendored -func (r *Session) GetAuthConfig(withPasswd bool) *types.AuthConfig { - password := "" - if withPasswd { - password = r.authConfig.Password - } - return &types.AuthConfig{ - Username: r.authConfig.Username, - Password: password, - } -} - -func isTimeout(err error) bool { - type timeout interface { - Timeout() bool - } - e := err - switch urlErr := err.(type) { - case *url.Error: - e = urlErr.Err - } - t, ok := e.(timeout) - return ok && t.Timeout() -} diff --git a/registry/types.go b/registry/types.go deleted file mode 100644 index 601fa09ed..000000000 --- a/registry/types.go +++ /dev/null @@ -1,70 +0,0 @@ -package registry - -import ( - "github.com/docker/docker/reference" - registrytypes "github.com/docker/engine-api/types/registry" -) - -// RepositoryData tracks the image list, list of endpoints, and list of tokens -// for a repository -type RepositoryData struct { - // ImgList is a list of images in the repository - ImgList map[string]*ImgData - // Endpoints is a list of endpoints returned in X-Docker-Endpoints - Endpoints []string - // Tokens is currently unused (remove it?) - Tokens []string -} - -// ImgData is used to transfer image checksums to and from the registry -type ImgData struct { - // ID is an opaque string that identifies the image - ID string `json:"id"` - Checksum string `json:"checksum,omitempty"` - ChecksumPayload string `json:"-"` - Tag string `json:",omitempty"` -} - -// PingResult contains the information returned when pinging a registry. It -// indicates the registry's version and whether the registry claims to be a -// standalone registry. 
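An illustrative aside, not part of the patch: the isTimeout helper above leans on Go's informal Timeout() bool convention rather than any concrete error type. A self-contained sketch of the same idiom; fakeTimeoutErr is a stand-in for a real network error.

package main

import (
	"errors"
	"fmt"
	"net/url"
)

// fakeTimeoutErr stands in for a transport error that knows it timed out.
type fakeTimeoutErr struct{}

func (fakeTimeoutErr) Error() string { return "i/o timeout" }
func (fakeTimeoutErr) Timeout() bool { return true }

// isTimeout follows the removed helper: unwrap *url.Error, then ask the
// underlying error whether it satisfies the anonymous timeout interface.
func isTimeout(err error) bool {
	type timeout interface {
		Timeout() bool
	}
	e := err
	if urlErr, ok := err.(*url.Error); ok {
		e = urlErr.Err
	}
	t, ok := e.(timeout)
	return ok && t.Timeout()
}

func main() {
	wrapped := &url.Error{Op: "Get", URL: "https://index.example.com/v1/", Err: fakeTimeoutErr{}}
	fmt.Println(isTimeout(wrapped))                     // true
	fmt.Println(isTimeout(errors.New("not a timeout"))) // false
}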
-type PingResult struct { - // Version is the registry version supplied by the registry in an HTTP - // header - Version string `json:"version"` - // Standalone is set to true if the registry indicates it is a - // standalone registry in the X-Docker-Registry-Standalone - // header - Standalone bool `json:"standalone"` -} - -// APIVersion is an integral representation of an API version (presently -// either 1 or 2) -type APIVersion int - -func (av APIVersion) String() string { - return apiVersions[av] -} - -// API Version identifiers. -const ( - _ = iota - APIVersion1 APIVersion = iota - APIVersion2 -) - -var apiVersions = map[APIVersion]string{ - APIVersion1: "v1", - APIVersion2: "v2", -} - -// RepositoryInfo describes a repository -type RepositoryInfo struct { - reference.Named - // Index points to registry information - Index *registrytypes.IndexInfo - // Official indicates whether the repository is considered official. - // If the registry is official, and the normalized name does not - // contain a '/' (e.g. "foo"), then it is considered an official repo. - Official bool -} diff --git a/restartmanager/restartmanager.go b/restartmanager/restartmanager.go deleted file mode 100644 index 9893183a2..000000000 --- a/restartmanager/restartmanager.go +++ /dev/null @@ -1,128 +0,0 @@ -package restartmanager - -import ( - "errors" - "fmt" - "sync" - "time" - - "github.com/docker/engine-api/types/container" -) - -const ( - backoffMultiplier = 2 - defaultTimeout = 100 * time.Millisecond -) - -// ErrRestartCanceled is returned when the restart manager has been -// canceled and will no longer restart the container. -var ErrRestartCanceled = errors.New("restart canceled") - -// RestartManager defines object that controls container restarting rules. -type RestartManager interface { - Cancel() error - ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) -} - -type restartManager struct { - sync.Mutex - sync.Once - policy container.RestartPolicy - restartCount int - timeout time.Duration - active bool - cancel chan struct{} - canceled bool -} - -// New returns a new restartmanager based on a policy. -func New(policy container.RestartPolicy, restartCount int) RestartManager { - return &restartManager{policy: policy, restartCount: restartCount, cancel: make(chan struct{})} -} - -func (rm *restartManager) SetPolicy(policy container.RestartPolicy) { - rm.Lock() - rm.policy = policy - rm.Unlock() -} - -func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) { - if rm.policy.IsNone() { - return false, nil, nil - } - rm.Lock() - unlockOnExit := true - defer func() { - if unlockOnExit { - rm.Unlock() - } - }() - - if rm.canceled { - return false, nil, ErrRestartCanceled - } - - if rm.active { - return false, nil, fmt.Errorf("invalid call on active restartmanager") - } - // if the container ran for more than 10s, regardless of status and policy reset the - // the timeout back to the default. 
- if executionDuration.Seconds() >= 10 { - rm.timeout = 0 - } - if rm.timeout == 0 { - rm.timeout = defaultTimeout - } else { - rm.timeout *= backoffMultiplier - } - - var restart bool - switch { - case rm.policy.IsAlways(): - restart = true - case rm.policy.IsUnlessStopped() && !hasBeenManuallyStopped: - restart = true - case rm.policy.IsOnFailure(): - // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count - if max := rm.policy.MaximumRetryCount; max == 0 || rm.restartCount < max { - restart = exitCode != 0 - } - } - - if !restart { - rm.active = false - return false, nil, nil - } - - rm.restartCount++ - - unlockOnExit = false - rm.active = true - rm.Unlock() - - ch := make(chan error) - go func() { - select { - case <-rm.cancel: - ch <- ErrRestartCanceled - close(ch) - case <-time.After(rm.timeout): - rm.Lock() - close(ch) - rm.active = false - rm.Unlock() - } - }() - - return true, ch, nil -} - -func (rm *restartManager) Cancel() error { - rm.Do(func() { - rm.Lock() - rm.canceled = true - close(rm.cancel) - rm.Unlock() - }) - return nil -} diff --git a/restartmanager/restartmanager_test.go b/restartmanager/restartmanager_test.go deleted file mode 100644 index 95a36b426..000000000 --- a/restartmanager/restartmanager_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package restartmanager - -import ( - "testing" - "time" - - "github.com/docker/engine-api/types/container" -) - -func TestRestartManagerTimeout(t *testing.T) { - rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) - should, _, err := rm.ShouldRestart(0, false, 1*time.Second) - if err != nil { - t.Fatal(err) - } - if !should { - t.Fatal("container should be restarted") - } - if rm.timeout != 100*time.Millisecond { - t.Fatalf("restart manager should have a timeout of 100ms but has %s", rm.timeout) - } -} - -func TestRestartManagerTimeoutReset(t *testing.T) { - rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) - rm.timeout = 5 * time.Second - _, _, err := rm.ShouldRestart(0, false, 10*time.Second) - if err != nil { - t.Fatal(err) - } - if rm.timeout != 100*time.Millisecond { - t.Fatalf("restart manager should have a timeout of 100ms but has %s", rm.timeout) - } -} diff --git a/runconfig/compare.go b/runconfig/compare.go deleted file mode 100644 index 61346aabf..000000000 --- a/runconfig/compare.go +++ /dev/null @@ -1,61 +0,0 @@ -package runconfig - -import "github.com/docker/engine-api/types/container" - -// Compare two Config struct. 
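An illustrative aside, not part of the patch: the restart backoff that ShouldRestart computes above, reduced to a standalone sketch. The delay doubles on each quick failure and resets to the 100ms default once a run lasts at least ten seconds; nextTimeout is a hypothetical name for the inlined logic.

package main

import (
	"fmt"
	"time"
)

const (
	backoffMultiplier = 2
	defaultTimeout    = 100 * time.Millisecond
)

// nextTimeout reproduces the schedule: a run of ten seconds or more clears
// the accumulated backoff; otherwise each restart doubles the previous delay.
func nextTimeout(current, executionDuration time.Duration) time.Duration {
	if executionDuration.Seconds() >= 10 {
		current = 0
	}
	if current == 0 {
		return defaultTimeout
	}
	return current * backoffMultiplier
}

func main() {
	t := time.Duration(0)
	for i := 0; i < 4; i++ {
		t = nextTimeout(t, time.Second) // container keeps crashing quickly
		fmt.Println(t)                  // 100ms, 200ms, 400ms, 800ms
	}
	fmt.Println(nextTimeout(t, 15*time.Second)) // back to 100ms after a long run
}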
Do not compare the "Image" nor "Hostname" fields -// If OpenStdin is set, then it differs -func Compare(a, b *container.Config) bool { - if a == nil || b == nil || - a.OpenStdin || b.OpenStdin { - return false - } - if a.AttachStdout != b.AttachStdout || - a.AttachStderr != b.AttachStderr || - a.User != b.User || - a.OpenStdin != b.OpenStdin || - a.Tty != b.Tty { - return false - } - - if len(a.Cmd) != len(b.Cmd) || - len(a.Env) != len(b.Env) || - len(a.Labels) != len(b.Labels) || - len(a.ExposedPorts) != len(b.ExposedPorts) || - len(a.Entrypoint) != len(b.Entrypoint) || - len(a.Volumes) != len(b.Volumes) { - return false - } - - for i := 0; i < len(a.Cmd); i++ { - if a.Cmd[i] != b.Cmd[i] { - return false - } - } - for i := 0; i < len(a.Env); i++ { - if a.Env[i] != b.Env[i] { - return false - } - } - for k, v := range a.Labels { - if v != b.Labels[k] { - return false - } - } - for k := range a.ExposedPorts { - if _, exists := b.ExposedPorts[k]; !exists { - return false - } - } - - for i := 0; i < len(a.Entrypoint); i++ { - if a.Entrypoint[i] != b.Entrypoint[i] { - return false - } - } - for key := range a.Volumes { - if _, exists := b.Volumes[key]; !exists { - return false - } - } - return true -} diff --git a/runconfig/compare_test.go b/runconfig/compare_test.go deleted file mode 100644 index 9c17c553f..000000000 --- a/runconfig/compare_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package runconfig - -import ( - "testing" - - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// Just to make life easier -func newPortNoError(proto, port string) nat.Port { - p, _ := nat.NewPort(proto, port) - return p -} - -func TestCompare(t *testing.T) { - ports1 := make(nat.PortSet) - ports1[newPortNoError("tcp", "1111")] = struct{}{} - ports1[newPortNoError("tcp", "2222")] = struct{}{} - ports2 := make(nat.PortSet) - ports2[newPortNoError("tcp", "3333")] = struct{}{} - ports2[newPortNoError("tcp", "4444")] = struct{}{} - ports3 := make(nat.PortSet) - ports3[newPortNoError("tcp", "1111")] = struct{}{} - ports3[newPortNoError("tcp", "2222")] = struct{}{} - ports3[newPortNoError("tcp", "5555")] = struct{}{} - volumes1 := make(map[string]struct{}) - volumes1["/test1"] = struct{}{} - volumes2 := make(map[string]struct{}) - volumes2["/test2"] = struct{}{} - volumes3 := make(map[string]struct{}) - volumes3["/test1"] = struct{}{} - volumes3["/test3"] = struct{}{} - envs1 := []string{"ENV1=value1", "ENV2=value2"} - envs2 := []string{"ENV1=value1", "ENV3=value3"} - entrypoint1 := strslice.StrSlice{"/bin/sh", "-c"} - entrypoint2 := strslice.StrSlice{"/bin/sh", "-d"} - entrypoint3 := strslice.StrSlice{"/bin/sh", "-c", "echo"} - cmd1 := strslice.StrSlice{"/bin/sh", "-c"} - cmd2 := strslice.StrSlice{"/bin/sh", "-d"} - cmd3 := strslice.StrSlice{"/bin/sh", "-c", "echo"} - labels1 := map[string]string{"LABEL1": "value1", "LABEL2": "value2"} - labels2 := map[string]string{"LABEL1": "value1", "LABEL2": "value3"} - labels3 := map[string]string{"LABEL1": "value1", "LABEL2": "value2", "LABEL3": "value3"} - - sameConfigs := map[*container.Config]*container.Config{ - // Empty config - &container.Config{}: {}, - // Does not compare hostname, domainname & image - &container.Config{ - Hostname: "host1", - Domainname: "domain1", - Image: "image1", - User: "user", - }: { - Hostname: "host2", - Domainname: "domain2", - Image: "image2", - User: "user", - }, - // only OpenStdin - &container.Config{OpenStdin: false}: {OpenStdin: false}, - // only env 
- &container.Config{Env: envs1}: {Env: envs1}, - // only cmd - &container.Config{Cmd: cmd1}: {Cmd: cmd1}, - // only labels - &container.Config{Labels: labels1}: {Labels: labels1}, - // only exposedPorts - &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports1}, - // only entrypoints - &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint1}, - // only volumes - &container.Config{Volumes: volumes1}: {Volumes: volumes1}, - } - differentConfigs := map[*container.Config]*container.Config{ - nil: nil, - &container.Config{ - Hostname: "host1", - Domainname: "domain1", - Image: "image1", - User: "user1", - }: { - Hostname: "host1", - Domainname: "domain1", - Image: "image1", - User: "user2", - }, - // only OpenStdin - &container.Config{OpenStdin: false}: {OpenStdin: true}, - &container.Config{OpenStdin: true}: {OpenStdin: false}, - // only env - &container.Config{Env: envs1}: {Env: envs2}, - // only cmd - &container.Config{Cmd: cmd1}: {Cmd: cmd2}, - // not the same number of parts - &container.Config{Cmd: cmd1}: {Cmd: cmd3}, - // only labels - &container.Config{Labels: labels1}: {Labels: labels2}, - // not the same number of labels - &container.Config{Labels: labels1}: {Labels: labels3}, - // only exposedPorts - &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports2}, - // not the same number of ports - &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports3}, - // only entrypoints - &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint2}, - // not the same number of parts - &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint3}, - // only volumes - &container.Config{Volumes: volumes1}: {Volumes: volumes2}, - // not the same number of labels - &container.Config{Volumes: volumes1}: {Volumes: volumes3}, - } - for config1, config2 := range sameConfigs { - if !Compare(config1, config2) { - t.Fatalf("Compare should be true for [%v] and [%v]", config1, config2) - } - } - for config1, config2 := range differentConfigs { - if Compare(config1, config2) { - t.Fatalf("Compare should be false for [%v] and [%v]", config1, config2) - } - } -} diff --git a/runconfig/config.go b/runconfig/config.go deleted file mode 100644 index 8145e4b1d..000000000 --- a/runconfig/config.go +++ /dev/null @@ -1,90 +0,0 @@ -package runconfig - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/docker/docker/volume" - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" -) - -// ContainerDecoder implements httputils.ContainerDecoder -// calling DecodeContainerConfig. 
-type ContainerDecoder struct{} - -// DecodeConfig makes ContainerDecoder to implement httputils.ContainerDecoder -func (r ContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - return DecodeContainerConfig(src) -} - -// DecodeHostConfig makes ContainerDecoder to implement httputils.ContainerDecoder -func (r ContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { - return DecodeHostConfig(src) -} - -// DecodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper -// struct and returns both a Config and a HostConfig struct -// Be aware this function is not checking whether the resulted structs are nil, -// it's your business to do so -func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - var w ContainerConfigWrapper - - decoder := json.NewDecoder(src) - if err := decoder.Decode(&w); err != nil { - return nil, nil, nil, err - } - - hc := w.getHostConfig() - - // Perform platform-specific processing of Volumes and Binds. - if w.Config != nil && hc != nil { - - // Initialize the volumes map if currently nil - if w.Config.Volumes == nil { - w.Config.Volumes = make(map[string]struct{}) - } - - // Now validate all the volumes and binds - if err := validateVolumesAndBindSettings(w.Config, hc); err != nil { - return nil, nil, nil, err - } - } - - // Certain parameters need daemon-side validation that cannot be done - // on the client, as only the daemon knows what is valid for the platform. - if err := ValidateNetMode(w.Config, hc); err != nil { - return nil, nil, nil, err - } - - // Validate isolation - if err := ValidateIsolation(hc); err != nil { - return nil, nil, nil, err - } - - // Validate QoS - if err := ValidateQoS(hc); err != nil { - return nil, nil, nil, err - } - return w.Config, hc, w.NetworkingConfig, nil -} - -// validateVolumesAndBindSettings validates each of the volumes and bind settings -// passed by the caller to ensure they are valid. -func validateVolumesAndBindSettings(c *container.Config, hc *container.HostConfig) error { - - // Ensure all volumes and binds are valid. 
- for spec := range c.Volumes { - if _, err := volume.ParseMountSpec(spec, hc.VolumeDriver); err != nil { - return fmt.Errorf("Invalid volume spec %q: %v", spec, err) - } - } - for _, spec := range hc.Binds { - if _, err := volume.ParseMountSpec(spec, hc.VolumeDriver); err != nil { - return fmt.Errorf("Invalid bind mount spec %q: %v", spec, err) - } - } - - return nil -} diff --git a/runconfig/config_test.go b/runconfig/config_test.go deleted file mode 100644 index 5804b12d0..000000000 --- a/runconfig/config_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package runconfig - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "runtime" - "strings" - "testing" - - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/strslice" -) - -type f struct { - file string - entrypoint strslice.StrSlice -} - -func TestDecodeContainerConfig(t *testing.T) { - - var ( - fixtures []f - image string - ) - - if runtime.GOOS != "windows" { - image = "ubuntu" - fixtures = []f{ - {"fixtures/unix/container_config_1_14.json", strslice.StrSlice{}}, - {"fixtures/unix/container_config_1_17.json", strslice.StrSlice{"bash"}}, - {"fixtures/unix/container_config_1_19.json", strslice.StrSlice{"bash"}}, - } - } else { - image = "windows" - fixtures = []f{ - {"fixtures/windows/container_config_1_19.json", strslice.StrSlice{"cmd"}}, - } - } - - for _, f := range fixtures { - b, err := ioutil.ReadFile(f.file) - if err != nil { - t.Fatal(err) - } - - c, h, _, err := DecodeContainerConfig(bytes.NewReader(b)) - if err != nil { - t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) - } - - if c.Image != image { - t.Fatalf("Expected %s image, found %s\n", image, c.Image) - } - - if len(c.Entrypoint) != len(f.entrypoint) { - t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint) - } - - if h != nil && h.Memory != 1000 { - t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory) - } - } -} - -// TestDecodeContainerConfigIsolation validates isolation passed -// to the daemon in the hostConfig structure. Note this is platform specific -// as to what level of container isolation is supported. 
-func TestDecodeContainerConfigIsolation(t *testing.T) { - - // An invalid isolation level - if _, _, _, err := callDecodeContainerConfigIsolation("invalid"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "invalid"`) { - t.Fatal(err) - } - } - - // Blank isolation (== default) - if _, _, _, err := callDecodeContainerConfigIsolation(""); err != nil { - t.Fatal("Blank isolation should have succeeded") - } - - // Default isolation - if _, _, _, err := callDecodeContainerConfigIsolation("default"); err != nil { - t.Fatal("default isolation should have succeeded") - } - - // Process isolation (Valid on Windows only) - if runtime.GOOS == "windows" { - if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { - t.Fatal("process isolation should have succeeded") - } - } else { - if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "process"`) { - t.Fatal(err) - } - } - } - - // Hyper-V Containers isolation (Valid on Windows only) - if runtime.GOOS == "windows" { - if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { - t.Fatal("hyperv isolation should have succeeded") - } - } else { - if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "hyperv"`) { - t.Fatal(err) - } - } - } -} - -// callDecodeContainerConfigIsolation is a utility function to call -// DecodeContainerConfig for validating isolation -func callDecodeContainerConfigIsolation(isolation string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - var ( - b []byte - err error - ) - w := ContainerConfigWrapper{ - Config: &container.Config{}, - HostConfig: &container.HostConfig{ - NetworkMode: "none", - Isolation: container.Isolation(isolation)}, - } - if b, err = json.Marshal(w); err != nil { - return nil, nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) - } - return DecodeContainerConfig(bytes.NewReader(b)) -} diff --git a/runconfig/config_unix.go b/runconfig/config_unix.go deleted file mode 100644 index e5902fb02..000000000 --- a/runconfig/config_unix.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build !windows - -package runconfig - -import ( - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" -) - -// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) -// and the corresponding HostConfig (non-portable). -type ContainerConfigWrapper struct { - *container.Config - InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` - Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. - NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` - *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. -} - -// getHostConfig gets the HostConfig of the Config. 
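An illustrative aside, not part of the patch: the deprecated-field migration that getHostConfig performs (shown in full just below), reduced to a one-field sketch. The wrapper and hostConfig types here are trimmed stand-ins for ContainerConfigWrapper and container.HostConfig.

package main

import (
	"encoding/json"
	"fmt"
)

type hostConfig struct {
	CpusetCpus string
}

type wrapper struct {
	InnerHostConfig *hostConfig `json:"HostConfig,omitempty"`
	Cpuset          string      `json:",omitempty"` // deprecated flat field
}

// getHostConfig folds the legacy top-level Cpuset into the nested host
// config, preferring whatever the nested structure already carries.
func (w *wrapper) getHostConfig() *hostConfig {
	hc := w.InnerHostConfig
	if hc == nil {
		hc = &hostConfig{}
	}
	if w.Cpuset != "" && hc.CpusetCpus == "" {
		hc.CpusetCpus = w.Cpuset
	}
	return hc
}

func main() {
	var w wrapper
	if err := json.Unmarshal([]byte(`{"Cpuset": "0,1"}`), &w); err != nil {
		panic(err)
	}
	fmt.Println(w.getHostConfig().CpusetCpus) // "0,1", migrated from the flat field
}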
-// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper -func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { - hc := w.HostConfig - - if hc == nil && w.InnerHostConfig != nil { - hc = w.InnerHostConfig - } else if w.InnerHostConfig != nil { - if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { - w.InnerHostConfig.Memory = hc.Memory - } - if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { - w.InnerHostConfig.MemorySwap = hc.MemorySwap - } - if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { - w.InnerHostConfig.CPUShares = hc.CPUShares - } - if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { - w.InnerHostConfig.CpusetCpus = hc.CpusetCpus - } - - if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { - w.InnerHostConfig.VolumeDriver = hc.VolumeDriver - } - - hc = w.InnerHostConfig - } - - if hc != nil { - if w.Cpuset != "" && hc.CpusetCpus == "" { - hc.CpusetCpus = w.Cpuset - } - } - - // Make sure NetworkMode has an acceptable value. We do this to ensure - // backwards compatible API behavior. - hc = SetDefaultNetModeIfBlank(hc) - - return hc -} diff --git a/runconfig/config_windows.go b/runconfig/config_windows.go deleted file mode 100644 index 50a523800..000000000 --- a/runconfig/config_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -package runconfig - -import ( - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" -) - -// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) -// and the corresponding HostConfig (non-portable). -type ContainerConfigWrapper struct { - *container.Config - HostConfig *container.HostConfig `json:"HostConfig,omitempty"` - NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` -} - -// getHostConfig gets the HostConfig of the Config. -func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { - return w.HostConfig -} diff --git a/runconfig/errors.go b/runconfig/errors.go deleted file mode 100644 index d3608576e..000000000 --- a/runconfig/errors.go +++ /dev/null @@ -1,40 +0,0 @@ -package runconfig - -import ( - "fmt" -) - -var ( - // ErrConflictContainerNetworkAndLinks conflict between --net=container and links - ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: container type network can't be used with links. This would result in undefined behavior") - // ErrConflictUserDefinedNetworkAndLinks conflict between --net= and links - ErrConflictUserDefinedNetworkAndLinks = fmt.Errorf("Conflicting options: networking can't be used with links. This would result in undefined behavior") - // ErrConflictSharedNetwork conflict between private and other networks - ErrConflictSharedNetwork = fmt.Errorf("Container sharing network namespace with another container or host cannot be connected to any other network") - // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. 
- ErrConflictHostNetwork = fmt.Errorf("Container cannot be disconnected from host network or connected to host network") - // ErrConflictNoNetwork conflict between private and other networks - ErrConflictNoNetwork = fmt.Errorf("Container cannot be connected to multiple networks with one of the networks in private (none) mode") - // ErrConflictNetworkAndDNS conflict between --dns and the network mode - ErrConflictNetworkAndDNS = fmt.Errorf("Conflicting options: dns and the network mode") - // ErrConflictNetworkHostname conflict between the hostname and the network mode - ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: hostname and the network mode") - // ErrConflictHostNetworkAndLinks conflict between --net=host and links - ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: host type networking can't be used with links. This would result in undefined behavior") - // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode - ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: mac-address and the network mode") - // ErrConflictNetworkHosts conflict between add-host and the network mode - ErrConflictNetworkHosts = fmt.Errorf("Conflicting options: custom host-to-IP mapping and the network mode") - // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode - ErrConflictNetworkPublishPorts = fmt.Errorf("Conflicting options: port publishing and the container type network mode") - // ErrConflictNetworkExposePorts conflict between the expose option and the network mode - ErrConflictNetworkExposePorts = fmt.Errorf("Conflicting options: port exposing and the container type network mode") - // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address - ErrUnsupportedNetworkAndIP = fmt.Errorf("User specified IP address is supported on user defined networks only") - // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address - ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("User specified IP address is supported only when connecting to networks with user configured subnets") - // ErrUnsupportedNetworkAndAlias conflict between network mode and alias - ErrUnsupportedNetworkAndAlias = fmt.Errorf("Network-scoped alias is supported only for containers in user defined networks") - // ErrConflictUTSHostname conflict between the hostname and the UTS mode - ErrConflictUTSHostname = fmt.Errorf("Conflicting options: hostname and the UTS mode") -) diff --git a/runconfig/fixtures/unix/container_config_1_14.json b/runconfig/fixtures/unix/container_config_1_14.json deleted file mode 100644 index b08334c09..000000000 --- a/runconfig/fixtures/unix/container_config_1_14.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "Hostname":"", - "Domainname": "", - "User":"", - "Memory": 1000, - "MemorySwap":0, - "CpuShares": 512, - "Cpuset": "0,1", - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "bash" - ], - "Image":"ubuntu", - "Volumes":{ - "/tmp": {} - }, - "WorkingDir":"", - "NetworkDisabled": false, - "ExposedPorts":{ - "22/tcp": {} - }, - "RestartPolicy": { "Name": "always" } -} diff --git a/runconfig/fixtures/unix/container_config_1_17.json b/runconfig/fixtures/unix/container_config_1_17.json deleted file mode 100644 index 0d780877b..000000000 --- a/runconfig/fixtures/unix/container_config_1_17.json +++ /dev/null @@ -1,50 
+0,0 @@ -{ - "Hostname": "", - "Domainname": "", - "User": "", - "Memory": 1000, - "MemorySwap": 0, - "CpuShares": 512, - "Cpuset": "0,1", - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Entrypoint": "bash", - "Image": "ubuntu", - "Volumes": { - "/tmp": {} - }, - "WorkingDir": "", - "NetworkDisabled": false, - "MacAddress": "12:34:56:78:9a:bc", - "ExposedPorts": { - "22/tcp": {} - }, - "SecurityOpt": [""], - "HostConfig": { - "Binds": ["/tmp:/tmp"], - "Links": ["redis3:redis"], - "LxcConf": {"lxc.utsname":"docker"}, - "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts": false, - "Privileged": false, - "ReadonlyRootfs": false, - "Dns": ["8.8.8.8"], - "DnsSearch": [""], - "DnsOptions": [""], - "ExtraHosts": null, - "VolumesFrom": ["parent", "other:ro"], - "CapAdd": ["NET_ADMIN"], - "CapDrop": ["MKNOD"], - "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, - "NetworkMode": "bridge", - "Devices": [] - } -} diff --git a/runconfig/fixtures/unix/container_config_1_19.json b/runconfig/fixtures/unix/container_config_1_19.json deleted file mode 100644 index de49cf324..000000000 --- a/runconfig/fixtures/unix/container_config_1_19.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "Hostname": "", - "Domainname": "", - "User": "", - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Entrypoint": "bash", - "Image": "ubuntu", - "Labels": { - "com.example.vendor": "Acme", - "com.example.license": "GPL", - "com.example.version": "1.0" - }, - "Volumes": { - "/tmp": {} - }, - "WorkingDir": "", - "NetworkDisabled": false, - "MacAddress": "12:34:56:78:9a:bc", - "ExposedPorts": { - "22/tcp": {} - }, - "HostConfig": { - "Binds": ["/tmp:/tmp"], - "Links": ["redis3:redis"], - "LxcConf": {"lxc.utsname":"docker"}, - "Memory": 1000, - "MemorySwap": 0, - "CpuShares": 512, - "CpusetCpus": "0,1", - "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts": false, - "Privileged": false, - "ReadonlyRootfs": false, - "Dns": ["8.8.8.8"], - "DnsSearch": [""], - "DnsOptions": [""], - "ExtraHosts": null, - "VolumesFrom": ["parent", "other:ro"], - "CapAdd": ["NET_ADMIN"], - "CapDrop": ["MKNOD"], - "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, - "NetworkMode": "bridge", - "Devices": [], - "Ulimits": [{}], - "LogConfig": { "Type": "json-file", "Config": {} }, - "SecurityOpt": [""], - "CgroupParent": "" - } -} diff --git a/runconfig/fixtures/unix/container_hostconfig_1_14.json b/runconfig/fixtures/unix/container_hostconfig_1_14.json deleted file mode 100644 index c72ac91ca..000000000 --- a/runconfig/fixtures/unix/container_hostconfig_1_14.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "Binds": ["/tmp:/tmp"], - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": ["/name:alias"], - "PublishAllPorts": false, - "CapAdd": ["NET_ADMIN"], - "CapDrop": ["MKNOD"] -} diff --git a/runconfig/fixtures/unix/container_hostconfig_1_19.json b/runconfig/fixtures/unix/container_hostconfig_1_19.json deleted file mode 100644 index 5ca8aa7e1..000000000 --- a/runconfig/fixtures/unix/container_hostconfig_1_19.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "Binds": ["/tmp:/tmp"], - "Links": ["redis3:redis"], - "LxcConf": {"lxc.utsname":"docker"}, - 
"Memory": 0, - "MemorySwap": 0, - "CpuShares": 512, - "CpuPeriod": 100000, - "CpusetCpus": "0,1", - "CpusetMems": "0,1", - "BlkioWeight": 300, - "OomKillDisable": false, - "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts": false, - "Privileged": false, - "ReadonlyRootfs": false, - "Dns": ["8.8.8.8"], - "DnsSearch": [""], - "ExtraHosts": null, - "VolumesFrom": ["parent", "other:ro"], - "CapAdd": ["NET_ADMIN"], - "CapDrop": ["MKNOD"], - "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, - "NetworkMode": "bridge", - "Devices": [], - "Ulimits": [{}], - "LogConfig": { "Type": "json-file", "Config": {} }, - "SecurityOpt": [""], - "CgroupParent": "" -} diff --git a/runconfig/fixtures/windows/container_config_1_19.json b/runconfig/fixtures/windows/container_config_1_19.json deleted file mode 100644 index 724320c76..000000000 --- a/runconfig/fixtures/windows/container_config_1_19.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "Hostname": "", - "Domainname": "", - "User": "", - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Entrypoint": "cmd", - "Image": "windows", - "Labels": { - "com.example.vendor": "Acme", - "com.example.license": "GPL", - "com.example.version": "1.0" - }, - "Volumes": { - "c:/windows": {} - }, - "WorkingDir": "", - "NetworkDisabled": false, - "MacAddress": "12:34:56:78:9a:bc", - "ExposedPorts": { - "22/tcp": {} - }, - "HostConfig": { - "Binds": ["c:/windows:d:/tmp"], - "Links": ["redis3:redis"], - "LxcConf": {"lxc.utsname":"docker"}, - "Memory": 1000, - "MemorySwap": 0, - "CpuShares": 512, - "CpusetCpus": "0,1", - "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts": false, - "Privileged": false, - "ReadonlyRootfs": false, - "Dns": ["8.8.8.8"], - "DnsSearch": [""], - "DnsOptions": [""], - "ExtraHosts": null, - "VolumesFrom": ["parent", "other:ro"], - "CapAdd": ["NET_ADMIN"], - "CapDrop": ["MKNOD"], - "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, - "NetworkMode": "default", - "Devices": [], - "Ulimits": [{}], - "LogConfig": { "Type": "json-file", "Config": {} }, - "SecurityOpt": [""], - "CgroupParent": "" - } -} diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go deleted file mode 100644 index 769cc9f5d..000000000 --- a/runconfig/hostconfig.go +++ /dev/null @@ -1,35 +0,0 @@ -package runconfig - -import ( - "encoding/json" - "io" - - "github.com/docker/engine-api/types/container" -) - -// DecodeHostConfig creates a HostConfig based on the specified Reader. -// It assumes the content of the reader will be JSON, and decodes it. -func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { - decoder := json.NewDecoder(src) - - var w ContainerConfigWrapper - if err := decoder.Decode(&w); err != nil { - return nil, err - } - - hc := w.getHostConfig() - return hc, nil -} - -// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure -// to default if it is not populated. This ensures backwards compatibility after -// the validation of the network mode was moved from the docker CLI to the -// docker daemon. 
-func SetDefaultNetModeIfBlank(hc *container.HostConfig) *container.HostConfig { - if hc != nil { - if hc.NetworkMode == container.NetworkMode("") { - hc.NetworkMode = container.NetworkMode("default") - } - } - return hc -} diff --git a/runconfig/hostconfig_solaris.go b/runconfig/hostconfig_solaris.go deleted file mode 100644 index 5c2e86120..000000000 --- a/runconfig/hostconfig_solaris.go +++ /dev/null @@ -1,47 +0,0 @@ -package runconfig - -import ( - "fmt" - "strings" - - "github.com/docker/engine-api/types/container" -) - -// DefaultDaemonNetworkMode returns the default network stack the daemon should -// use. -func DefaultDaemonNetworkMode() container.NetworkMode { - return container.NetworkMode("default") -} - -// IsPreDefinedNetwork indicates if a network is predefined by the daemon -func IsPreDefinedNetwork(network string) bool { - return false -} - -// ValidateNetMode ensures that the various combinations of requested -// network settings are valid. -func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - parts := strings.Split(string(hc.NetworkMode), ":") - switch mode := parts[0]; mode { - case "default", "none": - default: - return fmt.Errorf("invalid --net: %s", hc.NetworkMode) - } - return nil -} - -// ValidateIsolation performs platform specific validation of the -// isolation level in the hostconfig structure. -// This setting is currently discarded for Solaris so this is a no-op. -func ValidateIsolation(hc *container.HostConfig) error { - return nil -} - -// ValidateQoS performs platform specific validation of the QoS settings -func ValidateQoS(hc *container.HostConfig) error { - return nil -} diff --git a/runconfig/hostconfig_test.go b/runconfig/hostconfig_test.go deleted file mode 100644 index 1fbc90a4c..000000000 --- a/runconfig/hostconfig_test.go +++ /dev/null @@ -1,222 +0,0 @@ -// +build !windows - -package runconfig - -import ( - "bytes" - "fmt" - "io/ioutil" - "testing" - - "github.com/docker/engine-api/types/container" -) - -// TODO Windows: This will need addressing for a Windows daemon. 
-func TestNetworkModeTest(t *testing.T) { - networkModes := map[container.NetworkMode][]bool{ - // private, bridge, host, container, none, default - "": {true, false, false, false, false, false}, - "something:weird": {true, false, false, false, false, false}, - "bridge": {true, true, false, false, false, false}, - DefaultDaemonNetworkMode(): {true, true, false, false, false, false}, - "host": {false, false, true, false, false, false}, - "container:name": {false, false, false, true, false, false}, - "none": {true, false, false, false, true, false}, - "default": {true, false, false, false, false, true}, - } - networkModeNames := map[container.NetworkMode]string{ - "": "", - "something:weird": "something:weird", - "bridge": "bridge", - DefaultDaemonNetworkMode(): "bridge", - "host": "host", - "container:name": "container", - "none": "none", - "default": "default", - } - for networkMode, state := range networkModes { - if networkMode.IsPrivate() != state[0] { - t.Fatalf("NetworkMode.IsPrivate for %v should have been %v but was %v", networkMode, state[0], networkMode.IsPrivate()) - } - if networkMode.IsBridge() != state[1] { - t.Fatalf("NetworkMode.IsBridge for %v should have been %v but was %v", networkMode, state[1], networkMode.IsBridge()) - } - if networkMode.IsHost() != state[2] { - t.Fatalf("NetworkMode.IsHost for %v should have been %v but was %v", networkMode, state[2], networkMode.IsHost()) - } - if networkMode.IsContainer() != state[3] { - t.Fatalf("NetworkMode.IsContainer for %v should have been %v but was %v", networkMode, state[3], networkMode.IsContainer()) - } - if networkMode.IsNone() != state[4] { - t.Fatalf("NetworkMode.IsNone for %v should have been %v but was %v", networkMode, state[4], networkMode.IsNone()) - } - if networkMode.IsDefault() != state[5] { - t.Fatalf("NetworkMode.IsDefault for %v should have been %v but was %v", networkMode, state[5], networkMode.IsDefault()) - } - if networkMode.NetworkName() != networkModeNames[networkMode] { - t.Fatalf("Expected name %v, got %v", networkModeNames[networkMode], networkMode.NetworkName()) - } - } -} - -func TestIpcModeTest(t *testing.T) { - ipcModes := map[container.IpcMode][]bool{ - // private, host, container, valid - "": {true, false, false, true}, - "something:weird": {true, false, false, false}, - ":weird": {true, false, false, true}, - "host": {false, true, false, true}, - "container:name": {false, false, true, true}, - "container:name:something": {false, false, true, false}, - "container:": {false, false, true, false}, - } - for ipcMode, state := range ipcModes { - if ipcMode.IsPrivate() != state[0] { - t.Fatalf("IpcMode.IsPrivate for %v should have been %v but was %v", ipcMode, state[0], ipcMode.IsPrivate()) - } - if ipcMode.IsHost() != state[1] { - t.Fatalf("IpcMode.IsHost for %v should have been %v but was %v", ipcMode, state[1], ipcMode.IsHost()) - } - if ipcMode.IsContainer() != state[2] { - t.Fatalf("IpcMode.IsContainer for %v should have been %v but was %v", ipcMode, state[2], ipcMode.IsContainer()) - } - if ipcMode.Valid() != state[3] { - t.Fatalf("IpcMode.Valid for %v should have been %v but was %v", ipcMode, state[3], ipcMode.Valid()) - } - } - containerIpcModes := map[container.IpcMode]string{ - "": "", - "something": "", - "something:weird": "weird", - "container": "", - "container:": "", - "container:name": "name", - "container:name1:name2": "name1:name2", - } - for ipcMode, container := range containerIpcModes { - if ipcMode.Container() != container { - t.Fatalf("Expected %v for %v but was %v", container, 
ipcMode, ipcMode.Container()) - } - } -} - -func TestUTSModeTest(t *testing.T) { - utsModes := map[container.UTSMode][]bool{ - // private, host, valid - "": {true, false, true}, - "something:weird": {true, false, false}, - "host": {false, true, true}, - "host:name": {true, false, true}, - } - for utsMode, state := range utsModes { - if utsMode.IsPrivate() != state[0] { - t.Fatalf("UtsMode.IsPrivate for %v should have been %v but was %v", utsMode, state[0], utsMode.IsPrivate()) - } - if utsMode.IsHost() != state[1] { - t.Fatalf("UtsMode.IsHost for %v should have been %v but was %v", utsMode, state[1], utsMode.IsHost()) - } - if utsMode.Valid() != state[2] { - t.Fatalf("UtsMode.Valid for %v should have been %v but was %v", utsMode, state[2], utsMode.Valid()) - } - } -} - -func TestUsernsModeTest(t *testing.T) { - usrensMode := map[container.UsernsMode][]bool{ - // private, host, valid - "": {true, false, true}, - "something:weird": {true, false, false}, - "host": {false, true, true}, - "host:name": {true, false, true}, - } - for usernsMode, state := range usrensMode { - if usernsMode.IsPrivate() != state[0] { - t.Fatalf("UsernsMode.IsPrivate for %v should have been %v but was %v", usernsMode, state[0], usernsMode.IsPrivate()) - } - if usernsMode.IsHost() != state[1] { - t.Fatalf("UsernsMode.IsHost for %v should have been %v but was %v", usernsMode, state[1], usernsMode.IsHost()) - } - if usernsMode.Valid() != state[2] { - t.Fatalf("UsernsMode.Valid for %v should have been %v but was %v", usernsMode, state[2], usernsMode.Valid()) - } - } -} - -func TestPidModeTest(t *testing.T) { - pidModes := map[container.PidMode][]bool{ - // private, host, valid - "": {true, false, true}, - "something:weird": {true, false, false}, - "host": {false, true, true}, - "host:name": {true, false, true}, - } - for pidMode, state := range pidModes { - if pidMode.IsPrivate() != state[0] { - t.Fatalf("PidMode.IsPrivate for %v should have been %v but was %v", pidMode, state[0], pidMode.IsPrivate()) - } - if pidMode.IsHost() != state[1] { - t.Fatalf("PidMode.IsHost for %v should have been %v but was %v", pidMode, state[1], pidMode.IsHost()) - } - if pidMode.Valid() != state[2] { - t.Fatalf("PidMode.Valid for %v should have been %v but was %v", pidMode, state[2], pidMode.Valid()) - } - } -} - -func TestRestartPolicy(t *testing.T) { - restartPolicies := map[container.RestartPolicy][]bool{ - // none, always, failure - container.RestartPolicy{}: {true, false, false}, - container.RestartPolicy{"something", 0}: {false, false, false}, - container.RestartPolicy{"no", 0}: {true, false, false}, - container.RestartPolicy{"always", 0}: {false, true, false}, - container.RestartPolicy{"on-failure", 0}: {false, false, true}, - } - for restartPolicy, state := range restartPolicies { - if restartPolicy.IsNone() != state[0] { - t.Fatalf("RestartPolicy.IsNone for %v should have been %v but was %v", restartPolicy, state[0], restartPolicy.IsNone()) - } - if restartPolicy.IsAlways() != state[1] { - t.Fatalf("RestartPolicy.IsAlways for %v should have been %v but was %v", restartPolicy, state[1], restartPolicy.IsAlways()) - } - if restartPolicy.IsOnFailure() != state[2] { - t.Fatalf("RestartPolicy.IsOnFailure for %v should have been %v but was %v", restartPolicy, state[2], restartPolicy.IsOnFailure()) - } - } -} -func TestDecodeHostConfig(t *testing.T) { - fixtures := []struct { - file string - }{ - {"fixtures/unix/container_hostconfig_1_14.json"}, - {"fixtures/unix/container_hostconfig_1_19.json"}, - } - - for _, f := range fixtures { - b, 
err := ioutil.ReadFile(f.file) - if err != nil { - t.Fatal(err) - } - - c, err := DecodeHostConfig(bytes.NewReader(b)) - if err != nil { - t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) - } - - if c.Privileged != false { - t.Fatalf("Expected privileged false, found %v\n", c.Privileged) - } - - if l := len(c.Binds); l != 1 { - t.Fatalf("Expected 1 bind, found %d\n", l) - } - - if len(c.CapAdd) != 1 && c.CapAdd[0] != "NET_ADMIN" { - t.Fatalf("Expected CapAdd NET_ADMIN, got %v", c.CapAdd) - } - - if len(c.CapDrop) != 1 && c.CapDrop[0] != "NET_ADMIN" { - t.Fatalf("Expected CapDrop MKNOD, got %v", c.CapDrop) - } - } -} diff --git a/runconfig/hostconfig_unix.go b/runconfig/hostconfig_unix.go deleted file mode 100644 index c06b6ebfa..000000000 --- a/runconfig/hostconfig_unix.go +++ /dev/null @@ -1,106 +0,0 @@ -// +build !windows,!solaris - -package runconfig - -import ( - "fmt" - "runtime" - "strings" - - "github.com/docker/engine-api/types/container" -) - -// DefaultDaemonNetworkMode returns the default network stack the daemon should -// use. -func DefaultDaemonNetworkMode() container.NetworkMode { - return container.NetworkMode("bridge") -} - -// IsPreDefinedNetwork indicates if a network is predefined by the daemon -func IsPreDefinedNetwork(network string) bool { - n := container.NetworkMode(network) - return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() || network == "ingress" -} - -// ValidateNetMode ensures that the various combinations of requested -// network settings are valid. -func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - parts := strings.Split(string(hc.NetworkMode), ":") - if parts[0] == "container" { - if len(parts) < 2 || parts[1] == "" { - return fmt.Errorf("--net: invalid net mode: invalid container format container:<name|id>") - } - } - - if hc.NetworkMode.IsContainer() && c.Hostname != "" { - return ErrConflictNetworkHostname - } - - if hc.UTSMode.IsHost() && c.Hostname != "" { - return ErrConflictUTSHostname - } - - if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { - return ErrConflictHostNetworkAndLinks - } - - if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { - return ErrConflictContainerNetworkAndLinks - } - - if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 { - return ErrConflictNetworkAndDNS - } - - if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 { - return ErrConflictNetworkHosts - } - - if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { - return ErrConflictContainerNetworkAndMac - } - - if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) { - return ErrConflictNetworkPublishPorts - } - - if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { - return ErrConflictNetworkExposePorts - } - return nil -} - -// ValidateIsolation performs platform specific validation of -// isolation in the hostconfig structure.
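An illustrative aside, not part of the patch: the conflict checks in ValidateNetMode above all follow one shape, parse the mode string, then reject incompatible option combinations. A sketch reduced to two of those checks; hostConfig and validateNetMode are trimmed stand-ins for the real types.

package main

import (
	"errors"
	"fmt"
	"strings"
)

type hostConfig struct {
	NetworkMode string
	Links       []string
}

// validateNetMode keeps two representative checks: a malformed
// "container:" mode, and host networking combined with links.
func validateNetMode(hc *hostConfig) error {
	if hc == nil {
		return nil // e.g. docker commit passes no host config
	}
	parts := strings.Split(hc.NetworkMode, ":")
	if parts[0] == "container" && (len(parts) < 2 || parts[1] == "") {
		return errors.New("--net: invalid net mode: invalid container format container:<name|id>")
	}
	if parts[0] == "host" && len(hc.Links) > 0 {
		return errors.New("conflicting options: host type networking can't be used with links")
	}
	return nil
}

func main() {
	fmt.Println(validateNetMode(&hostConfig{NetworkMode: "container:"}))
	fmt.Println(validateNetMode(&hostConfig{NetworkMode: "host", Links: []string{"db:db"}}))
	fmt.Println(validateNetMode(&hostConfig{NetworkMode: "bridge"})) // <nil>
}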
Linux only supports "default" -// which is LXC container isolation -func ValidateIsolation(hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - if !hc.Isolation.IsValid() { - return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) - } - return nil -} - -// ValidateQoS performs platform specific validation of the QoS settings -func ValidateQoS(hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - - if hc.IOMaximumBandwidth != 0 { - return fmt.Errorf("invalid QoS settings: %s does not support --io-maxbandwidth", runtime.GOOS) - } - - if hc.IOMaximumIOps != 0 { - return fmt.Errorf("invalid QoS settings: %s does not support --io-maxiops", runtime.GOOS) - } - return nil -} diff --git a/runconfig/hostconfig_windows.go b/runconfig/hostconfig_windows.go deleted file mode 100644 index d06452db1..000000000 --- a/runconfig/hostconfig_windows.go +++ /dev/null @@ -1,51 +0,0 @@ -package runconfig - -import ( - "fmt" - "strings" - - "github.com/docker/engine-api/types/container" -) - -// DefaultDaemonNetworkMode returns the default network stack the daemon should -// use. -func DefaultDaemonNetworkMode() container.NetworkMode { - return container.NetworkMode("nat") -} - -// IsPreDefinedNetwork indicates if a network is predefined by the daemon -func IsPreDefinedNetwork(network string) bool { - return !container.NetworkMode(network).IsUserDefined() -} - -// ValidateNetMode ensures that the various combinations of requested -// network settings are valid. -func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { - if hc == nil { - return nil - } - parts := strings.Split(string(hc.NetworkMode), ":") - if len(parts) > 1 { - return fmt.Errorf("invalid --net: %s", hc.NetworkMode) - } - return nil -} - -// ValidateIsolation performs platform specific validation of the -// isolation in the hostconfig structure. Windows supports 'default' (or -// blank), 'process', or 'hyperv'. -func ValidateIsolation(hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - if !hc.Isolation.IsValid() { - return fmt.Errorf("invalid --isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) - } - return nil -} - -// ValidateQoS performs platform specific validation of the Qos settings -func ValidateQoS(hc *container.HostConfig) error { - return nil -} diff --git a/runconfig/opts/envfile.go b/runconfig/opts/envfile.go deleted file mode 100644 index ba8b4f201..000000000 --- a/runconfig/opts/envfile.go +++ /dev/null @@ -1,67 +0,0 @@ -package opts - -import ( - "bufio" - "fmt" - "os" - "strings" -) - -// ParseEnvFile reads a file with environment variables enumerated by lines -// -// ``Environment variable names used by the utilities in the Shell and -// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase -// letters, digits, and the '_' (underscore) from the characters defined in -// Portable Character Set and do not begin with a digit. 
*But*, other -// characters may be permitted by an implementation; applications shall -// tolerate the presence of such names.'' -// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html -// -// As of #16585, it's up to the application inside docker to validate -// environment variables or not; that's why we just strip leading whitespace and -// nothing more. -func ParseEnvFile(filename string) ([]string, error) { - fh, err := os.Open(filename) - if err != nil { - return []string{}, err - } - defer fh.Close() - - lines := []string{} - scanner := bufio.NewScanner(fh) - for scanner.Scan() { - // trim the line from all leading whitespace first - line := strings.TrimLeft(scanner.Text(), whiteSpaces) - // line is not empty, and not starting with '#' - if len(line) > 0 && !strings.HasPrefix(line, "#") { - data := strings.SplitN(line, "=", 2) - - // trim the front of a variable, but nothing else - variable := strings.TrimLeft(data[0], whiteSpaces) - if strings.ContainsAny(variable, whiteSpaces) { - return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} - } - - if len(data) > 1 { - - // pass the value through, no trimming - lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) - } else { - // if only a pass-through variable is given, clean it up. - lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) - } - } - } - return lines, scanner.Err() -} - -var whiteSpaces = " \t" - -// ErrBadEnvVariable typed error for bad environment variable -type ErrBadEnvVariable struct { - msg string -} - -func (e ErrBadEnvVariable) Error() string { - return fmt.Sprintf("poorly formatted environment: %s", e.msg) -} diff --git a/runconfig/opts/envfile_test.go b/runconfig/opts/envfile_test.go deleted file mode 100644 index 5dd7078bc..000000000 --- a/runconfig/opts/envfile_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package opts - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "reflect" - "strings" - "testing" -) - -func tmpFileWithContent(content string, t *testing.T) string { - tmpFile, err := ioutil.TempFile("", "envfile-test") - if err != nil { - t.Fatal(err) - } - defer tmpFile.Close() - - tmpFile.WriteString(content) - return tmpFile.Name() -} - -// Test ParseEnvFile for a file with a few well formatted lines -func TestParseEnvFileGoodFile(t *testing.T) { - content := `foo=bar - baz=quux -# comment - -_foobar=foobaz -with.dots=working -and_underscore=working too -` - // Adding a newline + a line with pure whitespace. - // This is being done like this instead of the block above - // because it's common for editors to trim trailing whitespace - // from lines, which becomes annoying since that's the - // exact thing we need to test. - content += "\n \t " - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - expectedLines := []string{ - "foo=bar", - "baz=quux", - "_foobar=foobaz", - "with.dots=working", - "and_underscore=working too", - } - - if !reflect.DeepEqual(lines, expectedLines) { - t.Fatal("lines not equal to expected_lines") - } -} - -// Test ParseEnvFile for an empty file -func TestParseEnvFileEmptyFile(t *testing.T) { - tmpFile := tmpFileWithContent("", t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - if len(lines) != 0 { - t.Fatal("lines not empty; expected empty") - } -} - -// Test ParseEnvFile for a non-existent file -func TestParseEnvFileNonExistentFile(t *testing.T) { - _, err := ParseEnvFile("foo_bar_baz") - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } - if _, ok := err.(*os.PathError); !ok { - t.Fatalf("Expected a PathError, got [%v]", err) - } -} - -// Test ParseEnvFile for a badly formatted file -func TestParseEnvFileBadlyFormattedFile(t *testing.T) { - content := `foo=bar - f =quux -` - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - if err == nil { - t.Fatalf("Expected an ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected an ErrBadEnvVariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'f ' has white spaces" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} - -// Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize -func TestParseEnvFileLineTooLongFile(t *testing.T) { - content := strings.Repeat("a", bufio.MaxScanTokenSize+42) - content = fmt.Sprint("foo=", content) - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } -} - -// ParseEnvFile with a random file, pass through -func TestParseEnvFileRandomFile(t *testing.T) { - content := `first line -another invalid line` - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - - if err == nil { - t.Fatalf("Expected an ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected an ErrBadEnvVariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'first line' has white spaces" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} diff --git a/runconfig/opts/fixtures/valid.env b/runconfig/opts/fixtures/valid.env deleted file mode 100644 index 3afbdc81c..000000000 --- a/runconfig/opts/fixtures/valid.env +++ /dev/null @@ -1 +0,0 @@ -ENV1=value1 diff --git a/runconfig/opts/fixtures/valid.label b/runconfig/opts/fixtures/valid.label deleted file mode 100644 index b4208bdf8..000000000 --- a/runconfig/opts/fixtures/valid.label +++ /dev/null @@ -1 +0,0 @@ -LABEL1=value1 diff --git a/runconfig/opts/opts.go b/runconfig/opts/opts.go deleted file mode 100644 index b6d2e000e..000000000 --- a/runconfig/opts/opts.go +++ /dev/null @@ -1,70 +0,0 @@ -package opts - -import ( - "fmt" - fopts "github.com/docker/docker/opts" - "net" - "os" - "strings" -) - -// ValidateAttach validates that the specified string is a valid attach option.
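// An illustrative note, not from the original source: ValidateAttach lower-cases
// its input and accepts only the three standard streams, so ValidateAttach("STDOUT")
// returns ("stdout", nil), while ValidateAttach("console") returns the
// "valid streams are STDIN, STDOUT and STDERR" error.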
-func ValidateAttach(val string) (string, error) { - s := strings.ToLower(val) - for _, str := range []string{"stdin", "stdout", "stderr"} { - if s == str { - return s, nil - } - } - return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR") -} - -// ValidateEnv validates an environment variable and returns it. -// If no value is specified, it returns the current value using os.Getenv. -// -// As with ParseEnvFile and related to #16585, environment variable names -// are not validated whatsoever; it's up to the application inside docker -// to validate them or not. -func ValidateEnv(val string) (string, error) { - arr := strings.Split(val, "=") - if len(arr) > 1 { - return val, nil - } - if !doesEnvExist(val) { - return val, nil - } - return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil -} - -func doesEnvExist(name string) bool { - for _, entry := range os.Environ() { - parts := strings.SplitN(entry, "=", 2) - if parts[0] == name { - return true - } - } - return false -} - -// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost is in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). -func ValidateExtraHost(val string) (string, error) { - // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { - return "", fmt.Errorf("bad format for add-host: %q", val) - } - if _, err := fopts.ValidateIPAddress(arr[1]); err != nil { - return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) - } - return val, nil -} - -// ValidateMACAddress validates a MAC address. -func ValidateMACAddress(val string) (string, error) { - _, err := net.ParseMAC(strings.TrimSpace(val)) - if err != nil { - return "", err - } - return val, nil -} diff --git a/runconfig/opts/opts_test.go b/runconfig/opts/opts_test.go deleted file mode 100644 index 69eac88ad..000000000 --- a/runconfig/opts/opts_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package opts - -import ( - "fmt" - "os" - "strings" - "testing" -) - -func TestValidateAttach(t *testing.T) { - valid := []string{ - "stdin", - "stdout", - "stderr", - "STDIN", - "STDOUT", - "STDERR", - } - if _, err := ValidateAttach("invalid"); err == nil { - t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") - } - - for _, attach := range valid { - value, err := ValidateAttach(attach) - if err != nil { - t.Fatal(err) - } - if value != strings.ToLower(attach) { - t.Fatalf("Expected [%v], got [%v]", attach, value) - } - } -} - -func TestValidateEnv(t *testing.T) { - valids := map[string]string{ - "a": "a", - "something": "something", - "_=a": "_=a", - "env1=value1": "env1=value1", - "_env1=value1": "_env1=value1", - "env2=value2=value3": "env2=value2=value3", - "env3=abc!qwe": "env3=abc!qwe", - "env_4=value 4": "env_4=value 4", - "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), - "PATH=something": "PATH=something", - "asd!qwe": "asd!qwe", - "1asd": "1asd", - "123": "123", - "some space": "some space", - " some space before": " some space before", - "some space after ": "some space after ", - } - for value, expected := range valids { - actual, err := ValidateEnv(value) - if err != nil { - t.Fatal(err) - } - if actual != expected { - t.Fatalf("Expected [%v], got [%v]", expected, actual) - } - } -} - -func TestValidateExtraHosts(t *testing.T) { - valid := []string{ - `myhost:192.168.0.1`, - `thathost:10.0.2.1`, - `anipv6host:2003:ab34:e::1`, - `ipv6local:::1`, - } - - invalid :=
map[string]string{ - `myhost:192.notanipaddress.1`: `invalid IP`, - `thathost-nosemicolon10.0.0.1`: `bad format`, - `anipv6host:::::1`: `invalid IP`, - `ipv6local:::0::`: `invalid IP`, - } - - for _, extrahost := range valid { - if _, err := ValidateExtraHost(extrahost); err != nil { - t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) - } - } - - for extraHost, expectedError := range invalid { - if _, err := ValidateExtraHost(extraHost); err == nil { - t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) - } - } - } -} - -func TestValidateMACAddress(t *testing.T) { - if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) - } - - if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") - } - - if _, err := ValidateMACAddress(`random invalid string`); err == nil { - t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") - } -} diff --git a/runconfig/opts/parse.go b/runconfig/opts/parse.go deleted file mode 100644 index e6409cc4d..000000000 --- a/runconfig/opts/parse.go +++ /dev/null @@ -1,949 +0,0 @@ -package opts - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "path" - "strconv" - "strings" - "time" - - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/signal" - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" - "github.com/spf13/pflag" -) - -// ContainerOptions is a data object with all the options for creating a container -// TODO: remove fl prefix -type ContainerOptions struct { - flAttach opts.ListOpts - flVolumes opts.ListOpts - flTmpfs opts.ListOpts - flBlkioWeightDevice WeightdeviceOpt - flDeviceReadBps ThrottledeviceOpt - flDeviceWriteBps ThrottledeviceOpt - flLinks opts.ListOpts - flAliases opts.ListOpts - flLinkLocalIPs opts.ListOpts - flDeviceReadIOps ThrottledeviceOpt - flDeviceWriteIOps ThrottledeviceOpt - flEnv opts.ListOpts - flLabels opts.ListOpts - flDevices opts.ListOpts - flUlimits *UlimitOpt - flSysctls *opts.MapOpts - flPublish opts.ListOpts - flExpose opts.ListOpts - flDNS opts.ListOpts - flDNSSearch opts.ListOpts - flDNSOptions opts.ListOpts - flExtraHosts opts.ListOpts - flVolumesFrom opts.ListOpts - flEnvFile opts.ListOpts - flCapAdd opts.ListOpts - flCapDrop opts.ListOpts - flGroupAdd opts.ListOpts - flSecurityOpt opts.ListOpts - flStorageOpt opts.ListOpts - flLabelsFile opts.ListOpts - flLoggingOpts opts.ListOpts - flPrivileged bool - flPidMode string - flUTSMode string - flUsernsMode string - flPublishAll bool - flStdin bool - flTty bool - flOomKillDisable bool - flOomScoreAdj int - flContainerIDFile string - flEntrypoint string - flHostname string - flMemoryString string - flMemoryReservation string - flMemorySwap string - flKernelMemory string - flUser string - flWorkingDir string - flCPUShares int64 - flCPUPercent int64 - flCPUPeriod int64 - flCPUQuota int64 - flCpusetCpus string - flCpusetMems string - flBlkioWeight uint16 - flIOMaxBandwidth string - flIOMaxIOps uint64 - flSwappiness int64 - 
flNetMode string - flMacAddress string - flIPv4Address string - flIPv6Address string - flIpcMode string - flPidsLimit int64 - flRestartPolicy string - flReadonlyRootfs bool - flLoggingDriver string - flCgroupParent string - flVolumeDriver string - flStopSignal string - flIsolation string - flShmSize string - flNoHealthcheck bool - flHealthCmd string - flHealthInterval time.Duration - flHealthTimeout time.Duration - flHealthRetries int - flRuntime string - - Image string - Args []string -} - -// AddFlags adds all command line flags that will be used by Parse to the FlagSet -func AddFlags(flags *pflag.FlagSet) *ContainerOptions { - copts := &ContainerOptions{ - flAliases: opts.NewListOpts(nil), - flAttach: opts.NewListOpts(ValidateAttach), - flBlkioWeightDevice: NewWeightdeviceOpt(ValidateWeightDevice), - flCapAdd: opts.NewListOpts(nil), - flCapDrop: opts.NewListOpts(nil), - flDNS: opts.NewListOpts(opts.ValidateIPAddress), - flDNSOptions: opts.NewListOpts(nil), - flDNSSearch: opts.NewListOpts(opts.ValidateDNSSearch), - flDeviceReadBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), - flDeviceReadIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), - flDeviceWriteBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), - flDeviceWriteIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), - flDevices: opts.NewListOpts(ValidateDevice), - flEnv: opts.NewListOpts(ValidateEnv), - flEnvFile: opts.NewListOpts(nil), - flExpose: opts.NewListOpts(nil), - flExtraHosts: opts.NewListOpts(ValidateExtraHost), - flGroupAdd: opts.NewListOpts(nil), - flLabels: opts.NewListOpts(ValidateEnv), - flLabelsFile: opts.NewListOpts(nil), - flLinkLocalIPs: opts.NewListOpts(nil), - flLinks: opts.NewListOpts(ValidateLink), - flLoggingOpts: opts.NewListOpts(nil), - flPublish: opts.NewListOpts(nil), - flSecurityOpt: opts.NewListOpts(nil), - flStorageOpt: opts.NewListOpts(nil), - flSysctls: opts.NewMapOpts(nil, opts.ValidateSysctl), - flTmpfs: opts.NewListOpts(nil), - flUlimits: NewUlimitOpt(nil), - flVolumes: opts.NewListOpts(nil), - flVolumesFrom: opts.NewListOpts(nil), - } - - // General purpose flags - flags.VarP(&copts.flAttach, "attach", "a", "Attach to STDIN, STDOUT or STDERR") - flags.Var(&copts.flDevices, "device", "Add a host device to the container") - flags.VarP(&copts.flEnv, "env", "e", "Set environment variables") - flags.Var(&copts.flEnvFile, "env-file", "Read in a file of environment variables") - flags.StringVar(&copts.flEntrypoint, "entrypoint", "", "Overwrite the default ENTRYPOINT of the image") - flags.Var(&copts.flGroupAdd, "group-add", "Add additional groups to join") - flags.StringVarP(&copts.flHostname, "hostname", "h", "", "Container host name") - flags.BoolVarP(&copts.flStdin, "interactive", "i", false, "Keep STDIN open even if not attached") - flags.VarP(&copts.flLabels, "label", "l", "Set meta data on a container") - flags.Var(&copts.flLabelsFile, "label-file", "Read in a line delimited file of labels") - flags.BoolVar(&copts.flReadonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only") - flags.StringVar(&copts.flRestartPolicy, "restart", "no", "Restart policy to apply when a container exits") - flags.StringVar(&copts.flStopSignal, "stop-signal", signal.DefaultStopSignal, fmt.Sprintf("Signal to stop a container, %v by default", signal.DefaultStopSignal)) - flags.Var(copts.flSysctls, "sysctl", "Sysctl options") - flags.BoolVarP(&copts.flTty, "tty", "t", false, "Allocate a pseudo-TTY") - flags.Var(copts.flUlimits, "ulimit", "Ulimit options") - 
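// Note (illustrative, added for clarity; not part of the original patch):
// list-valued options created with a validator, e.g. opts.NewListOpts(ValidateAttach)
// above, run that validator on every value the user supplies, so an invalid
// value such as `--attach console` is rejected during flag parsing rather
// than later, at container-creation time.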
flags.StringVarP(&copts.flUser, "user", "u", "", "Username or UID (format: <name|uid>[:<group|gid>])") - flags.StringVarP(&copts.flWorkingDir, "workdir", "w", "", "Working directory inside the container") - - // Security - flags.Var(&copts.flCapAdd, "cap-add", "Add Linux capabilities") - flags.Var(&copts.flCapDrop, "cap-drop", "Drop Linux capabilities") - flags.BoolVar(&copts.flPrivileged, "privileged", false, "Give extended privileges to this container") - flags.Var(&copts.flSecurityOpt, "security-opt", "Security Options") - flags.StringVar(&copts.flUsernsMode, "userns", "", "User namespace to use") - - // Network and port publishing flags - flags.Var(&copts.flExtraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") - flags.Var(&copts.flDNS, "dns", "Set custom DNS servers") - flags.Var(&copts.flDNSOptions, "dns-opt", "Set DNS options") - flags.Var(&copts.flDNSSearch, "dns-search", "Set custom DNS search domains") - flags.Var(&copts.flExpose, "expose", "Expose a port or a range of ports") - flags.StringVar(&copts.flIPv4Address, "ip", "", "Container IPv4 address (e.g. 172.30.100.104)") - flags.StringVar(&copts.flIPv6Address, "ip6", "", "Container IPv6 address (e.g. 2001:db8::33)") - flags.Var(&copts.flLinks, "link", "Add link to another container") - flags.Var(&copts.flLinkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses") - flags.StringVar(&copts.flMacAddress, "mac-address", "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)") - flags.VarP(&copts.flPublish, "publish", "p", "Publish a container's port(s) to the host") - flags.BoolVarP(&copts.flPublishAll, "publish-all", "P", false, "Publish all exposed ports to random ports") - // We allow for both "--net" and "--network", although the latter is the recommended way. - flags.StringVar(&copts.flNetMode, "net", "default", "Connect a container to a network") - flags.StringVar(&copts.flNetMode, "network", "default", "Connect a container to a network") - flags.MarkHidden("net") - // We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way.
- flags.Var(&copts.flAliases, "net-alias", "Add network-scoped alias for the container") - flags.Var(&copts.flAliases, "network-alias", "Add network-scoped alias for the container") - flags.MarkHidden("net-alias") - - // Logging and storage - flags.StringVar(&copts.flLoggingDriver, "log-driver", "", "Logging driver for container") - flags.StringVar(&copts.flVolumeDriver, "volume-driver", "", "Optional volume driver for the container") - flags.Var(&copts.flLoggingOpts, "log-opt", "Log driver options") - flags.Var(&copts.flStorageOpt, "storage-opt", "Set storage driver options per container") - flags.Var(&copts.flTmpfs, "tmpfs", "Mount a tmpfs directory") - flags.Var(&copts.flVolumesFrom, "volumes-from", "Mount volumes from the specified container(s)") - flags.VarP(&copts.flVolumes, "volume", "v", "Bind mount a volume") - - // Health-checking - flags.StringVar(&copts.flHealthCmd, "health-cmd", "", "Command to run to check health") - flags.DurationVar(&copts.flHealthInterval, "health-interval", 0, "Time between running the check") - flags.IntVar(&copts.flHealthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy") - flags.DurationVar(&copts.flHealthTimeout, "health-timeout", 0, "Maximum time to allow one check to run") - flags.BoolVar(&copts.flNoHealthcheck, "no-healthcheck", false, "Disable any container-specified HEALTHCHECK") - - // Resource management - flags.Uint16Var(&copts.flBlkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000") - flags.Var(&copts.flBlkioWeightDevice, "blkio-weight-device", "Block IO weight (relative device weight)") - flags.StringVar(&copts.flContainerIDFile, "cidfile", "", "Write the container ID to the file") - flags.StringVar(&copts.flCpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") - flags.StringVar(&copts.flCpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.Int64Var(&copts.flCPUPercent, "cpu-percent", 0, "CPU percent (Windows only)") - flags.Int64Var(&copts.flCPUPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") - flags.Int64Var(&copts.flCPUQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") - flags.Int64VarP(&copts.flCPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.Var(&copts.flDeviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device") - flags.Var(&copts.flDeviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device") - flags.Var(&copts.flDeviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device") - flags.Var(&copts.flDeviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device") - flags.StringVar(&copts.flIOMaxBandwidth, "io-maxbandwidth", "", "Maximum IO bandwidth limit for the system drive (Windows only)") - flags.Uint64Var(&copts.flIOMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)") - flags.StringVar(&copts.flKernelMemory, "kernel-memory", "", "Kernel memory limit") - flags.StringVarP(&copts.flMemoryString, "memory", "m", "", "Memory limit") - flags.StringVar(&copts.flMemoryReservation, "memory-reservation", "", "Memory soft limit") - flags.StringVar(&copts.flMemorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.Int64Var(&copts.flSwappiness, "memory-swappiness", -1, "Tune container memory swappiness (0 to 100)") - flags.BoolVar(&copts.flOomKillDisable, "oom-kill-disable", false, 
"Disable OOM Killer") - flags.IntVar(&copts.flOomScoreAdj, "oom-score-adj", 0, "Tune host's OOM preferences (-1000 to 1000)") - flags.Int64Var(&copts.flPidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)") - - // Low-level execution (cgroups, namespaces, ...) - flags.StringVar(&copts.flCgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") - flags.StringVar(&copts.flIpcMode, "ipc", "", "IPC namespace to use") - flags.StringVar(&copts.flIsolation, "isolation", "", "Container isolation technology") - flags.StringVar(&copts.flPidMode, "pid", "", "PID namespace to use") - flags.StringVar(&copts.flShmSize, "shm-size", "", "Size of /dev/shm, default value is 64MB") - flags.StringVar(&copts.flUTSMode, "uts", "", "UTS namespace to use") - flags.StringVar(&copts.flRuntime, "runtime", "", "Runtime to use for this container") - return copts -} - -// Parse parses the args for the specified command and generates a Config, -// a HostConfig and returns them with the specified command. -// If the specified args are not valid, it will return an error. -func Parse(flags *pflag.FlagSet, copts *ContainerOptions) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - var ( - attachStdin = copts.flAttach.Get("stdin") - attachStdout = copts.flAttach.Get("stdout") - attachStderr = copts.flAttach.Get("stderr") - ) - - // Validate the input mac address - if copts.flMacAddress != "" { - if _, err := ValidateMACAddress(copts.flMacAddress); err != nil { - return nil, nil, nil, fmt.Errorf("%s is not a valid mac address", copts.flMacAddress) - } - } - if copts.flStdin { - attachStdin = true - } - // If -a is not set, attach to stdout and stderr - if copts.flAttach.Len() == 0 { - attachStdout = true - attachStderr = true - } - - var err error - - var flMemory int64 - if copts.flMemoryString != "" { - flMemory, err = units.RAMInBytes(copts.flMemoryString) - if err != nil { - return nil, nil, nil, err - } - } - - var MemoryReservation int64 - if copts.flMemoryReservation != "" { - MemoryReservation, err = units.RAMInBytes(copts.flMemoryReservation) - if err != nil { - return nil, nil, nil, err - } - } - - var memorySwap int64 - if copts.flMemorySwap != "" { - if copts.flMemorySwap == "-1" { - memorySwap = -1 - } else { - memorySwap, err = units.RAMInBytes(copts.flMemorySwap) - if err != nil { - return nil, nil, nil, err - } - } - } - - var KernelMemory int64 - if copts.flKernelMemory != "" { - KernelMemory, err = units.RAMInBytes(copts.flKernelMemory) - if err != nil { - return nil, nil, nil, err - } - } - - swappiness := copts.flSwappiness - if swappiness != -1 && (swappiness < 0 || swappiness > 100) { - return nil, nil, nil, fmt.Errorf("invalid value: %d. Valid memory swappiness range is 0-100", swappiness) - } - - var shmSize int64 - if copts.flShmSize != "" { - shmSize, err = units.RAMInBytes(copts.flShmSize) - if err != nil { - return nil, nil, nil, err - } - } - - // TODO FIXME units.RAMInBytes should have a uint64 version - var maxIOBandwidth int64 - if copts.flIOMaxBandwidth != "" { - maxIOBandwidth, err = units.RAMInBytes(copts.flIOMaxBandwidth) - if err != nil { - return nil, nil, nil, err - } - if maxIOBandwidth < 0 { - return nil, nil, nil, fmt.Errorf("invalid value: %s. 
Maximum IO Bandwidth must be positive", copts.flIOMaxBandwidth) - } - } - - var binds []string - // add any bind targets to the list of container volumes - for bind := range copts.flVolumes.GetMap() { - if arr := volumeSplitN(bind, 2); len(arr) > 1 { - // after creating the bind mount we want to delete it from the copts.flVolumes values because - // we do not want bind mounts being committed to image configs - binds = append(binds, bind) - copts.flVolumes.Delete(bind) - } - } - - // Can't evaluate options passed into --tmpfs until we actually mount - tmpfs := make(map[string]string) - for _, t := range copts.flTmpfs.GetAll() { - if arr := strings.SplitN(t, ":", 2); len(arr) > 1 { - if _, _, err := mount.ParseTmpfsOptions(arr[1]); err != nil { - return nil, nil, nil, err - } - tmpfs[arr[0]] = arr[1] - } else { - tmpfs[arr[0]] = "" - } - } - - var ( - runCmd strslice.StrSlice - entrypoint strslice.StrSlice - ) - if len(copts.Args) > 0 { - runCmd = strslice.StrSlice(copts.Args) - } - if copts.flEntrypoint != "" { - entrypoint = strslice.StrSlice{copts.flEntrypoint} - } - - ports, portBindings, err := nat.ParsePortSpecs(copts.flPublish.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - // Merge in exposed ports to the map of published ports - for _, e := range copts.flExpose.GetAll() { - if strings.Contains(e, ":") { - return nil, nil, nil, fmt.Errorf("invalid port format for --expose: %s", e) - } - //support two formats for expose, original format <portno>/[<proto>] or <startport-endport>/[<proto>] - proto, port := nat.SplitProtoPort(e) - //parse the start and end port and create a sequence of ports to expose - //if exposing a single port, the start and end port are the same - start, end, err := nat.ParsePortRange(port) - if err != nil { - return nil, nil, nil, fmt.Errorf("invalid range format for --expose: %s, error: %s", e, err) - } - for i := start; i <= end; i++ { - p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) - if err != nil { - return nil, nil, nil, err - } - if _, exists := ports[p]; !exists { - ports[p] = struct{}{} - } - } - } - - // parse device mappings - deviceMappings := []container.DeviceMapping{} - for _, device := range copts.flDevices.GetAll() { - deviceMapping, err := ParseDevice(device) - if err != nil { - return nil, nil, nil, err - } - deviceMappings = append(deviceMappings, deviceMapping) - } - - // collect all the environment variables for the container - envVariables, err := readKVStrings(copts.flEnvFile.GetAll(), copts.flEnv.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - // collect all the labels for the container - labels, err := readKVStrings(copts.flLabelsFile.GetAll(), copts.flLabels.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - ipcMode := container.IpcMode(copts.flIpcMode) - if !ipcMode.Valid() { - return nil, nil, nil, fmt.Errorf("--ipc: invalid IPC mode") - } - - pidMode := container.PidMode(copts.flPidMode) - if !pidMode.Valid() { - return nil, nil, nil, fmt.Errorf("--pid: invalid PID mode") - } - - utsMode := container.UTSMode(copts.flUTSMode) - if !utsMode.Valid() { - return nil, nil, nil, fmt.Errorf("--uts: invalid UTS mode") - } - - usernsMode := container.UsernsMode(copts.flUsernsMode) - if !usernsMode.Valid() { - return nil, nil, nil, fmt.Errorf("--userns: invalid USER mode") - } - - restartPolicy, err := ParseRestartPolicy(copts.flRestartPolicy) - if err != nil { - return nil, nil, nil, err - } - - loggingOpts, err := parseLoggingOpts(copts.flLoggingDriver, copts.flLoggingOpts.GetAll()) - if err != nil { - return nil, nil, nil, err - } - -
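// Worked example for the --expose handling above (illustrative): the value
// "8080-8082/udp" is split by nat.SplitProtoPort into proto "udp" and port
// range "8080-8082"; nat.ParsePortRange then yields start=8080, end=8082, and
// the loop registers 8080/udp, 8081/udp and 8082/udp in the exposed-ports map.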
securityOpts, err := parseSecurityOpts(copts.flSecurityOpt.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - storageOpts, err := parseStorageOpts(copts.flStorageOpt.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - // Healthcheck - var healthConfig *container.HealthConfig - haveHealthSettings := copts.flHealthCmd != "" || - copts.flHealthInterval != 0 || - copts.flHealthTimeout != 0 || - copts.flHealthRetries != 0 - if copts.flNoHealthcheck { - if haveHealthSettings { - return nil, nil, nil, fmt.Errorf("--no-healthcheck conflicts with --health-* options") - } - test := strslice.StrSlice{"NONE"} - healthConfig = &container.HealthConfig{Test: test} - } else if haveHealthSettings { - var probe strslice.StrSlice - if copts.flHealthCmd != "" { - args := []string{"CMD-SHELL", copts.flHealthCmd} - probe = strslice.StrSlice(args) - } - if copts.flHealthInterval < 0 { - return nil, nil, nil, fmt.Errorf("--health-interval cannot be negative") - } - if copts.flHealthTimeout < 0 { - return nil, nil, nil, fmt.Errorf("--health-timeout cannot be negative") - } - - healthConfig = &container.HealthConfig{ - Test: probe, - Interval: copts.flHealthInterval, - Timeout: copts.flHealthTimeout, - Retries: copts.flHealthRetries, - } - } - - resources := container.Resources{ - CgroupParent: copts.flCgroupParent, - Memory: flMemory, - MemoryReservation: MemoryReservation, - MemorySwap: memorySwap, - MemorySwappiness: &copts.flSwappiness, - KernelMemory: KernelMemory, - OomKillDisable: &copts.flOomKillDisable, - CPUPercent: copts.flCPUPercent, - CPUShares: copts.flCPUShares, - CPUPeriod: copts.flCPUPeriod, - CpusetCpus: copts.flCpusetCpus, - CpusetMems: copts.flCpusetMems, - CPUQuota: copts.flCPUQuota, - PidsLimit: copts.flPidsLimit, - BlkioWeight: copts.flBlkioWeight, - BlkioWeightDevice: copts.flBlkioWeightDevice.GetList(), - BlkioDeviceReadBps: copts.flDeviceReadBps.GetList(), - BlkioDeviceWriteBps: copts.flDeviceWriteBps.GetList(), - BlkioDeviceReadIOps: copts.flDeviceReadIOps.GetList(), - BlkioDeviceWriteIOps: copts.flDeviceWriteIOps.GetList(), - IOMaximumIOps: copts.flIOMaxIOps, - IOMaximumBandwidth: uint64(maxIOBandwidth), - Ulimits: copts.flUlimits.GetList(), - Devices: deviceMappings, - } - - config := &container.Config{ - Hostname: copts.flHostname, - ExposedPorts: ports, - User: copts.flUser, - Tty: copts.flTty, - // TODO: deprecated, it comes from -n, --networking - // it's still needed internally to set the network to disabled - // if e.g. bridge is none in daemon opts, and in inspect - NetworkDisabled: false, - OpenStdin: copts.flStdin, - AttachStdin: attachStdin, - AttachStdout: attachStdout, - AttachStderr: attachStderr, - Env: envVariables, - Cmd: runCmd, - Image: copts.Image, - Volumes: copts.flVolumes.GetMap(), - MacAddress: copts.flMacAddress, - Entrypoint: entrypoint, - WorkingDir: copts.flWorkingDir, - Labels: ConvertKVStringsToMap(labels), - Healthcheck: healthConfig, - } - if flags.Changed("stop-signal") { - config.StopSignal = copts.flStopSignal - } - - hostConfig := &container.HostConfig{ - Binds: binds, - ContainerIDFile: copts.flContainerIDFile, - OomScoreAdj: copts.flOomScoreAdj, - Privileged: copts.flPrivileged, - PortBindings: portBindings, - Links: copts.flLinks.GetAll(), - PublishAllPorts: copts.flPublishAll, - // Make sure the dns fields are never nil. - // New containers don't ever have those fields nil, - // but pre created containers can still have those nil values. 
- // See https://github.com/docker/docker/pull/17779 - // for a more detailed explanation on why we don't want that. - DNS: copts.flDNS.GetAllOrEmpty(), - DNSSearch: copts.flDNSSearch.GetAllOrEmpty(), - DNSOptions: copts.flDNSOptions.GetAllOrEmpty(), - ExtraHosts: copts.flExtraHosts.GetAll(), - VolumesFrom: copts.flVolumesFrom.GetAll(), - NetworkMode: container.NetworkMode(copts.flNetMode), - IpcMode: ipcMode, - PidMode: pidMode, - UTSMode: utsMode, - UsernsMode: usernsMode, - CapAdd: strslice.StrSlice(copts.flCapAdd.GetAll()), - CapDrop: strslice.StrSlice(copts.flCapDrop.GetAll()), - GroupAdd: copts.flGroupAdd.GetAll(), - RestartPolicy: restartPolicy, - SecurityOpt: securityOpts, - StorageOpt: storageOpts, - ReadonlyRootfs: copts.flReadonlyRootfs, - LogConfig: container.LogConfig{Type: copts.flLoggingDriver, Config: loggingOpts}, - VolumeDriver: copts.flVolumeDriver, - Isolation: container.Isolation(copts.flIsolation), - ShmSize: shmSize, - Resources: resources, - Tmpfs: tmpfs, - Sysctls: copts.flSysctls.GetAll(), - Runtime: copts.flRuntime, - } - - // When allocating stdin in attached mode, close stdin at client disconnect - if config.OpenStdin && config.AttachStdin { - config.StdinOnce = true - } - - networkingConfig := &networktypes.NetworkingConfig{ - EndpointsConfig: make(map[string]*networktypes.EndpointSettings), - } - - if copts.flIPv4Address != "" || copts.flIPv6Address != "" || copts.flLinkLocalIPs.Len() > 0 { - epConfig := &networktypes.EndpointSettings{} - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - - epConfig.IPAMConfig = &networktypes.EndpointIPAMConfig{ - IPv4Address: copts.flIPv4Address, - IPv6Address: copts.flIPv6Address, - } - - if copts.flLinkLocalIPs.Len() > 0 { - epConfig.IPAMConfig.LinkLocalIPs = make([]string, copts.flLinkLocalIPs.Len()) - copy(epConfig.IPAMConfig.LinkLocalIPs, copts.flLinkLocalIPs.GetAll()) - } - } - - if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 { - epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] - if epConfig == nil { - epConfig = &networktypes.EndpointSettings{} - } - epConfig.Links = make([]string, len(hostConfig.Links)) - copy(epConfig.Links, hostConfig.Links) - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - } - - if copts.flAliases.Len() > 0 { - epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] - if epConfig == nil { - epConfig = &networktypes.EndpointSettings{} - } - epConfig.Aliases = make([]string, copts.flAliases.Len()) - copy(epConfig.Aliases, copts.flAliases.GetAll()) - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - } - - return config, hostConfig, networkingConfig, nil -} - -// reads a file of line terminated key=value pairs, and overrides any keys -// present in the file with additional pairs specified in the override parameter -func readKVStrings(files []string, override []string) ([]string, error) { - envVariables := []string{} - for _, ef := range files { - parsedVars, err := ParseEnvFile(ef) - if err != nil { - return nil, err - } - envVariables = append(envVariables, parsedVars...) - } - // parse the '-e' and '--env' after, to allow override - envVariables = append(envVariables, override...) 
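// Illustrative example of the override order: a label file containing
// LABEL1=from-file combined with --label LABEL1=from-cli produces the slice
// ["LABEL1=from-file", "LABEL1=from-cli"]; when ConvertKVStringsToMap (below)
// collapses that slice, the later command-line entry overwrites the file
// entry, which is what gives command-line values priority.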
- - return envVariables, nil -} - -// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} -func ConvertKVStringsToMap(values []string) map[string]string { - result := make(map[string]string, len(values)) - for _, value := range values { - kv := strings.SplitN(value, "=", 2) - if len(kv) == 1 { - result[kv[0]] = "" - } else { - result[kv[0]] = kv[1] - } - } - - return result -} - -func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { - loggingOptsMap := ConvertKVStringsToMap(loggingOpts) - if loggingDriver == "none" && len(loggingOpts) > 0 { - return map[string]string{}, fmt.Errorf("invalid logging opts for driver %s", loggingDriver) - } - return loggingOptsMap, nil -} - -// takes a local seccomp profile, reads the file contents for sending to the daemon -func parseSecurityOpts(securityOpts []string) ([]string, error) { - for key, opt := range securityOpts { - con := strings.SplitN(opt, "=", 2) - if len(con) == 1 && con[0] != "no-new-privileges" { - if strings.Contains(opt, ":") { - con = strings.SplitN(opt, ":", 2) - } else { - return securityOpts, fmt.Errorf("Invalid --security-opt: %q", opt) - } - } - if con[0] == "seccomp" && con[1] != "unconfined" { - f, err := ioutil.ReadFile(con[1]) - if err != nil { - return securityOpts, fmt.Errorf("opening seccomp profile (%s) failed: %v", con[1], err) - } - b := bytes.NewBuffer(nil) - if err := json.Compact(b, f); err != nil { - return securityOpts, fmt.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err) - } - securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes()) - } - } - - return securityOpts, nil -} - -// parses storage options per container into a map -func parseStorageOpts(storageOpts []string) (map[string]string, error) { - m := make(map[string]string) - for _, option := range storageOpts { - if strings.Contains(option, "=") { - opt := strings.SplitN(option, "=", 2) - m[opt[0]] = opt[1] - } else { - return nil, fmt.Errorf("invalid storage option") - } - } - return m, nil -} - -// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect -func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { - p := container.RestartPolicy{} - - if policy == "" { - return p, nil - } - - var ( - parts = strings.Split(policy, ":") - name = parts[0] - ) - - p.Name = name - switch name { - case "always", "unless-stopped": - if len(parts) > 1 { - return p, fmt.Errorf("maximum restart count not valid with restart policy of \"%s\"", name) - } - case "no": - // do nothing - case "on-failure": - if len(parts) > 2 { - return p, fmt.Errorf("restart count format is not valid, usage: 'on-failure:N' or 'on-failure'") - } - if len(parts) == 2 { - count, err := strconv.Atoi(parts[1]) - if err != nil { - return p, err - } - - p.MaximumRetryCount = count - } - default: - return p, fmt.Errorf("invalid restart policy %s", name) - } - - return p, nil -} - -// ParseDevice parses a device mapping string to a container.DeviceMapping struct -func ParseDevice(device string) (container.DeviceMapping, error) { - src := "" - dst := "" - permissions := "rwm" - arr := strings.Split(device, ":") - switch len(arr) { - case 3: - permissions = arr[2] - fallthrough - case 2: - if ValidDeviceMode(arr[1]) { - permissions = arr[1] - } else { - dst = arr[1] - } - fallthrough - case 1: - src = arr[0] - default: - return container.DeviceMapping{}, fmt.Errorf("invalid device specification: %s", device) - } - - if dst == "" { - dst = src - } - - deviceMapping :=
container.DeviceMapping{ - PathOnHost: src, - PathInContainer: dst, - CgroupPermissions: permissions, - } - return deviceMapping, nil -} - -// ParseLink parses and validates the specified string as a link format (name:alias) -func ParseLink(val string) (string, string, error) { - if val == "" { - return "", "", fmt.Errorf("empty string specified for links") - } - arr := strings.Split(val, ":") - if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) - } - if len(arr) == 1 { - return val, val, nil - } - // This is kept because we can actually get a HostConfig with links - // from an already created container and the format is not `foo:bar` - // but `/foo:/c1/bar` - if strings.HasPrefix(arr[0], "/") { - _, alias := path.Split(arr[1]) - return arr[0][1:], alias, nil - } - return arr[0], arr[1], nil -} - -// ValidateLink validates that the specified string has a valid link format (containerName:alias). -func ValidateLink(val string) (string, error) { - if _, _, err := ParseLink(val); err != nil { - return val, err - } - return val, nil -} - -// ValidDeviceMode checks if the mode for device is valid or not. -// Valid mode is a composition of r (read), w (write), and m (mknod). -func ValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ - 'r': true, - 'w': true, - 'm': true, - } - if mode == "" { - return false - } - for _, c := range mode { - if !legalDeviceMode[c] { - return false - } - legalDeviceMode[c] = false - } - return true -} - -// ValidateDevice validates a path for devices -// It will make sure 'val' is in the form: -// [host-dir:]container-path[:mode] -// It also validates the device mode. -func ValidateDevice(val string) (string, error) { - return validatePath(val, ValidDeviceMode) -} - -func validatePath(val string, validator func(string) bool) (string, error) { - var containerPath string - var mode string - - if strings.Count(val, ":") > 2 { - return val, fmt.Errorf("bad format for path: %s", val) - } - - split := strings.SplitN(val, ":", 3) - if split[0] == "" { - return val, fmt.Errorf("bad format for path: %s", val) - } - switch len(split) { - case 1: - containerPath = split[0] - val = path.Clean(containerPath) - case 2: - if isValid := validator(split[1]); isValid { - containerPath = split[0] - mode = split[1] - val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) - } else { - containerPath = split[1] - val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) - } - case 3: - containerPath = split[1] - mode = split[2] - if isValid := validator(split[2]); !isValid { - return val, fmt.Errorf("bad mode specified: %s", mode) - } - val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) - } - - if !path.IsAbs(containerPath) { - return val, fmt.Errorf("%s is not an absolute path", containerPath) - } - return val, nil -} - -// volumeSplitN splits raw into a maximum of n parts, separated by a separator colon. -// A separator colon is the last `:` character in the regex `[:\\]?[a-zA-Z]:` (note `\\` is `\` escaped). -// On Windows a drive letter appears in two situations: -// a. `^[a-zA-Z]:` (A colon followed by `^[a-zA-Z]:` is OK as colon is the separator in volume option) -// b. A string in the format like `\\?\C:\Windows\...` (UNC). -// Therefore, a drive letter can only follow either a `:` or `\\` -// This makes it possible to correctly split strings such as `C:\foo:D:\:rw` or `/tmp/q:/foo`.
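// Worked examples (illustrative, not from the original source):
// volumeSplitN(`C:\foo:D:\:rw`, 3) returns [`C:\foo`, `D:\`, `rw`], since the
// leading `C:` and the `D:` that follows a separator are read as drive letters
// rather than delimiters, while volumeSplitN(`/tmp/q:/foo`, 2) returns
// [`/tmp/q`, `/foo`].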
-func volumeSplitN(raw string, n int) []string { - var array []string - if len(raw) == 0 || raw[0] == ':' { - // invalid - return nil - } - // numberOfParts counts the number of parts separated by a separator colon - numberOfParts := 0 - // left represents the left-most cursor in raw, updated at every `:` character considered as a separator. - left := 0 - // right represents the right-most cursor in raw incremented with the loop. Note this - // starts at index 1 as index 0 is already handled above as a special case. - for right := 1; right < len(raw); right++ { - // stop parsing if reached maximum number of parts - if n >= 0 && numberOfParts >= n { - break - } - if raw[right] != ':' { - continue - } - potentialDriveLetter := raw[right-1] - if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') { - if right > 1 { - beforePotentialDriveLetter := raw[right-2] - // Only `:` or `\\` are checked (`/` could fall into the case of `/tmp/q:/foo`) - if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '\\' { - // e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`. - array = append(array, raw[left:right]) - left = right + 1 - numberOfParts++ - } - // else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing. - } - // if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing. - } else { - // if `:` is not preceded by a potential drive letter, then consider it as a delimiter. - array = append(array, raw[left:right]) - left = right + 1 - numberOfParts++ - } - } - // need to take care of the last part - if left < len(raw) { - if n >= 0 && numberOfParts >= n { - // if the maximum number of parts is reached, just append the rest to the last part - // left-1 is at the last `:` that needs to be included since it is not considered a separator.
- array[n-1] += raw[left-1:] - } else { - array = append(array, raw[left:]) - } - } - return array -} diff --git a/runconfig/opts/parse_test.go b/runconfig/opts/parse_test.go deleted file mode 100644 index d0acd8be8..000000000 --- a/runconfig/opts/parse_test.go +++ /dev/null @@ -1,870 +0,0 @@ -package opts - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "runtime" - "strings" - "testing" - "time" - - "github.com/docker/docker/runconfig" - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/go-connections/nat" - "github.com/spf13/pflag" -) - -func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - flags := pflag.NewFlagSet("run", pflag.ContinueOnError) - flags.SetOutput(ioutil.Discard) - flags.Usage = nil - copts := AddFlags(flags) - if err := flags.Parse(args); err != nil { - return nil, nil, nil, err - } - return Parse(flags, copts) -} - -func parse(t *testing.T, args string) (*container.Config, *container.HostConfig, error) { - config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) - return config, hostConfig, err -} - -func mustParse(t *testing.T, args string) (*container.Config, *container.HostConfig) { - config, hostConfig, err := parse(t, args) - if err != nil { - t.Fatal(err) - } - return config, hostConfig -} - -func TestParseRunLinks(t *testing.T) { - if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { - t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) - } -} - -func TestParseRunAttach(t *testing.T) { - if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect Stdin enabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - - if _, _, err := parse(t, "-a"); err == nil { - t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") - } - if _, _, err := parse(t, "-a invalid"); err == nil { - t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") - } - if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") - } - if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stdin -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stdout -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stderr -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") - } - if _, _, err := parse(t, "-d --rm"); err == nil { - t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") - } -} - -func TestParseRunVolumes(t *testing.T) { - - // A single volume - arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) - } else if _, exists := config.Volumes[arr[0]]; !exists { - t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes) - } - - // Two volumes - arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) - } else if _, exists := config.Volumes[arr[0]]; !exists { - t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes) - } else if _, exists := config.Volumes[arr[1]]; !exists { - t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes) - } - - // A single bind-mount - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] { - t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes) - } - - // Two bind-mounts. - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - // Two bind-mounts, first read-only, second read-write. - // TODO Windows: The Windows version uses read-write as that's the only mode it supports. 
Can change this post TP4 - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - // Similar to previous test but with alternate modes which are only supported by Linux - if runtime.GOOS != "windows" { - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - } - - // One bind mount and one volume - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] { - t.Fatalf("Error parsing volume flags, %s and %s should have one and only one bind mount %s. Received %s", arr[0], arr[1], arr[0], hostConfig.Binds) - } else if _, exists := config.Volumes[arr[1]]; !exists { - t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes) - } - - // Root to non-c: drive letter (Windows specific) - if runtime.GOOS == "windows" { - arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 { - t.Fatalf("Error parsing %s. 
Should have a single bind mount and no volumes", arr[0]) - } - } - -} - -// This tests the cases for binds which are generated through -// DecodeContainerConfig rather than Parse() -func TestDecodeContainerConfigVolumes(t *testing.T) { - - // Root to root - bindsOrVols, _ := setupPlatformVolume([]string{`/:/`}, []string{os.Getenv("SystemDrive") + `\:c:\`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // No destination path - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:`}, []string{os.Getenv("TEMP") + `\:`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // No destination path or mode - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp::`}, []string{os.Getenv("TEMP") + `\::`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // A whole lot of nothing - bindsOrVols = []string{`:`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // A whole lot of nothing with no mode - bindsOrVols = []string{`::`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // Too much including an invalid mode - wTmp := os.Getenv("TEMP") - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:/tmp:/tmp:/tmp`}, []string{wTmp + ":" + wTmp + ":" + wTmp + ":" + wTmp}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // Windows specific error tests - if runtime.GOOS == "windows" { - // Volume which does not include a drive letter - bindsOrVols = []string{`\tmp`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // Root to C-Drive - bindsOrVols = []string{os.Getenv("SystemDrive") + `\:c:`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // Container path that does not include a drive letter - bindsOrVols = []string{`c:\windows:\somewhere`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should
have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - } - - // Linux-specific error tests - if runtime.GOOS != "windows" { - // Just root - bindsOrVols = []string{`/`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // A single volume that looks like a bind mount passed in Volumes. - // This should be handled as a bind mount, not a volume. - vols := []string{`/foo:/bar`} - if config, hostConfig, err := callDecodeContainerConfig(vols, nil); err != nil { - t.Fatal("Volume /foo:/bar should have succeeded as a volume name") - } else if hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, /foo:/bar should not mount-bind anything. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes[vols[0]]; !exists { - t.Fatalf("Error parsing volume flags, /foo:/bar is missing from volumes. Received %v", config.Volumes) - } - - } -} - -// callDecodeContainerConfig is a utility function used by TestDecodeContainerConfigVolumes -// to call DecodeContainerConfig. It effectively does what a client would -// do when calling the daemon by constructing a JSON stream of a -// ContainerConfigWrapper which is populated by the set of volume specs -// passed into it. It returns a config and a hostconfig which can be -// validated to ensure DecodeContainerConfig has manipulated the structures -// correctly. -func callDecodeContainerConfig(volumes []string, binds []string) (*container.Config, *container.HostConfig, error) { - var ( - b []byte - err error - c *container.Config - h *container.HostConfig - ) - w := runconfig.ContainerConfigWrapper{ - Config: &container.Config{ - Volumes: map[string]struct{}{}, - }, - HostConfig: &container.HostConfig{ - NetworkMode: "none", - Binds: binds, - }, - } - for _, v := range volumes { - w.Config.Volumes[v] = struct{}{} - } - if b, err = json.Marshal(w); err != nil { - return nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) - } - c, h, _, err = runconfig.DecodeContainerConfig(bytes.NewReader(b)) - if err != nil { - return nil, nil, fmt.Errorf("Error parsing %s: %v", string(b), err) - } - if c == nil || h == nil { - return nil, nil, fmt.Errorf("Empty config or hostconfig") - } - - return c, h, err -} - -// check if (a == c && b == d) || (a == d && b == c) -// because maps are randomized -func compareRandomizedStrings(a, b, c, d string) error { - if a == c && b == d { - return nil - } - if a == d && b == c { - return nil - } - return fmt.Errorf("strings don't match") -} - -// setupPlatformVolume takes two arrays of volume specs - a Unix style -// spec and a Windows style spec. Depending on the platform being unit tested, -// it returns one of them, along with a volume string that would be passed -// on the docker CLI (eg -v /bar -v /foo). 
-func setupPlatformVolume(u []string, w []string) ([]string, string) { - var a []string - if runtime.GOOS == "windows" { - a = w - } else { - a = u - } - s := "" - for _, v := range a { - s = s + "-v " + v + " " - } - return a, s -} - -// Simple parse with MacAddress validation -func TestParseWithMacAddress(t *testing.T) { - invalidMacAddress := "--mac-address=invalidMacAddress" - validMacAddress := "--mac-address=92:d0:c6:0a:29:33" - if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" { - t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err) - } - if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" { - t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress) - } -} - -func TestParseWithMemory(t *testing.T) { - invalidMemory := "--memory=invalid" - validMemory := "--memory=1G" - if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err != nil && err.Error() != "invalid size: 'invalid'" { - t.Fatalf("Expected an error with '%v' Memory, got '%v'", invalidMemory, err) - } - if _, hostconfig := mustParse(t, validMemory); hostconfig.Memory != 1073741824 { - t.Fatalf("Expected the config to have '1G' as Memory, got '%v'", hostconfig.Memory) - } -} - -func TestParseWithMemorySwap(t *testing.T) { - invalidMemory := "--memory-swap=invalid" - validMemory := "--memory-swap=1G" - anotherValidMemory := "--memory-swap=-1" - if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err == nil || err.Error() != "invalid size: 'invalid'" { - t.Fatalf("Expected an error with '%v' MemorySwap, got '%v'", invalidMemory, err) - } - if _, hostconfig := mustParse(t, validMemory); hostconfig.MemorySwap != 1073741824 { - t.Fatalf("Expected the config to have '1073741824' as MemorySwap, got '%v'", hostconfig.MemorySwap) - } - if _, hostconfig := mustParse(t, anotherValidMemory); hostconfig.MemorySwap != -1 { - t.Fatalf("Expected the config to have '-1' as MemorySwap, got '%v'", hostconfig.MemorySwap) - } -} - -func TestParseHostname(t *testing.T) { - validHostnames := map[string]string{ - "hostname": "hostname", - "host-name": "host-name", - "hostname123": "hostname123", - "123hostname": "123hostname", - "hostname-of-63-bytes-long-should-be-valid-and-without-any-error": "hostname-of-63-bytes-long-should-be-valid-and-without-any-error", - } - hostnameWithDomain := "--hostname=hostname.domainname" - hostnameWithDomainTld := "--hostname=hostname.domainname.tld" - for hostname, expectedHostname := range validHostnames { - if config, _ := mustParse(t, fmt.Sprintf("--hostname=%s", hostname)); config.Hostname != expectedHostname { - t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) - } - } - if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" && config.Domainname != "" { - t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got '%v'", config.Hostname) - } - if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" && config.Domainname != "" { - t.Fatalf("Expected the config to have 'hostname' as hostname.domainname.tld, got '%v'", config.Hostname) - } -} - -func TestParseWithExpose(t *testing.T) { - invalids := map[string]string{ - ":": "invalid port format for --expose: :", - "8080:9090": "invalid port format for --expose: 8080:9090", - "/tcp": "invalid range 
format for --expose: /tcp, error: Empty string specified for ports.", - "/udp": "invalid range format for --expose: /udp, error: Empty string specified for ports.", - "NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "8080-NaN/tcp": `invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "1234567890-8080/tcp": `invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`, - } - valids := map[string][]nat.Port{ - "8080/tcp": {"8080/tcp"}, - "8080/udp": {"8080/udp"}, - "8080/ncp": {"8080/ncp"}, - "8080-8080/udp": {"8080/udp"}, - "8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"}, - } - for expose, expectedError := range invalids { - if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError { - t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err) - } - } - for expose, exposedPorts := range valids { - config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.ExposedPorts) != len(exposedPorts) { - t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts)) - } - for _, port := range exposedPorts { - if _, ok := config.ExposedPorts[port]; !ok { - t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts) - } - } - } - // Merge with actual published port - config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.ExposedPorts) != 2 { - t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts) - } - ports := []nat.Port{"80/tcp", "81/tcp"} - for _, port := range ports { - if _, ok := config.ExposedPorts[port]; !ok { - t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts) - } - } -} - -func TestParseDevice(t *testing.T) { - valids := map[string]container.DeviceMapping{ - "/dev/snd": { - PathOnHost: "/dev/snd", - PathInContainer: "/dev/snd", - CgroupPermissions: "rwm", - }, - "/dev/snd:rw": { - PathOnHost: "/dev/snd", - PathInContainer: "/dev/snd", - CgroupPermissions: "rw", - }, - "/dev/snd:/something": { - PathOnHost: "/dev/snd", - PathInContainer: "/something", - CgroupPermissions: "rwm", - }, - "/dev/snd:/something:rw": { - PathOnHost: "/dev/snd", - PathInContainer: "/something", - CgroupPermissions: "rw", - }, - } - for device, deviceMapping := range valids { - _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(hostconfig.Devices) != 1 { - t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) - } - if hostconfig.Devices[0] != deviceMapping { - t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices) - } - } - -} - -func TestParseModes(t *testing.T) { - // ipc ko - if _, _, _, err := parseRun([]string{"--ipc=container:", "img", "cmd"}); err == nil || err.Error() != "--ipc: invalid IPC mode" { - t.Fatalf("Expected an error with message '--ipc: invalid IPC mode', got %v", err) - } - // ipc ok - _, hostconfig, _, err := parseRun([]string{"--ipc=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if !hostconfig.IpcMode.Valid() { - t.Fatalf("Expected a valid 
IpcMode, got %v", hostconfig.IpcMode) - } - // pid ko - if _, _, _, err := parseRun([]string{"--pid=container:", "img", "cmd"}); err == nil || err.Error() != "--pid: invalid PID mode" { - t.Fatalf("Expected an error with message '--pid: invalid PID mode', got %v", err) - } - // pid ok - _, hostconfig, _, err = parseRun([]string{"--pid=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if !hostconfig.PidMode.Valid() { - t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) - } - // uts ko - if _, _, _, err := parseRun([]string{"--uts=container:", "img", "cmd"}); err == nil || err.Error() != "--uts: invalid UTS mode" { - t.Fatalf("Expected an error with message '--uts: invalid UTS mode', got %v", err) - } - // uts ok - _, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if !hostconfig.UTSMode.Valid() { - t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) - } - // shm-size ko - if _, _, _, err = parseRun([]string{"--shm-size=a128m", "img", "cmd"}); err == nil || err.Error() != "invalid size: 'a128m'" { - t.Fatalf("Expected an error with message 'invalid size: a128m', got %v", err) - } - // shm-size ok - _, hostconfig, _, err = parseRun([]string{"--shm-size=128m", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.ShmSize != 134217728 { - t.Fatalf("Expected a valid ShmSize, got %d", hostconfig.ShmSize) - } -} - -func TestParseRestartPolicy(t *testing.T) { - invalids := map[string]string{ - "something": "invalid restart policy something", - "always:2": "maximum restart count not valid with restart policy of \"always\"", - "always:2:3": "maximum restart count not valid with restart policy of \"always\"", - "on-failure:invalid": `strconv.ParseInt: parsing "invalid": invalid syntax`, - "on-failure:2:5": "restart count format is not valid, usage: 'on-failure:N' or 'on-failure'", - } - valids := map[string]container.RestartPolicy{ - "": {}, - "always": { - Name: "always", - MaximumRetryCount: 0, - }, - "on-failure:1": { - Name: "on-failure", - MaximumRetryCount: 1, - }, - } - for restart, expectedError := range invalids { - if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err) - } - } - for restart, expected := range valids { - _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.RestartPolicy != expected { - t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) - } - } -} - -func TestParseHealth(t *testing.T) { - checkOk := func(args ...string) *container.HealthConfig { - config, _, _, err := parseRun(args) - if err != nil { - t.Fatalf("%#v: %v", args, err) - } - return config.Healthcheck - } - checkError := func(expected string, args ...string) { - config, _, _, err := parseRun(args) - if err == nil { - t.Fatalf("Expected error, but got %#v", config) - } - if err.Error() != expected { - t.Fatalf("Expected %#v, got %#v", expected, err) - } - } - health := checkOk("--no-healthcheck", "img", "cmd") - if health == nil || len(health.Test) != 1 || health.Test[0] != "NONE" { - t.Fatalf("--no-healthcheck failed: %#v", health) - } - - health = checkOk("--health-cmd=/check.sh -q", "img", "cmd") - if len(health.Test) != 2 || health.Test[0] != "CMD-SHELL" || health.Test[1] != "/check.sh -q" { - t.Fatalf("--health-cmd: 
got %#v", health.Test) - } - if health.Timeout != 0 { - t.Fatalf("--health-cmd: timeout = %f", health.Timeout) - } - - checkError("--no-healthcheck conflicts with --health-* options", - "--no-healthcheck", "--health-cmd=/check.sh -q", "img", "cmd") - - health = checkOk("--health-timeout=2s", "--health-retries=3", "--health-interval=4.5s", "img", "cmd") - if health.Timeout != 2*time.Second || health.Retries != 3 || health.Interval != 4500*time.Millisecond { - t.Fatalf("--health-*: got %#v", health) - } -} - -func TestParseLoggingOpts(t *testing.T) { - // logging opts ko - if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "invalid logging opts for driver none" { - t.Fatalf("Expected an error with message 'invalid logging opts for driver none', got %v", err) - } - // logging opts ok - _, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 { - t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy) - } -} - -func TestParseEnvfileVariables(t *testing.T) { - e := "open nonexistent: no such file or directory" - if runtime.GOOS == "windows" { - e = "open nonexistent: The system cannot find the file specified." - } - // env ko - if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } - // env ok - config, _, _, err := parseRun([]string{"--env-file=fixtures/valid.env", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" { - t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env) - } - config, _, _, err = parseRun([]string{"--env-file=fixtures/valid.env", "--env=ENV2=value2", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" { - t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env) - } -} - -func TestParseLabelfileVariables(t *testing.T) { - e := "open nonexistent: no such file or directory" - if runtime.GOOS == "windows" { - e = "open nonexistent: The system cannot find the file specified." 
- } - // label ko - if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } - // label ok - config, _, _, err := parseRun([]string{"--label-file=fixtures/valid.label", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" { - t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels) - } - config, _, _, err = parseRun([]string{"--label-file=fixtures/valid.label", "--label=LABEL2=value2", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" { - t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels) - } -} - -func TestParseEntryPoint(t *testing.T) { - config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"}) - if err != nil { - t.Fatal(err) - } - if len(config.Entrypoint) != 1 && config.Entrypoint[0] != "anything" { - t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint) - } -} - -func TestValidateLink(t *testing.T) { - valid := []string{ - "name", - "dcdfbe62ecd0:alias", - "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", - "angry_torvalds:linus", - } - invalid := map[string]string{ - "": "empty string specified for links", - "too:much:of:it": "bad format for links: too:much:of:it", - } - - for _, link := range valid { - if _, err := ValidateLink(link); err != nil { - t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) - } - } - - for link, expectedError := range invalid { - if _, err := ValidateLink(link); err == nil { - t.Fatalf("ValidateLink(`%q`) should have failed validation", link) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError) - } - } - } -} - -func TestParseLink(t *testing.T) { - name, alias, err := ParseLink("name:alias") - if err != nil { - t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "alias" { - t.Fatalf("Link alias should have been alias, got %s instead", alias) - } - // short format definition - name, alias, err = ParseLink("name") - if err != nil { - t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "name" { - t.Fatalf("Link alias should have been name, got %s instead", alias) - } - // empty string link definition is not allowed - if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { - t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) - } - // more than two colons are not allowed - if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { - t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) - } -} - -func TestValidateDevice(t *testing.T) { - valid := []string{ - "/home", - "/home:/home", - "/home:/something/else", - "/with space", - "/home:/with space", - "relative:/absolute-path", - "hostPath:/containerPath:r", - "/hostPath:/containerPath:rw", - "/hostPath:/containerPath:mrw", - } - 
invalid := map[string]string{ - "": "bad format for path: ", - "./": "./ is not an absolute path", - "../": "../ is not an absolute path", - "/:../": "../ is not an absolute path", - "/:path": "path is not an absolute path", - ":": "bad format for path: :", - "/tmp:": " is not an absolute path", - ":test": "bad format for path: :test", - ":/test": "bad format for path: :/test", - "tmp:": " is not an absolute path", - ":test:": "bad format for path: :test:", - "::": "bad format for path: ::", - ":::": "bad format for path: :::", - "/tmp:::": "bad format for path: /tmp:::", - ":/tmp::": "bad format for path: :/tmp::", - "path:ro": "ro is not an absolute path", - "path:rr": "rr is not an absolute path", - "a:/b:ro": "bad mode specified: ro", - "a:/b:rr": "bad mode specified: rr", - } - - for _, path := range valid { - if _, err := ValidateDevice(path); err != nil { - t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) - } - } - - for path, expectedError := range invalid { - if _, err := ValidateDevice(path); err == nil { - t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) - } else { - if err.Error() != expectedError { - t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) - } - } - } -} - -func TestVolumeSplitN(t *testing.T) { - for _, x := range []struct { - input string - n int - expected []string - }{ - {`C:\foo:d:`, -1, []string{`C:\foo`, `d:`}}, - {`:C:\foo:d:`, -1, nil}, - {`/foo:/bar:ro`, 3, []string{`/foo`, `/bar`, `ro`}}, - {`/foo:/bar:ro`, 2, []string{`/foo`, `/bar:ro`}}, - {`C:\foo\:/foo`, -1, []string{`C:\foo\`, `/foo`}}, - - {`d:\`, -1, []string{`d:\`}}, - {`d:`, -1, []string{`d:`}}, - {`d:\path`, -1, []string{`d:\path`}}, - {`d:\path with space`, -1, []string{`d:\path with space`}}, - {`d:\pathandmode:rw`, -1, []string{`d:\pathandmode`, `rw`}}, - {`c:\:d:\`, -1, []string{`c:\`, `d:\`}}, - {`c:\windows\:d:`, -1, []string{`c:\windows\`, `d:`}}, - {`c:\windows:d:\s p a c e`, -1, []string{`c:\windows`, `d:\s p a c e`}}, - {`c:\windows:d:\s p a c e:RW`, -1, []string{`c:\windows`, `d:\s p a c e`, `RW`}}, - {`c:\program files:d:\s p a c e i n h o s t d i r`, -1, []string{`c:\program files`, `d:\s p a c e i n h o s t d i r`}}, - {`0123456789name:d:`, -1, []string{`0123456789name`, `d:`}}, - {`MiXeDcAsEnAmE:d:`, -1, []string{`MiXeDcAsEnAmE`, `d:`}}, - {`name:D:`, -1, []string{`name`, `D:`}}, - {`name:D::rW`, -1, []string{`name`, `D:`, `rW`}}, - {`name:D::RW`, -1, []string{`name`, `D:`, `RW`}}, - {`c:/:d:/forward/slashes/are/good/too`, -1, []string{`c:/`, `d:/forward/slashes/are/good/too`}}, - {`c:\Windows`, -1, []string{`c:\Windows`}}, - {`c:\Program Files (x86)`, -1, []string{`c:\Program Files (x86)`}}, - - {``, -1, nil}, - {`.`, -1, []string{`.`}}, - {`..\`, -1, []string{`..\`}}, - {`c:\:..\`, -1, []string{`c:\`, `..\`}}, - {`c:\:d:\:xyzzy`, -1, []string{`c:\`, `d:\`, `xyzzy`}}, - - // Cover directories with one-character name - {`/tmp/x/y:/foo/x/y`, -1, []string{`/tmp/x/y`, `/foo/x/y`}}, - } { - res := volumeSplitN(x.input, x.n) - if len(res) < len(x.expected) { - t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) - } - for i, e := range res { - if e != x.expected[i] { - t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) - } - } - } -} diff --git a/runconfig/opts/runtime.go b/runconfig/opts/runtime.go deleted file mode 100644 index 1fc099834..000000000 --- a/runconfig/opts/runtime.go +++ /dev/null @@ -1,74 +0,0 @@ -package opts - -import ( - "fmt" - "strings" - - 
"github.com/docker/engine-api/types" -) - -// RuntimeOpt defines a map of Runtimes -type RuntimeOpt struct { - name string - stockRuntimeName string - values *map[string]types.Runtime -} - -// NewNamedRuntimeOpt creates a new RuntimeOpt -func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { - if ref == nil { - ref = &map[string]types.Runtime{} - } - return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *RuntimeOpt) Name() string { - return o.name -} - -// Set validates and updates the list of Runtimes -func (o *RuntimeOpt) Set(val string) error { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.TrimSpace(parts[0]) - parts[1] = strings.TrimSpace(parts[1]) - if parts[0] == "" || parts[1] == "" { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.ToLower(parts[0]) - if parts[0] == o.stockRuntimeName { - return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) - } - - if _, ok := (*o.values)[parts[0]]; ok { - return fmt.Errorf("runtime '%s' was already defined", parts[0]) - } - - (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} - - return nil -} - -// String returns Runtime values as a string. -func (o *RuntimeOpt) String() string { - var out []string - for k := range *o.values { - out = append(out, k) - } - - return fmt.Sprintf("%v", out) -} - -// GetMap returns a map of Runtimes (name: path) -func (o *RuntimeOpt) GetMap() map[string]types.Runtime { - if o.values != nil { - return *o.values - } - - return map[string]types.Runtime{} -} diff --git a/runconfig/opts/throttledevice.go b/runconfig/opts/throttledevice.go deleted file mode 100644 index f69e74ecc..000000000 --- a/runconfig/opts/throttledevice.go +++ /dev/null @@ -1,113 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/engine-api/types/blkiodev" - "github.com/docker/go-units" -) - -// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error) - -// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format. -func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := units.RAMInBytes(split[1]) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) - } - - return &blkiodev.ThrottleDevice{ - Path: split[0], - Rate: uint64(rate), - }, nil -} - -// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format. 
-func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := strconv.ParseUint(split[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) - } - - return &blkiodev.ThrottleDevice{ - Path: split[0], - Rate: uint64(rate), - }, nil -} - -// ThrottledeviceOpt defines a map of ThrottleDevices -type ThrottledeviceOpt struct { - values []*blkiodev.ThrottleDevice - validator ValidatorThrottleFctType -} - -// NewThrottledeviceOpt creates a new ThrottledeviceOpt -func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt { - values := []*blkiodev.ThrottleDevice{} - return ThrottledeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt -func (opt *ThrottledeviceOpt) Set(val string) error { - var value *blkiodev.ThrottleDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns ThrottledeviceOpt values as a string. -func (opt *ThrottledeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to ThrottleDevices. -func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { - var throttledevice []*blkiodev.ThrottleDevice - for _, v := range opt.values { - throttledevice = append(throttledevice, v) - } - - return throttledevice -} - -// Type returns the option type -func (opt *ThrottledeviceOpt) Type() string { - return "throttled-device" -} diff --git a/runconfig/opts/ulimit.go b/runconfig/opts/ulimit.go deleted file mode 100644 index 5adfe3085..000000000 --- a/runconfig/opts/ulimit.go +++ /dev/null @@ -1,57 +0,0 @@ -package opts - -import ( - "fmt" - - "github.com/docker/go-units" -) - -// UlimitOpt defines a map of Ulimits -type UlimitOpt struct { - values *map[string]*units.Ulimit -} - -// NewUlimitOpt creates a new UlimitOpt -func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { - if ref == nil { - ref = &map[string]*units.Ulimit{} - } - return &UlimitOpt{ref} -} - -// Set validates a Ulimit and sets its name as a key in UlimitOpt -func (o *UlimitOpt) Set(val string) error { - l, err := units.ParseUlimit(val) - if err != nil { - return err - } - - (*o.values)[l.Name] = l - - return nil -} - -// String returns Ulimit values as a string. -func (o *UlimitOpt) String() string { - var out []string - for _, v := range *o.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to Ulimits. 
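The Set/String/Type trio on ThrottledeviceOpt and UlimitOpt is the value interface that command-line flag libraries expect; Type is the extra method used by pflag-style flag sets, while the standard library's flag.Value needs only Set and String. A minimal sketch of the same pattern with the standard flag package and an invented listOpt type:

// listOpt accumulates repeated flag values, as the *Opt types above do.
// It is an invented example type, not taken from the original code.
package main

import (
	"flag"
	"fmt"
	"strings"
)

type listOpt struct {
	values []string
}

// Set validates and appends one flag occurrence.
func (o *listOpt) Set(val string) error {
	if strings.TrimSpace(val) == "" {
		return fmt.Errorf("empty value")
	}
	o.values = append(o.values, val)
	return nil
}

func (o *listOpt) String() string { return fmt.Sprintf("%v", o.values) }

func main() {
	var devices listOpt
	fs := flag.NewFlagSet("run", flag.ContinueOnError)
	fs.Var(&devices, "device-read-bps", "limit read rate of a device")
	_ = fs.Parse([]string{"--device-read-bps", "/dev/sda:10mb", "--device-read-bps", "/dev/sdb:1gb"})
	fmt.Println(devices.values) // [/dev/sda:10mb /dev/sdb:1gb]
}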
-func (o *UlimitOpt) GetList() []*units.Ulimit { - var ulimits []*units.Ulimit - for _, v := range *o.values { - ulimits = append(ulimits, v) - } - - return ulimits -} - -// Type returns the option type -func (o *UlimitOpt) Type() string { - return "ulimit" -} diff --git a/runconfig/opts/ulimit_test.go b/runconfig/opts/ulimit_test.go deleted file mode 100644 index 0aa3facdf..000000000 --- a/runconfig/opts/ulimit_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package opts - -import ( - "testing" - - "github.com/docker/go-units" -) - -func TestUlimitOpt(t *testing.T) { - ulimitMap := map[string]*units.Ulimit{ - "nofile": {"nofile", 1024, 512}, - } - - ulimitOpt := NewUlimitOpt(&ulimitMap) - - expected := "[nofile=512:1024]" - if ulimitOpt.String() != expected { - t.Fatalf("Expected %v, got %v", expected, ulimitOpt) - } - - // Valid ulimit append to opts - if err := ulimitOpt.Set("core=1024:1024"); err != nil { - t.Fatal(err) - } - - // Invalid ulimit type returns an error and do not append to opts - if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { - t.Fatalf("Expected error on invalid ulimit type") - } - expected = "[nofile=512:1024 core=1024:1024]" - expected2 := "[core=1024:1024 nofile=512:1024]" - result := ulimitOpt.String() - if result != expected && result != expected2 { - t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) - } - - // And test GetList - ulimits := ulimitOpt.GetList() - if len(ulimits) != 2 { - t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) - } -} diff --git a/runconfig/opts/weightdevice.go b/runconfig/opts/weightdevice.go deleted file mode 100644 index b3afd2213..000000000 --- a/runconfig/opts/weightdevice.go +++ /dev/null @@ -1,89 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/engine-api/types/blkiodev" -) - -// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) - -// ValidateWeightDevice validates that the specified string has a valid device-weight format. 
-func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - weight, err := strconv.ParseUint(split[1], 10, 0) - if err != nil { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - if weight > 0 && (weight < 10 || weight > 1000) { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - - return &blkiodev.WeightDevice{ - Path: split[0], - Weight: uint16(weight), - }, nil -} - -// WeightdeviceOpt defines a map of WeightDevices -type WeightdeviceOpt struct { - values []*blkiodev.WeightDevice - validator ValidatorWeightFctType -} - -// NewWeightdeviceOpt creates a new WeightdeviceOpt -func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { - values := []*blkiodev.WeightDevice{} - return WeightdeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt -func (opt *WeightdeviceOpt) Set(val string) error { - var value *blkiodev.WeightDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns WeightdeviceOpt values as a string. -func (opt *WeightdeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to WeightDevices. -func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { - var weightdevice []*blkiodev.WeightDevice - for _, v := range opt.values { - weightdevice = append(weightdevice, v) - } - - return weightdevice -} - -// Type returns the option type -func (opt *WeightdeviceOpt) Type() string { - return "weighted-device" -} diff --git a/runconfig/streams.go b/runconfig/streams.go deleted file mode 100644 index 117fd89ae..000000000 --- a/runconfig/streams.go +++ /dev/null @@ -1,109 +0,0 @@ -package runconfig - -import ( - "fmt" - "io" - "io/ioutil" - "strings" - "sync" - - "github.com/docker/docker/pkg/broadcaster" - "github.com/docker/docker/pkg/ioutils" -) - -// StreamConfig holds information about I/O streams managed together. -// -// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data -// to the standard input of the streamConfig's active process. -// streamConfig.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser -// which can be used to retrieve the standard output (and error) generated -// by the container's active process. The output (and error) are actually -// copied and delivered to all StdoutPipe and StderrPipe consumers, using -// a kind of "broadcaster". -type StreamConfig struct { - sync.WaitGroup - stdout *broadcaster.Unbuffered - stderr *broadcaster.Unbuffered - stdin io.ReadCloser - stdinPipe io.WriteCloser -} - -// NewStreamConfig creates a stream config and initializes -// the standard err and standard out to new unbuffered broadcasters. -func NewStreamConfig() *StreamConfig { - return &StreamConfig{ - stderr: new(broadcaster.Unbuffered), - stdout: new(broadcaster.Unbuffered), - } -} - -// Stdout returns the standard output in the configuration. 
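The fan-out that the StreamConfig comment above describes (every byte written is copied to all StdoutPipe/StderrPipe consumers) can be sketched in a few lines. This is a simplified stand-in for pkg/broadcaster.Unbuffered, not the real type:

// broadcaster copies each Write to every registered consumer; a
// simplified stand-in for pkg/broadcaster.Unbuffered.
package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

type broadcaster struct {
	mu      sync.Mutex
	writers []io.Writer
}

// Add registers another consumer of the stream.
func (b *broadcaster) Add(w io.Writer) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.writers = append(b.writers, w)
}

// Write copies p to every registered writer, so each consumer
// sees the full stream.
func (b *broadcaster) Write(p []byte) (int, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	for _, w := range b.writers {
		if _, err := w.Write(p); err != nil {
			return 0, err
		}
	}
	return len(p), nil
}

func main() {
	var first, second bytes.Buffer
	b := &broadcaster{}
	b.Add(&first)
	b.Add(&second)
	fmt.Fprintln(b, "container output")
	fmt.Print(first.String(), second.String()) // both consumers got a copy
}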
-func (streamConfig *StreamConfig) Stdout() *broadcaster.Unbuffered { - return streamConfig.stdout -} - -// Stderr returns the standard error in the configuration. -func (streamConfig *StreamConfig) Stderr() *broadcaster.Unbuffered { - return streamConfig.stderr -} - -// Stdin returns the standard input in the configuration. -func (streamConfig *StreamConfig) Stdin() io.ReadCloser { - return streamConfig.stdin -} - -// StdinPipe returns an input writer pipe as an io.WriteCloser. -func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser { - return streamConfig.stdinPipe -} - -// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. -// It adds this new out pipe to the Stdout broadcaster. -func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser { - bytesPipe := ioutils.NewBytesPipe() - streamConfig.stdout.Add(bytesPipe) - return bytesPipe -} - -// StderrPipe creates a new io.ReadCloser with an empty bytes pipe. -// It adds this new err pipe to the Stderr broadcaster. -func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser { - bytesPipe := ioutils.NewBytesPipe() - streamConfig.stderr.Add(bytesPipe) - return bytesPipe -} - -// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. -func (streamConfig *StreamConfig) NewInputPipes() { - streamConfig.stdin, streamConfig.stdinPipe = io.Pipe() -} - -// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. -func (streamConfig *StreamConfig) NewNopInputPipe() { - streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) -} - -// CloseStreams ensures that the configured streams are properly closed. -func (streamConfig *StreamConfig) CloseStreams() error { - var errors []string - - if streamConfig.stdin != nil { - if err := streamConfig.stdin.Close(); err != nil { - errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) - } - } - - if err := streamConfig.stdout.Clean(); err != nil { - errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) - } - - if err := streamConfig.stderr.Clean(); err != nil { - errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) - } - - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } - - return nil -} diff --git a/utils/debug.go b/utils/debug.go deleted file mode 100644 index d20389112..000000000 --- a/utils/debug.go +++ /dev/null @@ -1,26 +0,0 @@ -package utils - -import ( - "os" - - "github.com/Sirupsen/logrus" -) - -// EnableDebug sets the DEBUG env var to true -// and makes the logger to log at debug level. -func EnableDebug() { - os.Setenv("DEBUG", "1") - logrus.SetLevel(logrus.DebugLevel) -} - -// DisableDebug sets the DEBUG env var to false -// and makes the logger to log at info level. -func DisableDebug() { - os.Setenv("DEBUG", "") - logrus.SetLevel(logrus.InfoLevel) -} - -// IsDebugEnabled checks whether the debug flag is set or not. 
-func IsDebugEnabled() bool { - return os.Getenv("DEBUG") != "" -} diff --git a/utils/debug_test.go b/utils/debug_test.go deleted file mode 100644 index 6f9c4dfbb..000000000 --- a/utils/debug_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package utils - -import ( - "os" - "testing" - - "github.com/Sirupsen/logrus" -) - -func TestEnableDebug(t *testing.T) { - defer func() { - os.Setenv("DEBUG", "") - logrus.SetLevel(logrus.InfoLevel) - }() - EnableDebug() - if os.Getenv("DEBUG") != "1" { - t.Fatalf("expected DEBUG=1, got %s\n", os.Getenv("DEBUG")) - } - if logrus.GetLevel() != logrus.DebugLevel { - t.Fatalf("expected log level %v, got %v\n", logrus.DebugLevel, logrus.GetLevel()) - } -} - -func TestDisableDebug(t *testing.T) { - DisableDebug() - if os.Getenv("DEBUG") != "" { - t.Fatalf("expected DEBUG=\"\", got %s\n", os.Getenv("DEBUG")) - } - if logrus.GetLevel() != logrus.InfoLevel { - t.Fatalf("expected log level %v, got %v\n", logrus.InfoLevel, logrus.GetLevel()) - } -} - -func TestDebugEnabled(t *testing.T) { - EnableDebug() - if !IsDebugEnabled() { - t.Fatal("expected debug enabled, got false") - } - DisableDebug() - if IsDebugEnabled() { - t.Fatal("expected debug disabled, got true") - } -} diff --git a/utils/experimental.go b/utils/experimental.go deleted file mode 100644 index ceed0cb3f..000000000 --- a/utils/experimental.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build experimental - -package utils - -// ExperimentalBuild is a stub which always returns true for -// builds that include the "experimental" build tag -func ExperimentalBuild() bool { - return true -} diff --git a/utils/names.go b/utils/names.go deleted file mode 100644 index 8239c0de2..000000000 --- a/utils/names.go +++ /dev/null @@ -1,12 +0,0 @@ -package utils - -import "regexp" - -// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names. -const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` - -// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. -var RestrictedNamePattern = regexp.MustCompile(`^/?` + RestrictedNameChars + `+$`) - -// RestrictedVolumeNamePattern is a regular expression to validate volume names against the collection of restricted characters. -var RestrictedVolumeNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) diff --git a/utils/process_unix.go b/utils/process_unix.go deleted file mode 100644 index bdb1b46b3..000000000 --- a/utils/process_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux freebsd - -package utils - -import ( - "syscall" -) - -// IsProcessAlive returns true if process with a given pid is running. -func IsProcessAlive(pid int) bool { - err := syscall.Kill(pid, syscall.Signal(0)) - if err == nil || err == syscall.EPERM { - return true - } - - return false -} - -// KillProcess force-stops a process. -func KillProcess(pid int) { - syscall.Kill(pid, syscall.SIGKILL) -} diff --git a/utils/process_windows.go b/utils/process_windows.go deleted file mode 100644 index 03cb85519..000000000 --- a/utils/process_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package utils - -// IsProcessAlive returns true if process with a given pid is running. -func IsProcessAlive(pid int) bool { - // TODO Windows containerd. Not sure this is needed - // p, err := os.FindProcess(pid) - // if err == nil { - // return true - // } - return false -} - -// KillProcess force-stops a process. -func KillProcess(pid int) { - // TODO Windows containerd. 
Not sure this is needed - // p, err := os.FindProcess(pid) - // if err == nil { - // p.Kill() - // } -} diff --git a/utils/stubs.go b/utils/stubs.go deleted file mode 100644 index 8a496d392..000000000 --- a/utils/stubs.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !experimental - -package utils - -// ExperimentalBuild is a stub which always returns false for -// builds that do not include the "experimental" build tag -func ExperimentalBuild() bool { - return false -} diff --git a/utils/templates/templates.go b/utils/templates/templates.go deleted file mode 100644 index 749da3d5a..000000000 --- a/utils/templates/templates.go +++ /dev/null @@ -1,33 +0,0 @@ -package templates - -import ( - "encoding/json" - "strings" - "text/template" -) - -// basicFunctions are the set of initial -// functions provided to every template. -var basicFunctions = template.FuncMap{ - "json": func(v interface{}) string { - a, _ := json.Marshal(v) - return string(a) - }, - "split": strings.Split, - "join": strings.Join, - "title": strings.Title, - "lower": strings.ToLower, - "upper": strings.ToUpper, -} - -// Parse creates a new anonymous template with the basic functions -// and parses the given format. -func Parse(format string) (*template.Template, error) { - return NewParse("", format) -} - -// NewParse creates a new tagged template with the basic functions -// and parses the given format. -func NewParse(tag, format string) (*template.Template, error) { - return template.New(tag).Funcs(basicFunctions).Parse(format) -} diff --git a/utils/templates/templates_test.go b/utils/templates/templates_test.go deleted file mode 100644 index dd42901ae..000000000 --- a/utils/templates/templates_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package templates - -import ( - "bytes" - "testing" -) - -func TestParseStringFunctions(t *testing.T) { - tm, err := Parse(`{{join (split . ":") "/"}}`) - if err != nil { - t.Fatal(err) - } - - var b bytes.Buffer - if err := tm.Execute(&b, "text:with:colon"); err != nil { - t.Fatal(err) - } - want := "text/with/colon" - if b.String() != want { - t.Fatalf("expected %s, got %s", want, b.String()) - } -} - -func TestNewParse(t *testing.T) { - tm, err := NewParse("foo", "this is a {{ . }}") - if err != nil { - t.Fatal(err) - } - - var b bytes.Buffer - if err := tm.Execute(&b, "string"); err != nil { - t.Fatal(err) - } - want := "this is a string" - if b.String() != want { - t.Fatalf("expected %s, got %s", want, b.String()) - } -} diff --git a/utils/utils.go b/utils/utils.go deleted file mode 100644 index d3dd00abf..000000000 --- a/utils/utils.go +++ /dev/null @@ -1,87 +0,0 @@ -package utils - -import ( - "fmt" - "io/ioutil" - "os" - "runtime" - "strings" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/stringid" -) - -var globalTestID string - -// TestDirectory creates a new temporary directory and returns its path. -// The contents of the directory at path `templateDir` are copied into the -// new directory. 
-func TestDirectory(templateDir string) (dir string, err error) { - if globalTestID == "" { - globalTestID = stringid.GenerateNonCryptoID()[:4] - } - prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) - if prefix == "" { - prefix = "docker-test-" - } - dir, err = ioutil.TempDir("", prefix) - if err = os.Remove(dir); err != nil { - return - } - if templateDir != "" { - if err = archive.CopyWithTar(templateDir, dir); err != nil { - return - } - } - return -} - -// GetCallerName introspects the call stack and returns the name of the -// function `depth` levels down in the stack. -func GetCallerName(depth int) string { - // Use the caller function name as a prefix. - // This helps trace temp directories back to their test. - pc, _, _, _ := runtime.Caller(depth + 1) - callerLongName := runtime.FuncForPC(pc).Name() - parts := strings.Split(callerLongName, ".") - callerShortName := parts[len(parts)-1] - return callerShortName -} - -// ReplaceOrAppendEnvValues returns the defaults with the overrides either -// replaced by env key or appended to the list -func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { - cache := make(map[string]int, len(defaults)) - for i, e := range defaults { - parts := strings.SplitN(e, "=", 2) - cache[parts[0]] = i - } - - for _, value := range overrides { - // Values w/o = means they want this env to be removed/unset. - if !strings.Contains(value, "=") { - if i, exists := cache[value]; exists { - defaults[i] = "" // Used to indicate it should be removed - } - continue - } - - // Just do a normal set/update - parts := strings.SplitN(value, "=", 2) - if i, exists := cache[parts[0]]; exists { - defaults[i] = value - } else { - defaults = append(defaults, value) - } - } - - // Now remove all entries that we want to "unset" - for i := 0; i < len(defaults); i++ { - if defaults[i] == "" { - defaults = append(defaults[:i], defaults[i+1:]...) - i-- - } - } - - return defaults -} diff --git a/utils/utils_test.go b/utils/utils_test.go deleted file mode 100644 index ab3911e8b..000000000 --- a/utils/utils_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package utils - -import "testing" - -func TestReplaceAndAppendEnvVars(t *testing.T) { - var ( - d = []string{"HOME=/"} - o = []string{"HOME=/root", "TERM=xterm"} - ) - - env := ReplaceOrAppendEnvValues(d, o) - if len(env) != 2 { - t.Fatalf("expected len of 2 got %d", len(env)) - } - if env[0] != "HOME=/root" { - t.Fatalf("expected HOME=/root got '%s'", env[0]) - } - if env[1] != "TERM=xterm" { - t.Fatalf("expected TERM=xterm got '%s'", env[1]) - } -} diff --git a/vendor/src/bitbucket.org/ww/goautoneg/Makefile b/vendor/src/bitbucket.org/ww/goautoneg/Makefile deleted file mode 100644 index e33ee1730..000000000 --- a/vendor/src/bitbucket.org/ww/goautoneg/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -include $(GOROOT)/src/Make.inc - -TARG=bitbucket.org/ww/goautoneg -GOFILES=autoneg.go - -include $(GOROOT)/src/Make.pkg - -format: - gofmt -w *.go - -docs: - gomake clean - godoc ${TARG} > README.txt diff --git a/vendor/src/bitbucket.org/ww/goautoneg/README.txt b/vendor/src/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d5..000000000 --- a/vendor/src/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. 
- -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/src/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/src/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index 648b38cb6..000000000 --- a/vendor/src/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. 
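For orientation, a usage sketch of the Negotiate function defined just below, assuming the package is importable under the vendored bitbucket path shown in the diff header. Given the parsing rules above, the q-weighted Accept header here should resolve to text/html:

// Usage sketch for goautoneg.Negotiate; the expected result follows
// from the q-value sorting implemented in ParseAccept above.
package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg"
)

func main() {
	header := "text/html, application/json;q=0.5"
	alternatives := []string{"application/json", "text/html"}
	fmt.Println(goautoneg.Negotiate(header, alternatives)) // text/html
}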
-func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/LICENSE b/vendor/src/github.com/Azure/go-ansiterm/LICENSE deleted file mode 100644 index e3d9a64d1..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/src/github.com/Azure/go-ansiterm/README.md b/vendor/src/github.com/Azure/go-ansiterm/README.md deleted file mode 100644 index e25e38210..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# go-ansiterm - -This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. - -For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. - -The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). - -See parser_test.go for examples exercising the state machine and generating appropriate function calls. 
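The README above describes the package's core pattern: a state-machine parser consumes a byte stream and dispatches decoded events (such as Cursor Up for "ESC, [, A") to a platform-specific event handler. As an editorial illustration only — not part of this patch, and deliberately not implementing the removed package's full AnsiEventHandler interface — here is a minimal, self-contained Go sketch of that dispatch pattern; the names eventHandler, printingHandler, and feed are invented for the example:

package main

import "fmt"

// eventHandler receives decoded ANSI events; real implementations are
// platform specific (compare the winterm handler removed later in this patch).
type eventHandler interface {
	CUU(n int) error // CUrsor Up: the README's example, ESC [ A
}

type printingHandler struct{}

func (printingHandler) CUU(n int) error {
	fmt.Printf("cursor up %d row(s)\n", n)
	return nil
}

// feed is a toy recognizer: it scans for the three-byte sequence ESC [ A and
// fires CUU(1) for each occurrence, ignoring everything else. The real parser
// generalizes this with the VT500 state machine the README links to.
func feed(h eventHandler, stream []byte) error {
	for i := 0; i+2 < len(stream); i++ {
		if stream[i] == 0x1B && stream[i+1] == '[' && stream[i+2] == 'A' {
			if err := h.CUU(1); err != nil {
				return err
			}
			i += 2
		}
	}
	return nil
}

func main() {
	_ = feed(printingHandler{}, []byte("\x1b[A")) // prints: cursor up 1 row(s)
}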
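Stepping back to the goautoneg package whose removal opens this part of the patch: its two exported entry points are ParseAccept and Negotiate, exactly as documented above. A short usage sketch, illustrative only and using the import path as vendored in this tree:

package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg"
)

func main() {
	header := "text/html;q=0.9, application/json"

	// ParseAccept returns the clauses sorted by preference (highest q first);
	// application/json carries the implicit default of q=1.0.
	for _, clause := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%v\n", clause.Type, clause.SubType, clause.Q)
	}

	// Negotiate picks the alternative matching the most preferred clause.
	fmt.Println(goautoneg.Negotiate(header, []string{"application/json", "text/html"}))
	// Output: application/json
}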
diff --git a/vendor/src/github.com/Azure/go-ansiterm/constants.go b/vendor/src/github.com/Azure/go-ansiterm/constants.go deleted file mode 100644 index 96504a33b..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/constants.go +++ /dev/null @@ -1,188 +0,0 @@ -package ansiterm - -const LogEnv = "DEBUG_TERMINAL" - -// ANSI constants -// References: -// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm -// -- http://man7.org/linux/man-pages/man4/console_codes.4.html -// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html -// -- http://en.wikipedia.org/wiki/ANSI_escape_code -// -- http://vt100.net/emu/dec_ansi_parser -// -- http://vt100.net/emu/vt500_parser.svg -// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html -// -- http://www.inwap.com/pdp10/ansicode.txt -const ( - // ECMA-48 Set Graphics Rendition - // Note: - // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved - // -- Fonts could possibly be supported via SetCurrentConsoleFontEx - // -- Windows does not expose the per-window cursor (i.e., caret) blink times - ANSI_SGR_RESET = 0 - ANSI_SGR_BOLD = 1 - ANSI_SGR_DIM = 2 - _ANSI_SGR_ITALIC = 3 - ANSI_SGR_UNDERLINE = 4 - _ANSI_SGR_BLINKSLOW = 5 - _ANSI_SGR_BLINKFAST = 6 - ANSI_SGR_REVERSE = 7 - _ANSI_SGR_INVISIBLE = 8 - _ANSI_SGR_LINETHROUGH = 9 - _ANSI_SGR_FONT_00 = 10 - _ANSI_SGR_FONT_01 = 11 - _ANSI_SGR_FONT_02 = 12 - _ANSI_SGR_FONT_03 = 13 - _ANSI_SGR_FONT_04 = 14 - _ANSI_SGR_FONT_05 = 15 - _ANSI_SGR_FONT_06 = 16 - _ANSI_SGR_FONT_07 = 17 - _ANSI_SGR_FONT_08 = 18 - _ANSI_SGR_FONT_09 = 19 - _ANSI_SGR_FONT_10 = 20 - _ANSI_SGR_DOUBLEUNDERLINE = 21 - ANSI_SGR_BOLD_DIM_OFF = 22 - _ANSI_SGR_ITALIC_OFF = 23 - ANSI_SGR_UNDERLINE_OFF = 24 - _ANSI_SGR_BLINK_OFF = 25 - _ANSI_SGR_RESERVED_00 = 26 - ANSI_SGR_REVERSE_OFF = 27 - _ANSI_SGR_INVISIBLE_OFF = 28 - _ANSI_SGR_LINETHROUGH_OFF = 29 - ANSI_SGR_FOREGROUND_BLACK = 30 - ANSI_SGR_FOREGROUND_RED = 31 - ANSI_SGR_FOREGROUND_GREEN = 32 - ANSI_SGR_FOREGROUND_YELLOW = 33 - ANSI_SGR_FOREGROUND_BLUE = 34 - ANSI_SGR_FOREGROUND_MAGENTA = 35 - ANSI_SGR_FOREGROUND_CYAN = 36 - ANSI_SGR_FOREGROUND_WHITE = 37 - _ANSI_SGR_RESERVED_01 = 38 - ANSI_SGR_FOREGROUND_DEFAULT = 39 - ANSI_SGR_BACKGROUND_BLACK = 40 - ANSI_SGR_BACKGROUND_RED = 41 - ANSI_SGR_BACKGROUND_GREEN = 42 - ANSI_SGR_BACKGROUND_YELLOW = 43 - ANSI_SGR_BACKGROUND_BLUE = 44 - ANSI_SGR_BACKGROUND_MAGENTA = 45 - ANSI_SGR_BACKGROUND_CYAN = 46 - ANSI_SGR_BACKGROUND_WHITE = 47 - _ANSI_SGR_RESERVED_02 = 48 - ANSI_SGR_BACKGROUND_DEFAULT = 49 - // 50 - 65: Unsupported - - ANSI_MAX_CMD_LENGTH = 4096 - - MAX_INPUT_EVENTS = 128 - DEFAULT_WIDTH = 80 - DEFAULT_HEIGHT = 24 - - ANSI_BEL = 0x07 - ANSI_BACKSPACE = 0x08 - ANSI_TAB = 0x09 - ANSI_LINE_FEED = 0x0A - ANSI_VERTICAL_TAB = 0x0B - ANSI_FORM_FEED = 0x0C - ANSI_CARRIAGE_RETURN = 0x0D - ANSI_ESCAPE_PRIMARY = 0x1B - ANSI_ESCAPE_SECONDARY = 0x5B - ANSI_OSC_STRING_ENTRY = 0x5D - ANSI_COMMAND_FIRST = 0x40 - ANSI_COMMAND_LAST = 0x7E - DCS_ENTRY = 0x90 - CSI_ENTRY = 0x9B - OSC_STRING = 0x9D - ANSI_PARAMETER_SEP = ";" - ANSI_CMD_G0 = '(' - ANSI_CMD_G1 = ')' - ANSI_CMD_G2 = '*' - ANSI_CMD_G3 = '+' - ANSI_CMD_DECPNM = '>' - ANSI_CMD_DECPAM = '=' - ANSI_CMD_OSC = ']' - ANSI_CMD_STR_TERM = '\\' - - KEY_CONTROL_PARAM_2 = ";2" - KEY_CONTROL_PARAM_3 = ";3" - KEY_CONTROL_PARAM_4 = ";4" - KEY_CONTROL_PARAM_5 = ";5" - KEY_CONTROL_PARAM_6 = ";6" - KEY_CONTROL_PARAM_7 = ";7" - KEY_CONTROL_PARAM_8 = ";8" - KEY_ESC_CSI = "\x1B[" - KEY_ESC_N = "\x1BN" - KEY_ESC_O = "\x1BO" - - FILL_CHARACTER = ' ' 
-) - -func getByteRange(start byte, end byte) []byte { - bytes := make([]byte, 0, 32) - for i := start; i <= end; i++ { - bytes = append(bytes, byte(i)) - } - - return bytes -} - -var toGroundBytes = getToGroundBytes() -var executors = getExecuteBytes() - -// SPACE 20+A0 hex Always and everywhere a blank space -// Intermediate 20-2F hex !"#$%&'()*+,-./ -var intermeds = getByteRange(0x20, 0x2F) - -// Parameters 30-3F hex 0123456789:;<=>? -// CSI Parameters 30-39, 3B hex 0123456789; -var csiParams = getByteRange(0x30, 0x3F) - -var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) - -// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ -var upperCase = getByteRange(0x40, 0x5F) - -// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ -var lowerCase = getByteRange(0x60, 0x7E) - -// Alphabetics 40-7E hex (all of upper and lower case) -var alphabetics = append(upperCase, lowerCase...) - -var printables = getByteRange(0x20, 0x7F) - -var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) -var escapeToGroundBytes = getEscapeToGroundBytes() - -// See http://www.vt100.net/emu/vt500_parser.png for description of the complex -// byte ranges below - -func getEscapeToGroundBytes() []byte { - escapeToGroundBytes := getByteRange(0x30, 0x4F) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) - escapeToGroundBytes = append(escapeToGroundBytes, 0x59) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) - return escapeToGroundBytes -} - -func getExecuteBytes() []byte { - executeBytes := getByteRange(0x00, 0x17) - executeBytes = append(executeBytes, 0x19) - executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) - return executeBytes -} - -func getToGroundBytes() []byte { - groundBytes := []byte{0x18} - groundBytes = append(groundBytes, 0x1A) - groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) - groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
- groundBytes = append(groundBytes, 0x99) - groundBytes = append(groundBytes, 0x9A) - groundBytes = append(groundBytes, 0x9C) - return groundBytes -} - -// Delete 7F hex Always and everywhere ignored -// C1 Control 80-9F hex 32 additional control characters -// G1 Displayable A1-FE hex 94 additional displayable characters -// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/src/github.com/Azure/go-ansiterm/context.go b/vendor/src/github.com/Azure/go-ansiterm/context.go deleted file mode 100644 index 8d66e777c..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/context.go +++ /dev/null @@ -1,7 +0,0 @@ -package ansiterm - -type ansiContext struct { - currentChar byte - paramBuffer []byte - interBuffer []byte -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/src/github.com/Azure/go-ansiterm/csi_entry_state.go deleted file mode 100644 index 1bd6057da..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/csi_entry_state.go +++ /dev/null @@ -1,49 +0,0 @@ -package ansiterm - -type csiEntryState struct { - baseState -} - -func (csiState csiEntryState) Handle(b byte) (s state, e error) { - logger.Infof("CsiEntry::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - return csiState.parser.csiParam, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiEntryState) Transition(s state) error { - logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - case csiState.parser.csiParam: - switch { - case sliceContains(csiParams, csiState.parser.context.currentChar): - csiState.parser.collectParam() - case sliceContains(intermeds, csiState.parser.context.currentChar): - csiState.parser.collectInter() - } - } - - return nil -} - -func (csiState csiEntryState) Enter() error { - csiState.parser.clear() - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/src/github.com/Azure/go-ansiterm/csi_param_state.go deleted file mode 100644 index 4be35c5fd..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/csi_param_state.go +++ /dev/null @@ -1,38 +0,0 @@ -package ansiterm - -type csiParamState struct { - baseState -} - -func (csiState csiParamState) Handle(b byte) (s state, e error) { - logger.Infof("CsiParam::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - csiState.parser.collectParam() - return csiState, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiParamState) Transition(s state) error { - logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - } - - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/src/github.com/Azure/go-ansiterm/escape_intermediate_state.go deleted file mode 100644 index 
2189eb6b6..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/escape_intermediate_state.go +++ /dev/null @@ -1,36 +0,0 @@ -package ansiterm - -type escapeIntermediateState struct { - baseState -} - -func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { - logger.Infof("escapeIntermediateState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(intermeds, b): - return escState, escState.parser.collectInter() - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeIntermediateToGroundBytes, b): - return escState.parser.ground, nil - } - - return escState, nil -} - -func (escState escapeIntermediateState) Transition(s state) error { - logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - } - - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/escape_state.go b/vendor/src/github.com/Azure/go-ansiterm/escape_state.go deleted file mode 100644 index 7b1b9ad3f..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/escape_state.go +++ /dev/null @@ -1,47 +0,0 @@ -package ansiterm - -type escapeState struct { - baseState -} - -func (escState escapeState) Handle(b byte) (s state, e error) { - logger.Infof("escapeState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case b == ANSI_ESCAPE_SECONDARY: - return escState.parser.csiEntry, nil - case b == ANSI_OSC_STRING_ENTRY: - return escState.parser.oscString, nil - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeToGroundBytes, b): - return escState.parser.ground, nil - case sliceContains(intermeds, b): - return escState.parser.escapeIntermediate, nil - } - - return escState, nil -} - -func (escState escapeState) Transition(s state) error { - logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - case escState.parser.escapeIntermediate: - return escState.parser.collectInter() - } - - return nil -} - -func (escState escapeState) Enter() error { - escState.parser.clear() - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/event_handler.go b/vendor/src/github.com/Azure/go-ansiterm/event_handler.go deleted file mode 100644 index 98087b38c..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/event_handler.go +++ /dev/null @@ -1,90 +0,0 @@ -package ansiterm - -type AnsiEventHandler interface { - // Print - Print(b byte) error - - // Execute C0 commands - Execute(b byte) error - - // CUrsor Up - CUU(int) error - - // CUrsor Down - CUD(int) error - - // CUrsor Forward - CUF(int) error - - // CUrsor Backward - CUB(int) error - - // Cursor to Next Line - CNL(int) error - - // Cursor to Previous Line - CPL(int) error - - // Cursor Horizontal position Absolute - CHA(int) error - - // Vertical line Position Absolute - VPA(int) error - - // CUrsor Position - CUP(int, int) error - - // Horizontal and Vertical Position (depends on PUM) - HVP(int, int) error - - // Text Cursor Enable Mode - DECTCEM(bool) error - - // Origin Mode - DECOM(bool) error - - // 132 Column Mode - DECCOLM(bool) error - - // 
Erase in Display - ED(int) error - - // Erase in Line - EL(int) error - - // Insert Line - IL(int) error - - // Delete Line - DL(int) error - - // Insert Character - ICH(int) error - - // Delete Character - DCH(int) error - - // Set Graphics Rendition - SGR([]int) error - - // Pan Down - SU(int) error - - // Pan Up - SD(int) error - - // Device Attributes - DA([]string) error - - // Set Top and Bottom Margins - DECSTBM(int, int) error - - // Index - IND() error - - // Reverse Index - RI() error - - // Flush updates from previous commands - Flush() error -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/ground_state.go b/vendor/src/github.com/Azure/go-ansiterm/ground_state.go deleted file mode 100644 index 52451e946..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/ground_state.go +++ /dev/null @@ -1,24 +0,0 @@ -package ansiterm - -type groundState struct { - baseState -} - -func (gs groundState) Handle(b byte) (s state, e error) { - gs.parser.context.currentChar = b - - nextState, err := gs.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(printables, b): - return gs, gs.parser.print() - - case sliceContains(executors, b): - return gs, gs.parser.execute() - } - - return gs, nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/src/github.com/Azure/go-ansiterm/osc_string_state.go deleted file mode 100644 index 24062d420..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/osc_string_state.go +++ /dev/null @@ -1,31 +0,0 @@ -package ansiterm - -type oscStringState struct { - baseState -} - -func (oscState oscStringState) Handle(b byte) (s state, e error) { - logger.Infof("OscString::Handle %#x", b) - nextState, err := oscState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case isOscStringTerminator(b): - return oscState.parser.ground, nil - } - - return oscState, nil -} - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/parser.go b/vendor/src/github.com/Azure/go-ansiterm/parser.go deleted file mode 100644 index 169f68dbe..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/parser.go +++ /dev/null @@ -1,136 +0,0 @@ -package ansiterm - -import ( - "errors" - "io/ioutil" - "os" - - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger - -type AnsiParser struct { - currState state - eventHandler AnsiEventHandler - context *ansiContext - csiEntry state - csiParam state - dcsEntry state - escape state - escapeIntermediate state - error state - ground state - oscString state - stateMap []state -} - -func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiParser.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.InfoLevel, - } - - parser := &AnsiParser{ - eventHandler: evtHandler, - context: &ansiContext{}, - } - - parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}} - parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}} - parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}} - parser.escape = 
escapeState{baseState{name: "Escape", parser: parser}} - parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}} - parser.error = errorState{baseState{name: "Error", parser: parser}} - parser.ground = groundState{baseState{name: "Ground", parser: parser}} - parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}} - - parser.stateMap = []state{ - parser.csiEntry, - parser.csiParam, - parser.dcsEntry, - parser.escape, - parser.escapeIntermediate, - parser.error, - parser.ground, - parser.oscString, - } - - parser.currState = getState(initialState, parser.stateMap) - - logger.Infof("CreateParser: parser %p", parser) - return parser -} - -func getState(name string, states []state) state { - for _, el := range states { - if el.Name() == name { - return el - } - } - - return nil -} - -func (ap *AnsiParser) Parse(bytes []byte) (int, error) { - for i, b := range bytes { - if err := ap.handle(b); err != nil { - return i, err - } - } - - return len(bytes), ap.eventHandler.Flush() -} - -func (ap *AnsiParser) handle(b byte) error { - ap.context.currentChar = b - newState, err := ap.currState.Handle(b) - if err != nil { - return err - } - - if newState == nil { - logger.Warning("newState is nil") - return errors.New("New state of 'nil' is invalid.") - } - - if newState != ap.currState { - if err := ap.changeState(newState); err != nil { - return err - } - } - - return nil -} - -func (ap *AnsiParser) changeState(newState state) error { - logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) - - // Exit old state - if err := ap.currState.Exit(); err != nil { - logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) - return err - } - - // Perform transition action - if err := ap.currState.Transition(newState); err != nil { - logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) - return err - } - - // Enter new state - if err := newState.Enter(); err != nil { - logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err) - return err - } - - ap.currState = newState - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/src/github.com/Azure/go-ansiterm/parser_action_helpers.go deleted file mode 100644 index 8b69a67a5..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/parser_action_helpers.go +++ /dev/null @@ -1,103 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func parseParams(bytes []byte) ([]string, error) { - paramBuff := make([]byte, 0, 0) - params := []string{} - - for _, v := range bytes { - if v == ';' { - if len(paramBuff) > 0 { - // Completed parameter, append it to the list - s := string(paramBuff) - params = append(params, s) - paramBuff = make([]byte, 0, 0) - } - } else { - paramBuff = append(paramBuff, v) - } - } - - // Last parameter may not be terminated with ';' - if len(paramBuff) > 0 { - s := string(paramBuff) - params = append(params, s) - } - - logger.Infof("Parsed params: %v with length: %d", params, len(params)) - return params, nil -} - -func parseCmd(context ansiContext) (string, error) { - return string(context.currentChar), nil -} - -func getInt(params []string, dflt int) int { - i := getInts(params, 1, dflt)[0] - logger.Infof("getInt: %v", i) - return i -} - -func getInts(params []string, minCount int, dflt int) []int { - ints := []int{} - - for _, v := range params { - i, _ := strconv.Atoi(v) - // Zero is mapped to the default value in 
VT100. - if i == 0 { - i = dflt - } - ints = append(ints, i) - } - - if len(ints) < minCount { - remaining := minCount - len(ints) - for i := 0; i < remaining; i++ { - ints = append(ints, dflt) - } - } - - logger.Infof("getInts: %v", ints) - - return ints -} - -func (ap *AnsiParser) modeDispatch(param string, set bool) error { - switch param { - case "?3": - return ap.eventHandler.DECCOLM(set) - case "?6": - return ap.eventHandler.DECOM(set) - case "?25": - return ap.eventHandler.DECTCEM(set) - } - return nil -} - -func (ap *AnsiParser) hDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], true) - } - - return nil -} - -func (ap *AnsiParser) lDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], false) - } - - return nil -} - -func getEraseParam(params []string) int { - param := getInt(params, 0) - if param < 0 || 3 < param { - param = 0 - } - - return param -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/src/github.com/Azure/go-ansiterm/parser_actions.go deleted file mode 100644 index 58750a2d2..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/parser_actions.go +++ /dev/null @@ -1,122 +0,0 @@ -package ansiterm - -import ( - "fmt" -) - -func (ap *AnsiParser) collectParam() error { - currChar := ap.context.currentChar - logger.Infof("collectParam %#x", currChar) - ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) - return nil -} - -func (ap *AnsiParser) collectInter() error { - currChar := ap.context.currentChar - logger.Infof("collectInter %#x", currChar) - ap.context.paramBuffer = append(ap.context.interBuffer, currChar) - return nil -} - -func (ap *AnsiParser) escDispatch() error { - cmd, _ := parseCmd(*ap.context) - intermeds := ap.context.interBuffer - logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar) - logger.Infof("escDispatch: %v(%v)", cmd, intermeds) - - switch cmd { - case "D": // IND - return ap.eventHandler.IND() - case "E": // NEL, equivalent to CRLF - err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) - if err == nil { - err = ap.eventHandler.Execute(ANSI_LINE_FEED) - } - return err - case "M": // RI - return ap.eventHandler.RI() - } - - return nil -} - -func (ap *AnsiParser) csiDispatch() error { - cmd, _ := parseCmd(*ap.context) - params, _ := parseParams(ap.context.paramBuffer) - - logger.Infof("csiDispatch: %v(%v)", cmd, params) - - switch cmd { - case "@": - return ap.eventHandler.ICH(getInt(params, 1)) - case "A": - return ap.eventHandler.CUU(getInt(params, 1)) - case "B": - return ap.eventHandler.CUD(getInt(params, 1)) - case "C": - return ap.eventHandler.CUF(getInt(params, 1)) - case "D": - return ap.eventHandler.CUB(getInt(params, 1)) - case "E": - return ap.eventHandler.CNL(getInt(params, 1)) - case "F": - return ap.eventHandler.CPL(getInt(params, 1)) - case "G": - return ap.eventHandler.CHA(getInt(params, 1)) - case "H": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.CUP(x, y) - case "J": - param := getEraseParam(params) - return ap.eventHandler.ED(param) - case "K": - param := getEraseParam(params) - return ap.eventHandler.EL(param) - case "L": - return ap.eventHandler.IL(getInt(params, 1)) - case "M": - return ap.eventHandler.DL(getInt(params, 1)) - case "P": - return ap.eventHandler.DCH(getInt(params, 1)) - case "S": - return ap.eventHandler.SU(getInt(params, 1)) - case "T": - return ap.eventHandler.SD(getInt(params, 1)) - case "c": - return ap.eventHandler.DA(params) - 
case "d": - return ap.eventHandler.VPA(getInt(params, 1)) - case "f": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.HVP(x, y) - case "h": - return ap.hDispatch(params) - case "l": - return ap.lDispatch(params) - case "m": - return ap.eventHandler.SGR(getInts(params, 1, 0)) - case "r": - ints := getInts(params, 2, 1) - top, bottom := ints[0], ints[1] - return ap.eventHandler.DECSTBM(top, bottom) - default: - logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)) - return nil - } - -} - -func (ap *AnsiParser) print() error { - return ap.eventHandler.Print(ap.context.currentChar) -} - -func (ap *AnsiParser) clear() error { - ap.context = &ansiContext{} - return nil -} - -func (ap *AnsiParser) execute() error { - return ap.eventHandler.Execute(ap.context.currentChar) -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/states.go b/vendor/src/github.com/Azure/go-ansiterm/states.go deleted file mode 100644 index f2ea1fcd1..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/states.go +++ /dev/null @@ -1,71 +0,0 @@ -package ansiterm - -type stateID int - -type state interface { - Enter() error - Exit() error - Handle(byte) (state, error) - Name() string - Transition(state) error -} - -type baseState struct { - name string - parser *AnsiParser -} - -func (base baseState) Enter() error { - return nil -} - -func (base baseState) Exit() error { - return nil -} - -func (base baseState) Handle(b byte) (s state, e error) { - - switch { - case b == CSI_ENTRY: - return base.parser.csiEntry, nil - case b == DCS_ENTRY: - return base.parser.dcsEntry, nil - case b == ANSI_ESCAPE_PRIMARY: - return base.parser.escape, nil - case b == OSC_STRING: - return base.parser.oscString, nil - case sliceContains(toGroundBytes, b): - return base.parser.ground, nil - } - - return nil, nil -} - -func (base baseState) Name() string { - return base.name -} - -func (base baseState) Transition(s state) error { - if s == base.parser.ground { - execBytes := []byte{0x18} - execBytes = append(execBytes, 0x1A) - execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) - execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
- execBytes = append(execBytes, 0x99) - execBytes = append(execBytes, 0x9A) - - if sliceContains(execBytes, base.parser.context.currentChar) { - return base.parser.execute() - } - } - - return nil -} - -type dcsEntryState struct { - baseState -} - -type errorState struct { - baseState -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/utilities.go b/vendor/src/github.com/Azure/go-ansiterm/utilities.go deleted file mode 100644 index 392114493..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/utilities.go +++ /dev/null @@ -1,21 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func sliceContains(bytes []byte, b byte) bool { - for _, v := range bytes { - if v == b { - return true - } - } - - return false -} - -func convertBytesToInteger(bytes []byte) int { - s := string(bytes) - i, _ := strconv.Atoi(s) - return i -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/ansi.go deleted file mode 100644 index daf2f0696..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/ansi.go +++ /dev/null @@ -1,182 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "os" - "strconv" - "strings" - "syscall" - - "github.com/Azure/go-ansiterm" -) - -// Windows keyboard constants -// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. -const ( - VK_PRIOR = 0x21 // PAGE UP key - VK_NEXT = 0x22 // PAGE DOWN key - VK_END = 0x23 // END key - VK_HOME = 0x24 // HOME key - VK_LEFT = 0x25 // LEFT ARROW key - VK_UP = 0x26 // UP ARROW key - VK_RIGHT = 0x27 // RIGHT ARROW key - VK_DOWN = 0x28 // DOWN ARROW key - VK_SELECT = 0x29 // SELECT key - VK_PRINT = 0x2A // PRINT key - VK_EXECUTE = 0x2B // EXECUTE key - VK_SNAPSHOT = 0x2C // PRINT SCREEN key - VK_INSERT = 0x2D // INS key - VK_DELETE = 0x2E // DEL key - VK_HELP = 0x2F // HELP key - VK_F1 = 0x70 // F1 key - VK_F2 = 0x71 // F2 key - VK_F3 = 0x72 // F3 key - VK_F4 = 0x73 // F4 key - VK_F5 = 0x74 // F5 key - VK_F6 = 0x75 // F6 key - VK_F7 = 0x76 // F7 key - VK_F8 = 0x77 // F8 key - VK_F9 = 0x78 // F9 key - VK_F10 = 0x79 // F10 key - VK_F11 = 0x7A // F11 key - VK_F12 = 0x7B // F12 key - - RIGHT_ALT_PRESSED = 0x0001 - LEFT_ALT_PRESSED = 0x0002 - RIGHT_CTRL_PRESSED = 0x0004 - LEFT_CTRL_PRESSED = 0x0008 - SHIFT_PRESSED = 0x0010 - NUMLOCK_ON = 0x0020 - SCROLLLOCK_ON = 0x0040 - CAPSLOCK_ON = 0x0080 - ENHANCED_KEY = 0x0100 -) - -type ansiCommand struct { - CommandBytes []byte - Command string - Parameters []string - IsSpecial bool -} - -func newAnsiCommand(command []byte) *ansiCommand { - - if isCharacterSelectionCmdChar(command[1]) { - // Is Character Set Selection commands - return &ansiCommand{ - CommandBytes: command, - Command: string(command), - IsSpecial: true, - } - } - - // last char is command character - lastCharIndex := len(command) - 1 - - ac := &ansiCommand{ - CommandBytes: command, - Command: string(command[lastCharIndex]), - IsSpecial: false, - } - - // more than a single escape - if lastCharIndex != 0 { - start := 1 - // skip if double char escape sequence - if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { - start++ - } - // convert this to GetNextParam method - ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) - } - - return ac -} - -func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { - if index < 0 || index >= len(ac.Parameters) { - return defaultValue - } - - param, err := 
strconv.ParseInt(ac.Parameters[index], 10, 16) - if err != nil { - return defaultValue - } - - return int16(param) -} - -func (ac *ansiCommand) String() string { - return fmt.Sprintf("0x%v \"%v\" (\"%v\")", - bytesToHex(ac.CommandBytes), - ac.Command, - strings.Join(ac.Parameters, "\",\"")) -} - -// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. -// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. -func isAnsiCommandChar(b byte) bool { - switch { - case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: - return true - case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: - // non-CSI escape sequence terminator - return true - case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: - // String escape sequence terminator - return true - } - return false -} - -func isXtermOscSequence(command []byte, current byte) bool { - return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) -} - -func isCharacterSelectionCmdChar(b byte) bool { - return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) -} - -// bytesToHex converts a slice of bytes to a human-readable string. -func bytesToHex(b []byte) string { - hex := make([]string, len(b)) - for i, ch := range b { - hex[i] = fmt.Sprintf("%X", ch) - } - return strings.Join(hex, "") -} - -// ensureInRange adjusts the passed value, if necessary, to ensure it is within -// the passed min / max range. -func ensureInRange(n int16, min int16, max int16) int16 { - if n < min { - return min - } else if n > max { - return max - } else { - return n - } -} - -func GetStdFile(nFile int) (*os.File, uintptr) { - var file *os.File - switch nFile { - case syscall.STD_INPUT_HANDLE: - file = os.Stdin - case syscall.STD_OUTPUT_HANDLE: - file = os.Stdout - case syscall.STD_ERROR_HANDLE: - file = os.Stderr - default: - panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) - } - - fd, err := syscall.GetStdHandle(nFile) - if err != nil { - panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err)) - } - - return file, uintptr(fd) -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/api.go deleted file mode 100644 index 462d92f8e..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/api.go +++ /dev/null @@ -1,322 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "syscall" - "unsafe" -) - -//=========================================================================================================== -// IMPORTANT NOTE: -// -// The methods below make extensive use of the "unsafe" package to obtain the required pointers. -// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack -// variables) the pointers reference *before* the API completes. -// -// As a result, in those cases, the code must hint that the variables remain in active by invoking the -// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer -// require unsafe pointers. 
-// -// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform -// the garbage collector the variables remain in use if: -// -// -- The value is not a pointer (e.g., int32, struct) -// -- The value is not referenced by the method after passing the pointer to Windows -// -// See http://golang.org/doc/go1.3. -//=========================================================================================================== - -var ( - kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - - getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") - setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") - setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") - setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") - getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") - setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") - scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") - setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") - setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") - writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") - readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") - waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") -) - -// Windows Console constants -const ( - // Console modes - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. - ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_WINDOW_INPUT = 0x0008 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_EXTENDED_FLAGS = 0x0080 - - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 - - // Character attributes - // Note: - // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). - // Clearing all foreground or background colors results in black; setting all creates white. - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. - FOREGROUND_BLUE uint16 = 0x0001 - FOREGROUND_GREEN uint16 = 0x0002 - FOREGROUND_RED uint16 = 0x0004 - FOREGROUND_INTENSITY uint16 = 0x0008 - FOREGROUND_MASK uint16 = 0x000F - - BACKGROUND_BLUE uint16 = 0x0010 - BACKGROUND_GREEN uint16 = 0x0020 - BACKGROUND_RED uint16 = 0x0040 - BACKGROUND_INTENSITY uint16 = 0x0080 - BACKGROUND_MASK uint16 = 0x00F0 - - COMMON_LVB_MASK uint16 = 0xFF00 - COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 - COMMON_LVB_UNDERSCORE uint16 = 0x8000 - - // Input event types - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
- KEY_EVENT = 0x0001 - MOUSE_EVENT = 0x0002 - WINDOW_BUFFER_SIZE_EVENT = 0x0004 - MENU_EVENT = 0x0008 - FOCUS_EVENT = 0x0010 - - // WaitForSingleObject return codes - WAIT_ABANDONED = 0x00000080 - WAIT_FAILED = 0xFFFFFFFF - WAIT_SIGNALED = 0x0000000 - WAIT_TIMEOUT = 0x00000102 - - // WaitForSingleObject wait duration - WAIT_INFINITE = 0xFFFFFFFF - WAIT_ONE_SECOND = 1000 - WAIT_HALF_SECOND = 500 - WAIT_QUARTER_SECOND = 250 -) - -// Windows API Console types -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) -// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment -type ( - CHAR_INFO struct { - UnicodeChar uint16 - Attributes uint16 - } - - CONSOLE_CURSOR_INFO struct { - Size uint32 - Visible int32 - } - - CONSOLE_SCREEN_BUFFER_INFO struct { - Size COORD - CursorPosition COORD - Attributes uint16 - Window SMALL_RECT - MaximumWindowSize COORD - } - - COORD struct { - X int16 - Y int16 - } - - SMALL_RECT struct { - Left int16 - Top int16 - Right int16 - Bottom int16 - } - - // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. - INPUT_RECORD struct { - EventType uint16 - KeyEvent KEY_EVENT_RECORD - } - - KEY_EVENT_RECORD struct { - KeyDown int32 - RepeatCount uint16 - VirtualKeyCode uint16 - VirtualScanCode uint16 - UnicodeChar uint16 - ControlKeyState uint32 - } - - WINDOW_BUFFER_SIZE struct { - Size COORD - } -) - -// boolToBOOL converts a Go bool into a Windows int32. -func boolToBOOL(f bool) int32 { - if f { - return int32(1) - } else { - return int32(0) - } -} - -// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. -func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorInfo sets the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. -func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorPosition location of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. -func SetConsoleCursorPosition(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// GetConsoleMode gets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. -func GetConsoleMode(handle uintptr) (mode uint32, err error) { - err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) - return mode, err -} - -// SetConsoleMode sets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
-func SetConsoleMode(handle uintptr, mode uint32) error { - r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) - use(mode) - return checkError(r1, r2, err) -} - -// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. -func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { - info := CONSOLE_SCREEN_BUFFER_INFO{} - err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) - if err != nil { - return nil, err - } - return &info, nil -} - -func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { - r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) - use(scrollRect) - use(clipRect) - use(destOrigin) - use(char) - return checkError(r1, r2, err) -} - -// SetConsoleScreenBufferSize sets the size of the console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. -func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// SetConsoleTextAttribute sets the attributes of characters written to the -// console screen buffer by the WriteFile or WriteConsole function. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. -func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { - r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) - use(attribute) - return checkError(r1, r2, err) -} - -// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. -// Note that the size and location must be within and no larger than the backing console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. -func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { - r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) - use(isAbsolute) - use(rect) - return checkError(r1, r2, err) -} - -// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. -func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { - r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) - use(buffer) - use(bufferSize) - use(bufferCoord) - return checkError(r1, r2, err) -} - -// ReadConsoleInput reads (and removes) data from the console input buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
-func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { - r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) - use(buffer) - return checkError(r1, r2, err) -} - -// WaitForSingleObject waits for the passed handle to be signaled. -// It returns true if the handle was signaled; false otherwise. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. -func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { - r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) - switch r1 { - case WAIT_ABANDONED, WAIT_TIMEOUT: - return false, nil - case WAIT_SIGNALED: - return true, nil - } - use(msWait) - return false, err -} - -// String helpers -func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { - return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) -} - -func (coord COORD) String() string { - return fmt.Sprintf("%v,%v", coord.X, coord.Y) -} - -func (rect SMALL_RECT) String() string { - return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) -} - -// checkError evaluates the results of a Windows API call and returns the error if it failed. -func checkError(r1, r2 uintptr, err error) error { - // Windows APIs return non-zero to indicate success - if r1 != 0 { - return nil - } - - // Return the error if provided, otherwise default to EINVAL - if err != nil { - return err - } - return syscall.EINVAL -} - -// coordToPointer converts a COORD into a uintptr (by fooling the type system). -func coordToPointer(c COORD) uintptr { - // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. - return uintptr(*((*uint32)(unsafe.Pointer(&c)))) -} - -// use is a no-op, but the compiler cannot see that it is. -// Calling use(p) ensures that p is kept live until that point. -func use(p interface{}) {} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/attr_translation.go deleted file mode 100644 index cbec8f728..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/attr_translation.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -const ( - FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE -) - -// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the -// request represented by the passed ANSI mode. 
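The "IMPORTANT NOTE" in api.go above predates runtime.KeepAlive, which Go 1.7 later added as the supported way to express the same liveness hint that the no-op use() conveys in the removed code. An editorial sketch of the modern equivalent — a standalone toy, not part of the removed package, with a COORD struct mirroring the one above:

package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

type COORD struct{ X, Y int16 }

func main() {
	c := COORD{X: 10, Y: 20}
	// Imagine handing this pointer to a Windows API via syscall. Nothing below
	// otherwise references c, so the GC could in principle reclaim it early.
	p := uintptr(unsafe.Pointer(&c))
	fmt.Printf("would pass %#x to the API\n", p)
	// Keep c reachable until this point, i.e. until the (imagined) call has
	// returned — the role played by use(...) in the code above.
	runtime.KeepAlive(&c)
}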
-func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { - switch ansiMode { - - // Mode styles - case ansiterm.ANSI_SGR_BOLD: - windowsMode = windowsMode | FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: - windowsMode &^= FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_UNDERLINE: - windowsMode = windowsMode | COMMON_LVB_UNDERSCORE - - case ansiterm.ANSI_SGR_REVERSE: - inverted = true - - case ansiterm.ANSI_SGR_REVERSE_OFF: - inverted = false - - case ansiterm.ANSI_SGR_UNDERLINE_OFF: - windowsMode &^= COMMON_LVB_UNDERSCORE - - // Foreground colors - case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: - windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_BLACK: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_RED: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED - - case ansiterm.ANSI_SGR_FOREGROUND_GREEN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_BLUE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_CYAN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_WHITE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - - // Background colors - case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: - // Black with no intensity - windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_BLACK: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_RED: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED - - case ansiterm.ANSI_SGR_BACKGROUND_GREEN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_BLUE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_CYAN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_WHITE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE - } - - return windowsMode, inverted -} - -// invertAttributes inverts the foreground and background colors of a Windows attributes value -func invertAttributes(windowsMode uint16) uint16 { - return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go deleted file mode 100644 index f015723ad..000000000 --- 
a/vendor/src/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go +++ /dev/null @@ -1,101 +0,0 @@ -// +build windows - -package winterm - -const ( - horizontal = iota - vertical -) - -func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { - if h.originMode { - sr := h.effectiveSr(info.Window) - return SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - } else { - return SMALL_RECT{ - Top: info.Window.Top, - Bottom: info.Window.Bottom, - Left: 0, - Right: info.Size.X - 1, - } - } -} - -// setCursorPosition sets the cursor to the specified position, bounded to the screen size -func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { - position.X = ensureInRange(position.X, window.Left, window.Right) - position.Y = ensureInRange(position.Y, window.Top, window.Bottom) - err := SetConsoleCursorPosition(h.fd, position) - if err != nil { - return err - } - logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y) - return err -} - -func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { - return h.moveCursor(vertical, param) -} - -func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { - return h.moveCursor(horizontal, param) -} - -func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - switch moveMode { - case horizontal: - position.X += int16(param) - case vertical: - position.Y += int16(param) - } - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = 0 - position.Y += int16(param) - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = int16(param) - 1 - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/erase_helpers.go deleted file mode 100644 index 244b5fa25..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/erase_helpers.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { - // Ignore an invalid (negative area) request - if toCoord.Y < fromCoord.Y { - return nil - } - - var err error - - var coordStart = COORD{} - var coordEnd = COORD{} - - xCurrent, yCurrent := fromCoord.X, fromCoord.Y - xEnd, yEnd := toCoord.X, toCoord.Y - - // Clear any partial initial line - if xCurrent > 0 { - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yCurrent - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent += 1 - } - - // Clear intervening rectangular section - if yCurrent < yEnd { - coordStart.X, coordStart.Y = 
xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd-1 - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent = yEnd - } - - // Clear remaining partial ending line - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { - region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} - width := toCoord.X - fromCoord.X + 1 - height := toCoord.Y - fromCoord.Y + 1 - size := uint32(width) * uint32(height) - - if size <= 0 { - return nil - } - - buffer := make([]CHAR_INFO, size) - - char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} - for i := 0; i < int(size); i++ { - buffer[i] = char - } - - err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/scroll_helper.go deleted file mode 100644 index 706d27057..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/scroll_helper.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build windows - -package winterm - -// effectiveSr gets the current effective scroll region in buffer coordinates -func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { - top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) - bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) - if top >= bottom { - top = window.Top - bottom = window.Bottom - } - return scrollRegion{top: top, bottom: bottom} -} - -func (h *windowsAnsiEventHandler) scrollUp(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - return h.scroll(param, sr, info) -} - -func (h *windowsAnsiEventHandler) scrollDown(param int) error { - return h.scrollUp(-param) -} - -func (h *windowsAnsiEventHandler) deleteLines(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - start := info.CursorPosition.Y - sr := h.effectiveSr(info.Window) - // Lines cannot be inserted or deleted outside the scrolling region. - if start >= sr.top && start <= sr.bottom { - sr.top = start - return h.scroll(param, sr, info) - } else { - return nil - } -} - -func (h *windowsAnsiEventHandler) insertLines(param int) error { - return h.deleteLines(-param) -} - -// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
-func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { - logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) - logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) - - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: 0, - Y: sr.top - int16(param), - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} - -func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - return h.scrollLine(param, info.CursorPosition, info) -} - -func (h *windowsAnsiEventHandler) insertCharacters(param int) error { - return h.deleteCharacters(-param) -} - -// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. -func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: position.Y, - Bottom: position.Y, - Left: position.X, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: position.X - int16(columns), - Y: position.Y, - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/utilities.go deleted file mode 100644 index afa7635d7..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/utilities.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package winterm - -// AddInRange increments a value by the passed quantity while ensuring the values -// always remain within the supplied min / max range. 
-func addInRange(n int16, increment int16, min int16, max int16) int16 { - return ensureInRange(n+increment, min, max) -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/win_event_handler.go deleted file mode 100644 index 4d858ed61..000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ /dev/null @@ -1,726 +0,0 @@ -// +build windows - -package winterm - -import ( - "bytes" - "io/ioutil" - "os" - "strconv" - - "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger - -type windowsAnsiEventHandler struct { - fd uintptr - file *os.File - infoReset *CONSOLE_SCREEN_BUFFER_INFO - sr scrollRegion - buffer bytes.Buffer - attributes uint16 - inverted bool - wrapNext bool - drewMarginByte bool - originMode bool - marginByte byte - curInfo *CONSOLE_SCREEN_BUFFER_INFO - curPos COORD -} - -func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("winEventHandler.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - - infoReset, err := GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - return &windowsAnsiEventHandler{ - fd: fd, - file: file, - infoReset: infoReset, - attributes: infoReset.Attributes, - } -} - -type scrollRegion struct { - top int16 - bottom int16 -} - -// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the -// current cursor position and scroll region settings, in which case it returns -// true. If no special handling is necessary, then it does nothing and returns -// false. -// -// In the false case, the caller should ensure that a carriage return -// and line feed are inserted or that the text is otherwise wrapped. -func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { - if h.wrapNext { - if err := h.Flush(); err != nil { - return false, err - } - h.clearWrap() - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return false, err - } - sr := h.effectiveSr(info.Window) - if pos.Y == sr.bottom { - // Scrolling is necessary. Let Windows automatically scroll if the scrolling region - // is the full window. - if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { - if includeCR { - pos.X = 0 - h.updatePos(pos) - } - return false, nil - } - - // A custom scroll region is active. Scroll the window manually to simulate - // the LF. - if err := h.Flush(); err != nil { - return false, err - } - logger.Info("Simulating LF inside scroll region") - if err := h.scrollUp(1); err != nil { - return false, err - } - if includeCR { - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - - } else if pos.Y < info.Window.Bottom { - // Let Windows handle the LF. - pos.Y++ - if includeCR { - pos.X = 0 - } - h.updatePos(pos) - return false, nil - } else { - // The cursor is at the bottom of the screen but outside the scroll - // region. Skip the LF. - logger.Info("Simulating LF outside scroll region") - if includeCR { - if err := h.Flush(); err != nil { - return false, err - } - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - } -} - -// executeLF executes a LF without a CR. 
-func (h *windowsAnsiEventHandler) executeLF() error { - handled, err := h.simulateLF(false) - if err != nil { - return err - } - if !handled { - // Windows LF will reset the cursor column position. Write the LF - // and restore the cursor position. - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - if pos.X != 0 { - if err := h.Flush(); err != nil { - return err - } - logger.Info("Resetting cursor position for LF without CR") - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - } - return nil -} - -func (h *windowsAnsiEventHandler) Print(b byte) error { - if h.wrapNext { - h.buffer.WriteByte(h.marginByte) - h.clearWrap() - if _, err := h.simulateLF(true); err != nil { - return err - } - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X == info.Size.X-1 { - h.wrapNext = true - h.marginByte = b - } else { - pos.X++ - h.updatePos(pos) - h.buffer.WriteByte(b) - } - return nil -} - -func (h *windowsAnsiEventHandler) Execute(b byte) error { - switch b { - case ansiterm.ANSI_TAB: - logger.Info("Execute(TAB)") - // Move to the next tab stop, but preserve auto-wrap if already set. - if !h.wrapNext { - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - pos.X = (pos.X + 8) - pos.X%8 - if pos.X >= info.Size.X { - pos.X = info.Size.X - 1 - } - if err := h.Flush(); err != nil { - return err - } - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - return nil - - case ansiterm.ANSI_BEL: - h.buffer.WriteByte(ansiterm.ANSI_BEL) - return nil - - case ansiterm.ANSI_BACKSPACE: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X > 0 { - pos.X-- - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) - } - return nil - - case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: - // Treat as true LF. - return h.executeLF() - - case ansiterm.ANSI_LINE_FEED: - // Simulate a CR and LF for now since there is no way in go-ansiterm - // to tell if the LF should include CR (and more things break when it's - // missing than when it's incorrectly added). 
- handled, err := h.simulateLF(true) - if handled || err != nil { - return err - } - return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - - case ansiterm.ANSI_CARRIAGE_RETURN: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X != 0 { - pos.X = 0 - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) - } - return nil - - default: - return nil - } -} - -func (h *windowsAnsiEventHandler) CUU(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(-param) -} - -func (h *windowsAnsiEventHandler) CUD(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(param) -} - -func (h *windowsAnsiEventHandler) CUF(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(param) -} - -func (h *windowsAnsiEventHandler) CUB(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(-param) -} - -func (h *windowsAnsiEventHandler) CNL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(param) -} - -func (h *windowsAnsiEventHandler) CPL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(-param) -} - -func (h *windowsAnsiEventHandler) CHA(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorColumn(param) -} - -func (h *windowsAnsiEventHandler) VPA(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("VPA: [[%d]]", param) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - window := h.getCursorWindow(info) - position := info.CursorPosition - position.Y = window.Top + int16(param) - 1 - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) CUP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUP: [[%d %d]]", row, col) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - window := h.getCursorWindow(info) - position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) HVP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("HVP: [[%d %d]]", row, col) - h.clearWrap() - return h.CUP(row, col) -} - -func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) - h.clearWrap() - return nil -} - -func (h *windowsAnsiEventHandler) DECOM(enable bool) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)}) - 
h.clearWrap() - h.originMode = enable - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) - h.clearWrap() - if err := h.ED(2); err != nil { - return err - } - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - targetWidth := int16(80) - if use132 { - targetWidth = 132 - } - if info.Size.X < targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - logger.Info("set buffer failed:", err) - return err - } - } - window := info.Window - window.Left = 0 - window.Right = targetWidth - 1 - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - logger.Info("set window failed:", err) - return err - } - if info.Size.X > targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - logger.Info("set buffer failed:", err) - return err - } - } - return SetConsoleCursorPosition(h.fd, COORD{0, 0}) -} - -func (h *windowsAnsiEventHandler) ED(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("ED: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - - // [J -- Erases from the cursor to the end of the screen, including the cursor position. - // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. - // [2J -- Erases the complete display. The cursor does not move. - // Notes: - // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X - 1, info.Size.Y - 1} - - case 1: - start = COORD{0, 0} - end = info.CursorPosition - - case 2: - start = COORD{0, 0} - end = COORD{info.Size.X - 1, info.Size.Y - 1} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - // If the whole buffer was cleared, move the window to the top while preserving - // the window-relative cursor position. - if param == 2 { - pos := info.CursorPosition - window := info.Window - pos.Y -= window.Top - window.Bottom -= window.Top - window.Top = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - return err - } - } - - return nil -} - -func (h *windowsAnsiEventHandler) EL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("EL: [%v]", strconv.Itoa(param)) - h.clearWrap() - - // [K -- Erases from the cursor to the end of the line, including the cursor position. - // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. - // [2K -- Erases the complete line. 
- - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X, info.CursorPosition.Y} - - case 1: - start = COORD{0, info.CursorPosition.Y} - end = info.CursorPosition - - case 2: - start = COORD{0, info.CursorPosition.Y} - end = COORD{info.Size.X, info.CursorPosition.Y} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) IL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("IL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertLines(param) -} - -func (h *windowsAnsiEventHandler) DL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteLines(param) -} - -func (h *windowsAnsiEventHandler) ICH(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("ICH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertCharacters(param) -} - -func (h *windowsAnsiEventHandler) DCH(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DCH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteCharacters(param) -} - -func (h *windowsAnsiEventHandler) SGR(params []int) error { - if err := h.Flush(); err != nil { - return err - } - strings := []string{} - for _, v := range params { - strings = append(strings, strconv.Itoa(v)) - } - - logger.Infof("SGR: [%v]", strings) - - if len(params) <= 0 { - h.attributes = h.infoReset.Attributes - h.inverted = false - } else { - for _, attr := range params { - - if attr == ansiterm.ANSI_SGR_RESET { - h.attributes = h.infoReset.Attributes - h.inverted = false - continue - } - - h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) - } - } - - attributes := h.attributes - if h.inverted { - attributes = invertAttributes(attributes) - } - err := SetConsoleTextAttribute(h.fd, attributes) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) SU(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("SU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollUp(param) -} - -func (h *windowsAnsiEventHandler) SD(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("SD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollDown(param) -} - -func (h *windowsAnsiEventHandler) DA(params []string) error { - logger.Infof("DA: [%v]", params) - // DA cannot be implemented because it must send data on the VT100 input stream, - // which is not available to go-ansiterm. - return nil -} - -func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECSTBM: [%d, %d]", top, bottom) - - // Windows is 0 indexed, Linux is 1 indexed - h.sr.top = int16(top - 1) - h.sr.bottom = int16(bottom - 1) - - // This command also moves the cursor to the origin. 
- h.clearWrap() - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) RI() error { - if err := h.Flush(); err != nil { - return err - } - logger.Info("RI: []") - h.clearWrap() - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - if info.CursorPosition.Y == sr.top { - return h.scrollDown(1) - } - - return h.moveCursorVertical(-1) -} - -func (h *windowsAnsiEventHandler) IND() error { - logger.Info("IND: []") - return h.executeLF() -} - -func (h *windowsAnsiEventHandler) Flush() error { - h.curInfo = nil - if h.buffer.Len() > 0 { - logger.Infof("Flush: [%s]", h.buffer.Bytes()) - if _, err := h.buffer.WriteTo(h.file); err != nil { - return err - } - } - - if h.wrapNext && !h.drewMarginByte { - logger.Infof("Flush: drawing margin byte '%c'", h.marginByte) - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} - size := COORD{1, 1} - position := COORD{0, 0} - region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} - if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { - return err - } - h.drewMarginByte = true - } - return nil -} - -// cacheConsoleInfo ensures that the current console screen information has been queried -// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. -func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { - if h.curInfo == nil { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return COORD{}, nil, err - } - h.curInfo = info - h.curPos = info.CursorPosition - } - return h.curPos, h.curInfo, nil -} - -func (h *windowsAnsiEventHandler) updatePos(pos COORD) { - if h.curInfo == nil { - panic("failed to call getCurrentInfo before calling updatePos") - } - h.curPos = pos -} - -// clearWrap clears the state where the cursor is in the margin -// waiting for the next character before wrapping the line. This must -// be done before most operations that act on the cursor. -func (h *windowsAnsiEventHandler) clearWrap() { - h.wrapNext = false - h.drewMarginByte = false -} diff --git a/vendor/src/github.com/BurntSushi/toml/.gitignore b/vendor/src/github.com/BurntSushi/toml/.gitignore deleted file mode 100644 index 0cd380037..000000000 --- a/vendor/src/github.com/BurntSushi/toml/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -TAGS -tags -.*.swp -tomlcheck/tomlcheck -toml.test diff --git a/vendor/src/github.com/BurntSushi/toml/.travis.yml b/vendor/src/github.com/BurntSushi/toml/.travis.yml deleted file mode 100644 index 43caf6d02..000000000 --- a/vendor/src/github.com/BurntSushi/toml/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -go: - - 1.1 - - 1.2 - - tip -install: - - go install ./... 
- - go get github.com/BurntSushi/toml-test -script: - - export PATH="$PATH:$HOME/gopath/bin" - - make test - diff --git a/vendor/src/github.com/BurntSushi/toml/COMPATIBLE b/vendor/src/github.com/BurntSushi/toml/COMPATIBLE deleted file mode 100644 index 21e0938ca..000000000 --- a/vendor/src/github.com/BurntSushi/toml/COMPATIBLE +++ /dev/null @@ -1,3 +0,0 @@ -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - diff --git a/vendor/src/github.com/BurntSushi/toml/COPYING b/vendor/src/github.com/BurntSushi/toml/COPYING deleted file mode 100644 index 5a8e33254..000000000 --- a/vendor/src/github.com/BurntSushi/toml/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/vendor/src/github.com/BurntSushi/toml/Makefile b/vendor/src/github.com/BurntSushi/toml/Makefile deleted file mode 100644 index 3600848d3..000000000 --- a/vendor/src/github.com/BurntSushi/toml/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -install: - go install ./... - -test: install - go test -v - toml-test toml-test-decoder - toml-test -encoder toml-test-encoder - -fmt: - gofmt -w *.go */*.go - colcheck *.go */*.go - -tags: - find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS - -push: - git push origin master - git push github master - diff --git a/vendor/src/github.com/BurntSushi/toml/README.md b/vendor/src/github.com/BurntSushi/toml/README.md deleted file mode 100644 index e861c0ca7..000000000 --- a/vendor/src/github.com/BurntSushi/toml/README.md +++ /dev/null @@ -1,220 +0,0 @@ -## TOML parser and encoder for Go with reflection - -TOML stands for Tom's Obvious, Minimal Language. This Go package provides a -reflection interface similar to Go's standard library `json` and `xml` -packages. This package also supports the `encoding.TextUnmarshaler` and -`encoding.TextMarshaler` interfaces so that you can define custom data -representations. (There is an example of this below.) - -Spec: https://github.com/mojombo/toml - -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - -Documentation: http://godoc.org/github.com/BurntSushi/toml - -Installation: - -```bash -go get github.com/BurntSushi/toml -``` - -Try the toml validator: - -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` - -[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml) - - -### Testing - -This package passes all tests in -[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder -and the encoder. - -### Examples - -This package works similarly to how the Go standard library handles `XML` -and `JSON`. Namely, data is loaded into Go values via reflection. 
- -For the simplest example, consider some TOML file as just a list of keys -and values: - -```toml -Age = 25 -Cats = [ "Cauchy", "Plato" ] -Pi = 3.14 -Perfection = [ 6, 28, 496, 8128 ] -DOB = 1987-07-05T05:45:00Z -``` - -Which could be defined in Go as: - -```go -type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time // requires `import time` -} -``` - -And then decoded with: - -```go -var conf Config -if _, err := toml.Decode(tomlData, &conf); err != nil { - // handle error -} -``` - -You can also use struct tags if your struct field name doesn't map to a TOML -key value directly: - -```toml -some_key_NAME = "wat" -``` - -```go -type TOML struct { - ObscureKey string `toml:"some_key_NAME"` -} -``` - -### Using the `encoding.TextUnmarshaler` interface - -Here's an example that automatically parses duration strings into -`time.Duration` values: - -```toml -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -``` - -Which can be decoded with: - -```go -type song struct { - Name string - Duration duration -} -type songs struct { - Song []song -} -var favorites songs -if _, err := toml.Decode(blob, &favorites); err != nil { - log.Fatal(err) -} - -for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) -} -``` - -And you'll also need a `duration` type that satisfies the -`encoding.TextUnmarshaler` interface: - -```go -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} -``` - -### More complex usage - -Here's an example of how to load the example from the official spec page: - -```toml -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -And the corresponding Go types are: - -```go -type tomlConfig struct { - Title string - Owner ownerInfo - DB database `toml:"database"` - Servers map[string]server - Clients clients -} - -type ownerInfo struct { - Name string - Org string `toml:"organization"` - Bio string - DOB time.Time -} - -type database struct { - Server string - Ports []int - ConnMax int `toml:"connection_max"` - Enabled bool -} - -type server struct { - IP string - DC string -} - -type clients struct { - Data [][]interface{} - Hosts []string -} -``` - -Note that a case insensitive match will be tried if an exact match can't be -found. - -A working example of the above can be found in `_examples/example.{go,toml}`. 
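Beyond the decoding basics the README covers, the MetaData value returned by `Decode` (see decode.go and decode_meta.go below) reports which TOML keys found no home in the target value. A minimal sketch, using only the API shown in this package:

```go
// Sketch: use toml.Decode plus MetaData.Undecoded to spot TOML keys
// that the target struct does not capture.
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type config struct {
	Age  int
	Cats []string
}

func main() {
	blob := `
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
`
	var conf config
	md, err := toml.Decode(blob, &conf)
	if err != nil {
		log.Fatal(err)
	}
	// Pi has no corresponding field in config, so it is reported
	// as undecoded.
	fmt.Println(md.Undecoded()) // [Pi]
}
```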
- diff --git a/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING deleted file mode 100644 index 5a8e33254..000000000 --- a/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING deleted file mode 100644 index 5a8e33254..000000000 --- a/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING deleted file mode 100644 index 5a8e33254..000000000 --- a/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/vendor/src/github.com/BurntSushi/toml/decode.go b/vendor/src/github.com/BurntSushi/toml/decode.go deleted file mode 100644 index 6c7d398b8..000000000 --- a/vendor/src/github.com/BurntSushi/toml/decode.go +++ /dev/null @@ -1,492 +0,0 @@ -package toml - -import ( - "fmt" - "io" - "io/ioutil" - "math" - "reflect" - "strings" - "time" -) - -var e = fmt.Errorf - -// Unmarshaler is the interface implemented by objects that can unmarshal a -// TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. -func Unmarshal(p []byte, v interface{}) error { - _, err := Decode(string(p), v) - return err -} - -// Primitive is a TOML value that hasn't been decoded into a Go value. -// When using the various `Decode*` functions, the type `Primitive` may -// be given to any value, and its decoding will be delayed. -// -// A `Primitive` value can be decoded using the `PrimitiveDecode` function. -// -// The underlying representation of a `Primitive` value is subject to change. -// Do not rely on it. -// -// N.B. Primitive values are still parsed, so using them will only avoid -// the overhead of reflection. They can be useful when you don't know the -// exact type of TOML data until run time. 
-type Primitive struct { - undecoded interface{} - context Key -} - -// DEPRECATED! -// -// Use MetaData.PrimitiveDecode instead. -func PrimitiveDecode(primValue Primitive, v interface{}) error { - md := MetaData{decoded: make(map[string]bool)} - return md.unify(primValue.undecoded, rvalue(v)) -} - -// PrimitiveDecode is just like the other `Decode*` functions, except it -// decodes a TOML value that has already been parsed. Valid primitive values -// can *only* be obtained from values filled by the decoder functions, -// including this method. (i.e., `v` may contain more `Primitive` -// values.) -// -// Meta data for primitive values is included in the meta data returned by -// the `Decode*` functions with one exception: keys returned by the Undecoded -// method will only reflect keys that were decoded. Namely, any keys hidden -// behind a Primitive will be considered undecoded. Executing this method will -// update the undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { - md.context = primValue.context - defer func() { md.context = nil }() - return md.unify(primValue.undecoded, rvalue(v)) -} - -// Decode will decode the contents of `data` in TOML format into a pointer -// `v`. -// -// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be -// used interchangeably.) -// -// TOML arrays of tables correspond to either a slice of structs or a slice -// of maps. -// -// TOML datetimes correspond to Go `time.Time` values. -// -// All other TOML types (float, string, int, bool and array) correspond -// to the obvious Go types. -// -// An exception to the above rules is if a type implements the -// encoding.TextUnmarshaler interface. In this case, any primitive TOML value -// (floats, strings, integers, booleans and datetimes) will be converted to -// a byte string and given to the value's UnmarshalText method. See the -// Unmarshaler example for a demonstration with time duration strings. -// -// Key mapping -// -// TOML keys can map to either keys in a Go map or field names in a Go -// struct. The special `toml` struct tag may be used to map TOML keys to -// struct fields that don't match the key name exactly. (See the example.) -// A case insensitive match to struct names will be tried if an exact match -// can't be found. -// -// The mapping between TOML values and Go values is loose. That is, there -// may exist TOML values that cannot be placed into your representation, and -// there may be parts of your representation that do not correspond to -// TOML values. This loose mapping can be made stricter by using the IsDefined -// and/or Undecoded methods on the MetaData returned. -// -// This decoder will not handle cyclic types. If a cyclic type is passed, -// `Decode` will not terminate. -func Decode(data string, v interface{}) (MetaData, error) { - p, err := parse(data) - if err != nil { - return MetaData{}, err - } - md := MetaData{ - p.mapping, p.types, p.ordered, - make(map[string]bool, len(p.ordered)), nil, - } - return md, md.unify(p.mapping, rvalue(v)) -} - -// DecodeFile is just like Decode, except it will automatically read the -// contents of the file at `fpath` and decode it for you. -func DecodeFile(fpath string, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadFile(fpath) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// DecodeReader is just like Decode, except it will consume all bytes -// from the reader and decode it for you. 
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadAll(r) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// unify performs a sort of type unification based on the structure of `rv`, -// which is the client representation. -// -// Any type mismatch produces an error. Finding a type that we don't know -// how to handle produces an unsupported type error. -func (md *MetaData) unify(data interface{}, rv reflect.Value) error { - - // Special case. Look for a `Primitive` value. - if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { - // Save the undecoded data and the key context into the primitive - // value. - context := make(Key, len(md.context)) - copy(context, md.context) - rv.Set(reflect.ValueOf(Primitive{ - undecoded: data, - context: context, - })) - return nil - } - - // Special case. Unmarshaler Interface support. - if rv.CanAddr() { - if v, ok := rv.Addr().Interface().(Unmarshaler); ok { - return v.UnmarshalTOML(data) - } - } - - // Special case. Handle time.Time values specifically. - // TODO: Remove this code when we decide to drop support for Go 1.1. - // This isn't necessary in Go 1.2 because time.Time satisfies the encoding - // interfaces. - if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { - return md.unifyDatetime(data, rv) - } - - // Special case. Look for a value satisfying the TextUnmarshaler interface. - if v, ok := rv.Interface().(TextUnmarshaler); ok { - return md.unifyText(data, v) - } - // BUG(burntsushi) - // The behavior here is incorrect whenever a Go type satisfies the - // encoding.TextUnmarshaler interface but also corresponds to a TOML - // hash or array. In particular, the unmarshaler should only be applied - // to primitive TOML values. But at this point, it will be applied to - // all kinds of values and produce an incorrect error whenever those values - // are hashes or arrays (including arrays of tables). - - k := rv.Kind() - - // laziness - if k >= reflect.Int && k <= reflect.Uint64 { - return md.unifyInt(data, rv) - } - switch k { - case reflect.Ptr: - elem := reflect.New(rv.Type().Elem()) - err := md.unify(data, reflect.Indirect(elem)) - if err != nil { - return err - } - rv.Set(elem) - return nil - case reflect.Struct: - return md.unifyStruct(data, rv) - case reflect.Map: - return md.unifyMap(data, rv) - case reflect.Array: - return md.unifyArray(data, rv) - case reflect.Slice: - return md.unifySlice(data, rv) - case reflect.String: - return md.unifyString(data, rv) - case reflect.Bool: - return md.unifyBool(data, rv) - case reflect.Interface: - // we only support empty interfaces. 
- if rv.NumMethod() > 0 { - return e("Unsupported type '%s'.", rv.Kind()) - } - return md.unifyAnything(data, rv) - case reflect.Float32: - fallthrough - case reflect.Float64: - return md.unifyFloat64(data, rv) - } - return e("Unsupported type '%s'.", rv.Kind()) -} - -func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - return mismatch(rv, "map", mapping) - } - - for key, datum := range tmap { - var f *field - fields := cachedTypeFields(rv.Type()) - for i := range fields { - ff := &fields[i] - if ff.name == key { - f = ff - break - } - if f == nil && strings.EqualFold(ff.name, key) { - f = ff - } - } - if f != nil { - subv := rv - for _, i := range f.index { - subv = indirect(subv.Field(i)) - } - if isUnifiable(subv) { - md.decoded[md.context.add(key).String()] = true - md.context = append(md.context, key) - if err := md.unify(datum, subv); err != nil { - return e("Type mismatch for '%s.%s': %s", - rv.Type().String(), f.name, err) - } - md.context = md.context[0 : len(md.context)-1] - } else if f.name != "" { - // Bad user! No soup for you! - return e("Field '%s.%s' is unexported, and therefore cannot "+ - "be loaded with reflection.", rv.Type().String(), f.name) - } - } - } - return nil -} - -func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - return badtype("map", mapping) - } - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - for k, v := range tmap { - md.decoded[md.context.add(k).String()] = true - md.context = append(md.context, k) - - rvkey := indirect(reflect.New(rv.Type().Key())) - rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - if err := md.unify(v, rvval); err != nil { - return err - } - md.context = md.context[0 : len(md.context)-1] - - rvkey.SetString(k) - rv.SetMapIndex(rvkey, rvval) - } - return nil -} - -func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - return badtype("slice", data) - } - sliceLen := datav.Len() - if sliceLen != rv.Len() { - return e("expected array length %d; got TOML array of length %d", - rv.Len(), sliceLen) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - return badtype("slice", data) - } - sliceLen := datav.Len() - if rv.IsNil() { - rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen)) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { - sliceLen := data.Len() - for i := 0; i < sliceLen; i++ { - v := data.Index(i).Interface() - sliceval := indirect(rv.Index(i)) - if err := md.unify(v, sliceval); err != nil { - return err - } - } - return nil -} - -func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { - if _, ok := data.(time.Time); ok { - rv.Set(reflect.ValueOf(data)) - return nil - } - return badtype("time.Time", data) -} - -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { - if s, ok := data.(string); ok { - rv.SetString(s) - return nil - } - return badtype("string", data) -} - -func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { - if num, ok := data.(float64); ok { - switch rv.Kind() { - case reflect.Float32: - fallthrough - case reflect.Float64: - rv.SetFloat(num) - default: - 
panic("bug") - } - return nil - } - return badtype("float", data) -} - -func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - if num, ok := data.(int64); ok { - if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { - switch rv.Kind() { - case reflect.Int, reflect.Int64: - // No bounds checking necessary. - case reflect.Int8: - if num < math.MinInt8 || num > math.MaxInt8 { - return e("Value '%d' is out of range for int8.", num) - } - case reflect.Int16: - if num < math.MinInt16 || num > math.MaxInt16 { - return e("Value '%d' is out of range for int16.", num) - } - case reflect.Int32: - if num < math.MinInt32 || num > math.MaxInt32 { - return e("Value '%d' is out of range for int32.", num) - } - } - rv.SetInt(num) - } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { - unum := uint64(num) - switch rv.Kind() { - case reflect.Uint, reflect.Uint64: - // No bounds checking necessary. - case reflect.Uint8: - if num < 0 || unum > math.MaxUint8 { - return e("Value '%d' is out of range for uint8.", num) - } - case reflect.Uint16: - if num < 0 || unum > math.MaxUint16 { - return e("Value '%d' is out of range for uint16.", num) - } - case reflect.Uint32: - if num < 0 || unum > math.MaxUint32 { - return e("Value '%d' is out of range for uint32.", num) - } - } - rv.SetUint(unum) - } else { - panic("unreachable") - } - return nil - } - return badtype("integer", data) -} - -func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { - if b, ok := data.(bool); ok { - rv.SetBool(b) - return nil - } - return badtype("boolean", data) -} - -func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { - rv.Set(reflect.ValueOf(data)) - return nil -} - -func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { - var s string - switch sdata := data.(type) { - case TextMarshaler: - text, err := sdata.MarshalText() - if err != nil { - return err - } - s = string(text) - case fmt.Stringer: - s = sdata.String() - case string: - s = sdata - case bool: - s = fmt.Sprintf("%v", sdata) - case int64: - s = fmt.Sprintf("%d", sdata) - case float64: - s = fmt.Sprintf("%f", sdata) - default: - return badtype("primitive (string-like)", data) - } - if err := v.UnmarshalText([]byte(s)); err != nil { - return err - } - return nil -} - -// rvalue returns a reflect.Value of `v`. All pointers are resolved. -func rvalue(v interface{}) reflect.Value { - return indirect(reflect.ValueOf(v)) -} - -// indirect returns the value pointed to by a pointer. -// Pointers are followed until the value is not a pointer. -// New values are allocated for each nil pointer. -// -// An exception to this rule is if the value satisfies an interface of -// interest to us (like encoding.TextUnmarshaler). -func indirect(v reflect.Value) reflect.Value { - if v.Kind() != reflect.Ptr { - if v.CanAddr() { - pv := v.Addr() - if _, ok := pv.Interface().(TextUnmarshaler); ok { - return pv - } - } - return v - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - return indirect(reflect.Indirect(v)) -} - -func isUnifiable(rv reflect.Value) bool { - if rv.CanSet() { - return true - } - if _, ok := rv.Interface().(TextUnmarshaler); ok { - return true - } - return false -} - -func badtype(expected string, data interface{}) error { - return e("Expected %s but found '%T'.", expected, data) -} - -func mismatch(user reflect.Value, expected string, data interface{}) error { - return e("Type mismatch for %s. 
Expected %s but found '%T'.", - user.Type().String(), expected, data) -} diff --git a/vendor/src/github.com/BurntSushi/toml/decode_meta.go b/vendor/src/github.com/BurntSushi/toml/decode_meta.go deleted file mode 100644 index ef6f545fa..000000000 --- a/vendor/src/github.com/BurntSushi/toml/decode_meta.go +++ /dev/null @@ -1,122 +0,0 @@ -package toml - -import "strings" - -// MetaData allows access to meta information about TOML data that may not -// be inferrable via reflection. In particular, whether a key has been defined -// and the TOML type of a key. -type MetaData struct { - mapping map[string]interface{} - types map[string]tomlType - keys []Key - decoded map[string]bool - context Key // Used only during decoding. -} - -// IsDefined returns true if the key given exists in the TOML data. The key -// should be specified hierarchially. e.g., -// -// // access the TOML key 'a.b.c' -// IsDefined("a", "b", "c") -// -// IsDefined will return false if an empty key given. Keys are case sensitive. -func (md *MetaData) IsDefined(key ...string) bool { - if len(key) == 0 { - return false - } - - var hash map[string]interface{} - var ok bool - var hashOrVal interface{} = md.mapping - for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { - return false - } - if hashOrVal, ok = hash[k]; !ok { - return false - } - } - return true -} - -// Type returns a string representation of the type of the key specified. -// -// Type will return the empty string if given an empty key or a key that -// does not exist. Keys are case sensitive. -func (md *MetaData) Type(key ...string) string { - fullkey := strings.Join(key, ".") - if typ, ok := md.types[fullkey]; ok { - return typ.typeString() - } - return "" -} - -// Key is the type of any TOML key, including key groups. Use (MetaData).Keys -// to get values of this type. -type Key []string - -func (k Key) String() string { - return strings.Join(k, ".") -} - -func (k Key) maybeQuotedAll() string { - var ss []string - for i := range k { - ss = append(ss, k.maybeQuoted(i)) - } - return strings.Join(ss, ".") -} - -func (k Key) maybeQuoted(i int) string { - quote := false - for _, c := range k[i] { - if !isBareKeyChar(c) { - quote = true - break - } - } - if quote { - return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" - } else { - return k[i] - } -} - -func (k Key) add(piece string) Key { - newKey := make(Key, len(k)+1) - copy(newKey, k) - newKey[len(k)] = piece - return newKey -} - -// Keys returns a slice of every key in the TOML data, including key groups. -// Each key is itself a slice, where the first element is the top of the -// hierarchy and the last is the most specific. -// -// The list will have the same order as the keys appeared in the TOML data. -// -// All keys returned are non-empty. -func (md *MetaData) Keys() []Key { - return md.keys -} - -// Undecoded returns all keys that have not been decoded in the order in which -// they appear in the original TOML document. -// -// This includes keys that haven't been decoded because of a Primitive value. -// Once the Primitive value is decoded, the keys will be considered decoded. -// -// Also note that decoding into an empty interface will result in no decoding, -// and so no keys will be considered decoded. -// -// In this sense, the Undecoded keys correspond to keys in the TOML document -// that do not have a concrete type in your representation. 
-func (md *MetaData) Undecoded() []Key { - undecoded := make([]Key, 0, len(md.keys)) - for _, key := range md.keys { - if !md.decoded[key.String()] { - undecoded = append(undecoded, key) - } - } - return undecoded -} diff --git a/vendor/src/github.com/BurntSushi/toml/doc.go b/vendor/src/github.com/BurntSushi/toml/doc.go deleted file mode 100644 index fe2680004..000000000 --- a/vendor/src/github.com/BurntSushi/toml/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Package toml provides facilities for decoding and encoding TOML configuration -files via reflection. There is also support for delaying decoding with -the Primitive type, and querying the set of keys in a TOML document with the -MetaData type. - -The specification implemented: https://github.com/mojombo/toml - -The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify -whether a file is a valid TOML document. It can also be used to print the -type of each key in a TOML document. - -Testing - -There are two important types of tests used for this package. The first is -contained inside '*_test.go' files and uses the standard Go unit testing -framework. These tests are primarily devoted to holistically testing the -decoder and encoder. - -The second type of testing is used to verify the implementation's adherence -to the TOML specification. These tests have been factored into their own -project: https://github.com/BurntSushi/toml-test - -The reason the tests are in a separate project is so that they can be used by -any implementation of TOML. Namely, it is language agnostic. -*/ -package toml diff --git a/vendor/src/github.com/BurntSushi/toml/encode.go b/vendor/src/github.com/BurntSushi/toml/encode.go deleted file mode 100644 index 64e8c47e1..000000000 --- a/vendor/src/github.com/BurntSushi/toml/encode.go +++ /dev/null @@ -1,496 +0,0 @@ -package toml - -import ( - "bufio" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type tomlEncodeError struct{ error } - -var ( - errArrayMixedElementTypes = errors.New( - "can't encode array with mixed element types") - errArrayNilElement = errors.New( - "can't encode array with nil element") - errNonString = errors.New( - "can't encode a map with non-string key type") - errAnonNonStruct = errors.New( - "can't encode an anonymous field that is not a struct") - errArrayNoTable = errors.New( - "TOML array element can't contain a table") - errNoKey = errors.New( - "top-level values must be a Go map or struct") - errAnything = errors.New("") // used in testing -) - -var quotedReplacer = strings.NewReplacer( - "\t", "\\t", - "\n", "\\n", - "\r", "\\r", - "\"", "\\\"", - "\\", "\\\\", -) - -// Encoder controls the encoding of Go values to a TOML document to some -// io.Writer. -// -// The indentation level can be controlled with the Indent field. -type Encoder struct { - // A single indentation level. By default it is two spaces. - Indent string - - // hasWritten is whether we have written any output to w yet. - hasWritten bool - w *bufio.Writer -} - -// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer -// given. By default, a single indentation level is 2 spaces. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } -} - -// Encode writes a TOML representation of the Go value to the underlying -// io.Writer. If the value given cannot be encoded to a valid TOML document, -// then an error is returned. 
-// -// The mapping between Go values and TOML values should be precisely the same -// as for the Decode* functions. Similarly, the TextMarshaler interface is -// supported by encoding the resulting bytes as strings. (If you want to write -// arbitrary binary data then you will need to use something like base64 since -// TOML does not have any binary types.) -// -// When encoding TOML hashes (i.e., Go maps or structs), keys without any -// sub-hashes are encoded first. -// -// If a Go map is encoded, then its keys are sorted alphabetically for -// deterministic output. More control over this behavior may be provided if -// there is demand for it. -// -// Encoding Go values without a corresponding TOML representation---like map -// types with non-string keys---will cause an error to be returned. Similarly -// for mixed arrays/slices, arrays/slices with nil elements, embedded -// non-struct types and nested slices containing maps or structs. -// (e.g., [][]map[string]string is not allowed but []map[string]string is OK -// and so is []map[string][]string.) -func (enc *Encoder) Encode(v interface{}) error { - rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { - return err - } - return enc.w.Flush() -} - -func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { - defer func() { - if r := recover(); r != nil { - if terr, ok := r.(tomlEncodeError); ok { - err = terr.error - return - } - panic(r) - } - }() - enc.encode(key, rv) - return nil -} - -func (enc *Encoder) encode(key Key, rv reflect.Value) { - // Special case. Time needs to be in ISO8601 format. - // Special case. If we can marshal the type to text, then we used that. - // Basically, this prevents the encoder for handling these types as - // generic structs (or whatever the underlying type of a TextMarshaler is). - switch rv.Interface().(type) { - case time.Time, TextMarshaler: - enc.keyEqElement(key, rv) - return - } - - k := rv.Kind() - switch k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, - reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: - enc.keyEqElement(key, rv) - case reflect.Array, reflect.Slice: - if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { - enc.eArrayOfTables(key, rv) - } else { - enc.keyEqElement(key, rv) - } - case reflect.Interface: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Map: - if rv.IsNil() { - return - } - enc.eTable(key, rv) - case reflect.Ptr: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Struct: - enc.eTable(key, rv) - default: - panic(e("Unsupported type for key '%s': %s", key, k)) - } -} - -// eElement encodes any value that can be an array element (primitives and -// arrays). -func (enc *Encoder) eElement(rv reflect.Value) { - switch v := rv.Interface().(type) { - case time.Time: - // Special case time.Time as a primitive. Has to come before - // TextMarshaler below because time.Time implements - // encoding.TextMarshaler, but we need to always use UTC. - enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z")) - return - case TextMarshaler: - // Special case. Use text marshaler if it's available for this value. 
- if s, err := v.MarshalText(); err != nil { - encPanic(err) - } else { - enc.writeQuoted(string(s)) - } - return - } - switch rv.Kind() { - case reflect.Bool: - enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64: - enc.wf(strconv.FormatInt(rv.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64: - enc.wf(strconv.FormatUint(rv.Uint(), 10)) - case reflect.Float32: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) - case reflect.Float64: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) - case reflect.Array, reflect.Slice: - enc.eArrayOrSliceElement(rv) - case reflect.Interface: - enc.eElement(rv.Elem()) - case reflect.String: - enc.writeQuoted(rv.String()) - default: - panic(e("Unexpected primitive type: %s", rv.Kind())) - } -} - -// By the TOML spec, all floats must have a decimal with at least one -// number on either side. -func floatAddDecimal(fstr string) string { - if !strings.Contains(fstr, ".") { - return fstr + ".0" - } - return fstr -} - -func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", quotedReplacer.Replace(s)) -} - -func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { - length := rv.Len() - enc.wf("[") - for i := 0; i < length; i++ { - elem := rv.Index(i) - enc.eElement(elem) - if i != length-1 { - enc.wf(", ") - } - } - enc.wf("]") -} - -func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - for i := 0; i < rv.Len(); i++ { - trv := rv.Index(i) - if isNil(trv) { - continue - } - panicIfInvalidKey(key) - enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - enc.eMapOrStruct(key, trv) - } -} - -func (enc *Encoder) eTable(key Key, rv reflect.Value) { - panicIfInvalidKey(key) - if len(key) == 1 { - // Output an extra new line between top-level tables. - // (The newline isn't written if nothing else has been written though.) - enc.newline() - } - if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - } - enc.eMapOrStruct(key, rv) -} - -func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { - switch rv := eindirect(rv); rv.Kind() { - case reflect.Map: - enc.eMap(key, rv) - case reflect.Struct: - enc.eStruct(key, rv) - default: - panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) - } -} - -func (enc *Encoder) eMap(key Key, rv reflect.Value) { - rt := rv.Type() - if rt.Key().Kind() != reflect.String { - encPanic(errNonString) - } - - // Sort keys so that we have deterministic output. And write keys directly - // underneath this key first, before writing sub-structs or sub-maps. - var mapKeysDirect, mapKeysSub []string - for _, mapKey := range rv.MapKeys() { - k := mapKey.String() - if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { - mapKeysSub = append(mapKeysSub, k) - } else { - mapKeysDirect = append(mapKeysDirect, k) - } - } - - var writeMapKeys = func(mapKeys []string) { - sort.Strings(mapKeys) - for _, mapKey := range mapKeys { - mrv := rv.MapIndex(reflect.ValueOf(mapKey)) - if isNil(mrv) { - // Don't write anything for nil fields. 
-				continue
-			}
-			enc.encode(key.add(mapKey), mrv)
-		}
-	}
-	writeMapKeys(mapKeysDirect)
-	writeMapKeys(mapKeysSub)
-}
-
-func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
-	// Write keys for fields directly under this key first, because if we write
-	// a field that creates a new table, then all keys under it will be in that
-	// table (not the one we're writing here).
-	rt := rv.Type()
-	var fieldsDirect, fieldsSub [][]int
-	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
-	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
-		for i := 0; i < rt.NumField(); i++ {
-			f := rt.Field(i)
-			// skip unexported fields
-			if f.PkgPath != "" {
-				continue
-			}
-			frv := rv.Field(i)
-			if f.Anonymous {
-				frv := eindirect(frv)
-				t := frv.Type()
-				if t.Kind() != reflect.Struct {
-					encPanic(errAnonNonStruct)
-				}
-				addFields(t, frv, f.Index)
-			} else if typeIsHash(tomlTypeOfGo(frv)) {
-				fieldsSub = append(fieldsSub, append(start, f.Index...))
-			} else {
-				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
-			}
-		}
-	}
-	addFields(rt, rv, nil)
-
-	var writeFields = func(fields [][]int) {
-		for _, fieldIndex := range fields {
-			sft := rt.FieldByIndex(fieldIndex)
-			sf := rv.FieldByIndex(fieldIndex)
-			if isNil(sf) {
-				// Don't write anything for nil fields.
-				continue
-			}
-
-			keyName := sft.Tag.Get("toml")
-			if keyName == "-" {
-				continue
-			}
-			if keyName == "" {
-				keyName = sft.Name
-			}
-			enc.encode(key.add(keyName), sf)
-		}
-	}
-	writeFields(fieldsDirect)
-	writeFields(fieldsSub)
-}
-
-// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
-// which means no concrete TOML type could be found. It is also used to
-// determine whether the types of array elements are mixed (which is
-// forbidden); a nil Go value is illegal as an array element.
-func tomlTypeOfGo(rv reflect.Value) tomlType {
-	if isNil(rv) || !rv.IsValid() {
-		return nil
-	}
-	switch rv.Kind() {
-	case reflect.Bool:
-		return tomlBool
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
-		reflect.Int64,
-		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
-		reflect.Uint64:
-		return tomlInteger
-	case reflect.Float32, reflect.Float64:
-		return tomlFloat
-	case reflect.Array, reflect.Slice:
-		if typeEqual(tomlHash, tomlArrayType(rv)) {
-			return tomlArrayHash
-		} else {
-			return tomlArray
-		}
-	case reflect.Ptr, reflect.Interface:
-		return tomlTypeOfGo(rv.Elem())
-	case reflect.String:
-		return tomlString
-	case reflect.Map:
-		return tomlHash
-	case reflect.Struct:
-		switch rv.Interface().(type) {
-		case time.Time:
-			return tomlDatetime
-		case TextMarshaler:
-			return tomlString
-		default:
-			return tomlHash
-		}
-	default:
-		panic("unexpected reflect.Kind: " + rv.Kind().String())
-	}
-}
-
-// tomlArrayType returns the element type of a TOML array. The type returned
-// may be nil if it cannot be determined (e.g., a nil slice or a zero-length
-// slice). This function may also panic if it finds a type that cannot be
-// expressed in TOML (such as nil elements, heterogeneous arrays or directly
-// nested arrays of tables).
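To make the rule above concrete before the function itself: a slice of tables
is fine, but a slice of slices of tables is not. A minimal sketch against the
public API of this vendored package (the values are invented for
illustration):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/BurntSushi/toml"
    )

    func main() {
        var buf bytes.Buffer

        // OK: []map[string]string encodes as repeated [[servers]] blocks.
        ok := map[string]interface{}{
            "servers": []map[string]string{{"host": "a"}, {"host": "b"}},
        }
        fmt.Println(toml.NewEncoder(&buf).Encode(ok)) // <nil>

        // Rejected: [][]map[string]string, a nested array of tables.
        bad := map[string]interface{}{
            "servers": [][]map[string]string{{{"host": "a"}}},
        }
        fmt.Println(toml.NewEncoder(&buf).Encode(bad)) // non-nil error
    }

tomlArrayType, defined next, is where the second case trips.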
-func tomlArrayType(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { - return nil - } - firstType := tomlTypeOfGo(rv.Index(0)) - if firstType == nil { - encPanic(errArrayNilElement) - } - - rvlen := rv.Len() - for i := 1; i < rvlen; i++ { - elem := rv.Index(i) - switch elemType := tomlTypeOfGo(elem); { - case elemType == nil: - encPanic(errArrayNilElement) - case !typeEqual(firstType, elemType): - encPanic(errArrayMixedElementTypes) - } - } - // If we have a nested array, then we must make sure that the nested - // array contains ONLY primitives. - // This checks arbitrarily nested arrays. - if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { - nest := tomlArrayType(eindirect(rv.Index(0))) - if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { - encPanic(errArrayNoTable) - } - } - return firstType -} - -func (enc *Encoder) newline() { - if enc.hasWritten { - enc.wf("\n") - } -} - -func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - panicIfInvalidKey(key) - enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) - enc.eElement(val) - enc.newline() -} - -func (enc *Encoder) wf(format string, v ...interface{}) { - if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { - encPanic(err) - } - enc.hasWritten = true -} - -func (enc *Encoder) indentStr(key Key) string { - return strings.Repeat(enc.Indent, len(key)-1) -} - -func encPanic(err error) { - panic(tomlEncodeError{err}) -} - -func eindirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - return eindirect(v.Elem()) - default: - return v - } -} - -func isNil(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return rv.IsNil() - default: - return false - } -} - -func panicIfInvalidKey(key Key) { - for _, k := range key { - if len(k) == 0 { - encPanic(e("Key '%s' is not a valid table name. Key names "+ - "cannot be empty.", key.maybeQuotedAll())) - } - } -} - -func isValidKeyName(s string) bool { - return len(s) != 0 -} diff --git a/vendor/src/github.com/BurntSushi/toml/encoding_types.go b/vendor/src/github.com/BurntSushi/toml/encoding_types.go deleted file mode 100644 index d36e1dd60..000000000 --- a/vendor/src/github.com/BurntSushi/toml/encoding_types.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.2 - -package toml - -// In order to support Go 1.1, we define our own TextMarshaler and -// TextUnmarshaler types. For Go 1.2+, we just alias them with the -// standard library interfaces. - -import ( - "encoding" -) - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler encoding.TextMarshaler - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/src/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/src/github.com/BurntSushi/toml/encoding_types_1.1.go deleted file mode 100644 index e8d503d04..000000000 --- a/vendor/src/github.com/BurntSushi/toml/encoding_types_1.1.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !go1.2 - -package toml - -// These interfaces were introduced in Go 1.2, so we add them manually when -// compiling for Go 1.1. - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. 
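Either definition of the alias is what lets user types hook into the encoder:
anything with a MarshalText method is written as a quoted TOML string, taking
priority over its underlying kind. A small sketch (the Celsius type is
invented for illustration):

    type Celsius float64

    func (c Celsius) MarshalText() ([]byte, error) {
        return []byte(fmt.Sprintf("%.1fC", float64(c))), nil
    }

    // toml.NewEncoder(&buf).Encode(struct{ Temp Celsius }{21.5})
    // emits: Temp = "21.5C"

The hand-written Go 1.1 version of the same interfaces follows.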
-type TextMarshaler interface { - MarshalText() (text []byte, err error) -} - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/vendor/src/github.com/BurntSushi/toml/lex.go b/vendor/src/github.com/BurntSushi/toml/lex.go deleted file mode 100644 index 219122857..000000000 --- a/vendor/src/github.com/BurntSushi/toml/lex.go +++ /dev/null @@ -1,874 +0,0 @@ -package toml - -import ( - "fmt" - "strings" - "unicode/utf8" -) - -type itemType int - -const ( - itemError itemType = iota - itemNIL // used in the parser to indicate no type - itemEOF - itemText - itemString - itemRawString - itemMultilineString - itemRawMultilineString - itemBool - itemInteger - itemFloat - itemDatetime - itemArray // the start of an array - itemArrayEnd - itemTableStart - itemTableEnd - itemArrayTableStart - itemArrayTableEnd - itemKeyStart - itemCommentStart -) - -const ( - eof = 0 - tableStart = '[' - tableEnd = ']' - arrayTableStart = '[' - arrayTableEnd = ']' - tableSep = '.' - keySep = '=' - arrayStart = '[' - arrayEnd = ']' - arrayValTerm = ',' - commentStart = '#' - stringStart = '"' - stringEnd = '"' - rawStringStart = '\'' - rawStringEnd = '\'' -) - -type stateFn func(lx *lexer) stateFn - -type lexer struct { - input string - start int - pos int - width int - line int - state stateFn - items chan item - - // A stack of state functions used to maintain context. - // The idea is to reuse parts of the state machine in various places. - // For example, values can appear at the top level or within arbitrarily - // nested arrays. The last state on the stack is used after a value has - // been lexed. Similarly for comments. - stack []stateFn -} - -type item struct { - typ itemType - val string - line int -} - -func (lx *lexer) nextItem() item { - for { - select { - case item := <-lx.items: - return item - default: - lx.state = lx.state(lx) - } - } -} - -func lex(input string) *lexer { - lx := &lexer{ - input: input + "\n", - state: lexTop, - line: 1, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - } - return lx -} - -func (lx *lexer) push(state stateFn) { - lx.stack = append(lx.stack, state) -} - -func (lx *lexer) pop() stateFn { - if len(lx.stack) == 0 { - return lx.errorf("BUG in lexer: no states to pop.") - } - last := lx.stack[len(lx.stack)-1] - lx.stack = lx.stack[0 : len(lx.stack)-1] - return last -} - -func (lx *lexer) current() string { - return lx.input[lx.start:lx.pos] -} - -func (lx *lexer) emit(typ itemType) { - lx.items <- item{typ, lx.current(), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) emitTrim(typ itemType) { - lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) next() (r rune) { - if lx.pos >= len(lx.input) { - lx.width = 0 - return eof - } - - if lx.input[lx.pos] == '\n' { - lx.line++ - } - r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:]) - lx.pos += lx.width - return r -} - -// ignore skips over the pending input before this point. -func (lx *lexer) ignore() { - lx.start = lx.pos -} - -// backup steps back one rune. Can be called only once per call of next. -func (lx *lexer) backup() { - lx.pos -= lx.width - if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { - lx.line-- - } -} - -// accept consumes the next rune if it's equal to `valid`. 
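Everything below follows a state-function design: each lexing step is a
function that consumes some input and returns the next step, with the stack
above used to re-enter nested contexts. The driver loop, reduced to its
essence (an illustration only, not part of the lexer):

    package main

    import "fmt"

    type state func() state

    func stepA() state { fmt.Println("in A"); return stepB }
    func stepB() state { fmt.Println("in B"); return nil } // nil ends the run

    func main() {
        for st := state(stepA); st != nil; {
            st = st() // each state hands control to the next
        }
    }

accept, next, is one of the single-rune helpers those states lean on.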
-func (lx *lexer) accept(valid rune) bool { - if lx.next() == valid { - return true - } - lx.backup() - return false -} - -// peek returns but does not consume the next rune in the input. -func (lx *lexer) peek() rune { - r := lx.next() - lx.backup() - return r -} - -// errorf stops all lexing by emitting an error and returning `nil`. -// Note that any value that is a character is escaped if it's a special -// character (new lines, tabs, etc.). -func (lx *lexer) errorf(format string, values ...interface{}) stateFn { - lx.items <- item{ - itemError, - fmt.Sprintf(format, values...), - lx.line, - } - return nil -} - -// lexTop consumes elements at the top level of TOML data. -func lexTop(lx *lexer) stateFn { - r := lx.next() - if isWhitespace(r) || isNL(r) { - return lexSkip(lx, lexTop) - } - - switch r { - case commentStart: - lx.push(lexTop) - return lexCommentStart - case tableStart: - return lexTableStart - case eof: - if lx.pos > lx.start { - return lx.errorf("Unexpected EOF.") - } - lx.emit(itemEOF) - return nil - } - - // At this point, the only valid item can be a key, so we back up - // and let the key lexer do the rest. - lx.backup() - lx.push(lexTopEnd) - return lexKeyStart -} - -// lexTopEnd is entered whenever a top-level item has been consumed. (A value -// or a table.) It must see only whitespace, and will turn back to lexTop -// upon a new line. If it sees EOF, it will quit the lexer successfully. -func lexTopEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case r == commentStart: - // a comment will read to a new line for us. - lx.push(lexTop) - return lexCommentStart - case isWhitespace(r): - return lexTopEnd - case isNL(r): - lx.ignore() - return lexTop - case r == eof: - lx.ignore() - return lexTop - } - return lx.errorf("Expected a top-level item to end with a new line, "+ - "comment or EOF, but got %q instead.", r) -} - -// lexTable lexes the beginning of a table. Namely, it makes sure that -// it starts with a character other than '.' and ']'. -// It assumes that '[' has already been consumed. -// It also handles the case that this is an item in an array of tables. -// e.g., '[[name]]'. -func lexTableStart(lx *lexer) stateFn { - if lx.peek() == arrayTableStart { - lx.next() - lx.emit(itemArrayTableStart) - lx.push(lexArrayTableEnd) - } else { - lx.emit(itemTableStart) - lx.push(lexTableEnd) - } - return lexTableNameStart -} - -func lexTableEnd(lx *lexer) stateFn { - lx.emit(itemTableEnd) - return lexTopEnd -} - -func lexArrayTableEnd(lx *lexer) stateFn { - if r := lx.next(); r != arrayTableEnd { - return lx.errorf("Expected end of table array name delimiter %q, "+ - "but got %q instead.", arrayTableEnd, r) - } - lx.emit(itemArrayTableEnd) - return lexTopEnd -} - -func lexTableNameStart(lx *lexer) stateFn { - switch r := lx.peek(); { - case r == tableEnd || r == eof: - return lx.errorf("Unexpected end of table name. (Table names cannot " + - "be empty.)") - case r == tableSep: - return lx.errorf("Unexpected table separator. (Table names cannot " + - "be empty.)") - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.push(lexTableNameEnd) - return lexValue // reuse string lexing - case isWhitespace(r): - return lexTableNameStart - default: - return lexBareTableName - } -} - -// lexTableName lexes the name of a table. It assumes that at least one -// valid character for the table has already been read. 
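Traced end to end, a header such as `[server.alpha]` yields the item stream
TableStart, Text("server"), Text("alpha"), TableEnd, while `[[products]]`
brackets the name pieces with itemArrayTableStart/itemArrayTableEnd instead.
A hypothetical probe, written inside the package since lex and nextItem are
unexported:

    lx := lex("[server.alpha]\n")
    for it := lx.nextItem(); it.typ != itemEOF; it = lx.nextItem() {
        fmt.Printf("%s %q\n", it.typ, it.val)
    }

lexBareTableName, below, is the state that accumulates each dotted piece.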
-func lexBareTableName(lx *lexer) stateFn {
-	switch r := lx.next(); {
-	case isBareKeyChar(r):
-		return lexBareTableName
-	case r == tableSep || r == tableEnd:
-		lx.backup()
-		lx.emitTrim(itemText)
-		return lexTableNameEnd
-	default:
-		return lx.errorf("Bare keys cannot contain %q.", r)
-	}
-}
-
-// lexTableNameEnd reads the end of a piece of a table name, optionally
-// consuming whitespace.
-func lexTableNameEnd(lx *lexer) stateFn {
-	switch r := lx.next(); {
-	case isWhitespace(r):
-		return lexTableNameEnd
-	case r == tableSep:
-		lx.ignore()
-		return lexTableNameStart
-	case r == tableEnd:
-		return lx.pop()
-	default:
-		return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
-			"instead.", r)
-	}
-}
-
-// lexKeyStart scans past any leading whitespace to the first character of a
-// key name, emits itemKeyStart, and hands off to the appropriate key lexer.
-func lexKeyStart(lx *lexer) stateFn {
-	r := lx.peek()
-	switch {
-	case r == keySep:
-		return lx.errorf("Unexpected key separator %q.", keySep)
-	case isWhitespace(r) || isNL(r):
-		lx.next()
-		return lexSkip(lx, lexKeyStart)
-	case r == stringStart || r == rawStringStart:
-		lx.ignore()
-		lx.emit(itemKeyStart)
-		lx.push(lexKeyEnd)
-		return lexValue // reuse string lexing
-	default:
-		lx.ignore()
-		lx.emit(itemKeyStart)
-		return lexBareKey
-	}
-}
-
-// lexBareKey consumes the text of a bare key. Assumes that the first character
-// (which is not whitespace) has not yet been consumed.
-func lexBareKey(lx *lexer) stateFn {
-	switch r := lx.next(); {
-	case isBareKeyChar(r):
-		return lexBareKey
-	case isWhitespace(r):
-		lx.emitTrim(itemText)
-		return lexKeyEnd
-	case r == keySep:
-		lx.backup()
-		lx.emitTrim(itemText)
-		return lexKeyEnd
-	default:
-		return lx.errorf("Bare keys cannot contain %q.", r)
-	}
-}
-
-// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
-// separator).
-func lexKeyEnd(lx *lexer) stateFn {
-	switch r := lx.next(); {
-	case r == keySep:
-		return lexSkip(lx, lexValue)
-	case isWhitespace(r):
-		return lexSkip(lx, lexKeyEnd)
-	default:
-		return lx.errorf("Expected key separator %q, but got %q instead.",
-			keySep, r)
-	}
-}
-
-// lexValue starts the consumption of a value anywhere a value is expected.
-// lexValue will ignore whitespace.
-// After a value is lexed, the last state on the stack is popped and returned.
-func lexValue(lx *lexer) stateFn {
-	// We allow whitespace to precede a value, but NOT new lines.
-	// In array syntax, the array states are responsible for ignoring new
-	// lines.
- r := lx.next() - if isWhitespace(r) { - return lexSkip(lx, lexValue) - } - - switch { - case r == arrayStart: - lx.ignore() - lx.emit(itemArray) - return lexArrayValue - case r == stringStart: - if lx.accept(stringStart) { - if lx.accept(stringStart) { - lx.ignore() // Ignore """ - return lexMultilineString - } - lx.backup() - } - lx.ignore() // ignore the '"' - return lexString - case r == rawStringStart: - if lx.accept(rawStringStart) { - if lx.accept(rawStringStart) { - lx.ignore() // Ignore """ - return lexMultilineRawString - } - lx.backup() - } - lx.ignore() // ignore the "'" - return lexRawString - case r == 't': - return lexTrue - case r == 'f': - return lexFalse - case r == '-': - return lexNumberStart - case isDigit(r): - lx.backup() // avoid an extra state and use the same as above - return lexNumberOrDateStart - case r == '.': // special error case, be kind to users - return lx.errorf("Floats must start with a digit, not '.'.") - } - return lx.errorf("Expected value but found %q instead.", r) -} - -// lexArrayValue consumes one value in an array. It assumes that '[' or ',' -// have already been consumed. All whitespace and new lines are ignored. -func lexArrayValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValue) - case r == commentStart: - lx.push(lexArrayValue) - return lexCommentStart - case r == arrayValTerm: - return lx.errorf("Unexpected array value terminator %q.", - arrayValTerm) - case r == arrayEnd: - return lexArrayEnd - } - - lx.backup() - lx.push(lexArrayValueEnd) - return lexValue -} - -// lexArrayValueEnd consumes the cruft between values of an array. Namely, -// it ignores whitespace and expects either a ',' or a ']'. -func lexArrayValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValueEnd) - case r == commentStart: - lx.push(lexArrayValueEnd) - return lexCommentStart - case r == arrayValTerm: - lx.ignore() - return lexArrayValue // move on to the next value - case r == arrayEnd: - return lexArrayEnd - } - return lx.errorf("Expected an array value terminator %q or an array "+ - "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r) -} - -// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has -// just been consumed. -func lexArrayEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemArrayEnd) - return lx.pop() -} - -// lexString consumes the inner contents of a string. It assumes that the -// beginning '"' has already been consumed and ignored. -func lexString(lx *lexer) stateFn { - r := lx.next() - switch { - case isNL(r): - return lx.errorf("Strings cannot contain new lines.") - case r == '\\': - lx.push(lexString) - return lexStringEscape - case r == stringEnd: - lx.backup() - lx.emit(itemString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexString -} - -// lexMultilineString consumes the inner contents of a string. It assumes that -// the beginning '"""' has already been consumed and ignored. -func lexMultilineString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == '\\': - return lexMultilineStringEscape - case r == stringEnd: - if lx.accept(stringEnd) { - if lx.accept(stringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineString -} - -// lexRawString consumes a raw string. Nothing can be escaped in such a string. 
-// It assumes that the beginning "'" has already been consumed and ignored. -func lexRawString(lx *lexer) stateFn { - r := lx.next() - switch { - case isNL(r): - return lx.errorf("Strings cannot contain new lines.") - case r == rawStringEnd: - lx.backup() - lx.emit(itemRawString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexRawString -} - -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning "'" has already been consumed and -// ignored. -func lexMultilineRawString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == rawStringEnd: - if lx.accept(rawStringEnd) { - if lx.accept(rawStringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemRawMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineRawString -} - -// lexMultilineStringEscape consumes an escaped character. It assumes that the -// preceding '\\' has already been consumed. -func lexMultilineStringEscape(lx *lexer) stateFn { - // Handle the special case first: - if isNL(lx.next()) { - lx.next() - return lexMultilineString - } else { - lx.backup() - lx.push(lexMultilineString) - return lexStringEscape(lx) - } -} - -func lexStringEscape(lx *lexer) stateFn { - r := lx.next() - switch r { - case 'b': - fallthrough - case 't': - fallthrough - case 'n': - fallthrough - case 'f': - fallthrough - case 'r': - fallthrough - case '"': - fallthrough - case '\\': - return lx.pop() - case 'u': - return lexShortUnicodeEscape - case 'U': - return lexLongUnicodeEscape - } - return lx.errorf("Invalid escape character %q. Only the following "+ - "escape characters are allowed: "+ - "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+ - "\\uXXXX and \\UXXXXXXXX.", r) -} - -func lexShortUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 4; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf("Expected four hexadecimal digits after '\\u', "+ - "but got '%s' instead.", lx.current()) - } - } - return lx.pop() -} - -func lexLongUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 8; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf("Expected eight hexadecimal digits after '\\U', "+ - "but got '%s' instead.", lx.current()) - } - } - return lx.pop() -} - -// lexNumberOrDateStart consumes either a (positive) integer, float or -// datetime. It assumes that NO negative sign has been consumed. -func lexNumberOrDateStart(lx *lexer) stateFn { - r := lx.next() - if !isDigit(r) { - if r == '.' { - return lx.errorf("Floats must start with a digit, not '.'.") - } else { - return lx.errorf("Expected a digit but got %q.", r) - } - } - return lexNumberOrDate -} - -// lexNumberOrDate consumes either a (positive) integer, float or datetime. -func lexNumberOrDate(lx *lexer) stateFn { - r := lx.next() - switch { - case r == '-': - if lx.pos-lx.start != 5 { - return lx.errorf("All ISO8601 dates must be in full Zulu form.") - } - return lexDateAfterYear - case isDigit(r): - return lexNumberOrDate - case r == '.': - return lexFloatStart - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format. -// It assumes that "YYYY-" has already been consumed. -func lexDateAfterYear(lx *lexer) stateFn { - formats := []rune{ - // digits are '0'. - // everything else is direct equality. 
- '0', '0', '-', '0', '0', - 'T', - '0', '0', ':', '0', '0', ':', '0', '0', - 'Z', - } - for _, f := range formats { - r := lx.next() - if f == '0' { - if !isDigit(r) { - return lx.errorf("Expected digit in ISO8601 datetime, "+ - "but found %q instead.", r) - } - } else if f != r { - return lx.errorf("Expected %q in ISO8601 datetime, "+ - "but found %q instead.", f, r) - } - } - lx.emit(itemDatetime) - return lx.pop() -} - -// lexNumberStart consumes either an integer or a float. It assumes that -// a negative sign has already been read, but that *no* digits have been -// consumed. lexNumberStart will move to the appropriate integer or float -// states. -func lexNumberStart(lx *lexer) stateFn { - // we MUST see a digit. Even floats have to start with a digit. - r := lx.next() - if !isDigit(r) { - if r == '.' { - return lx.errorf("Floats must start with a digit, not '.'.") - } else { - return lx.errorf("Expected a digit but got %q.", r) - } - } - return lexNumber -} - -// lexNumber consumes an integer or a float after seeing the first digit. -func lexNumber(lx *lexer) stateFn { - r := lx.next() - switch { - case isDigit(r): - return lexNumber - case r == '.': - return lexFloatStart - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexFloatStart starts the consumption of digits of a float after a '.'. -// Namely, at least one digit is required. -func lexFloatStart(lx *lexer) stateFn { - r := lx.next() - if !isDigit(r) { - return lx.errorf("Floats must have a digit after the '.', but got "+ - "%q instead.", r) - } - return lexFloat -} - -// lexFloat consumes the digits of a float after a '.'. -// Assumes that one digit has been consumed after a '.' already. -func lexFloat(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexFloat - } - - lx.backup() - lx.emit(itemFloat) - return lx.pop() -} - -// lexConst consumes the s[1:] in s. It assumes that s[0] has already been -// consumed. -func lexConst(lx *lexer, s string) stateFn { - for i := range s[1:] { - if r := lx.next(); r != rune(s[i+1]) { - return lx.errorf("Expected %q, but found %q instead.", s[:i+1], - s[:i]+string(r)) - } - } - return nil -} - -// lexTrue consumes the "rue" in "true". It assumes that 't' has already -// been consumed. -func lexTrue(lx *lexer) stateFn { - if fn := lexConst(lx, "true"); fn != nil { - return fn - } - lx.emit(itemBool) - return lx.pop() -} - -// lexFalse consumes the "alse" in "false". It assumes that 'f' has already -// been consumed. -func lexFalse(lx *lexer) stateFn { - if fn := lexConst(lx, "false"); fn != nil { - return fn - } - lx.emit(itemBool) - return lx.pop() -} - -// lexCommentStart begins the lexing of a comment. It will emit -// itemCommentStart and consume no characters, passing control to lexComment. -func lexCommentStart(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemCommentStart) - return lexComment -} - -// lexComment lexes an entire comment. It assumes that '#' has been consumed. -// It will consume *up to* the first new line character, and pass control -// back to the last state on the stack. -func lexComment(lx *lexer) stateFn { - r := lx.peek() - if isNL(r) || r == eof { - lx.emit(itemText) - return lx.pop() - } - lx.next() - return lexComment -} - -// lexSkip ignores all slurped input and moves on to the next state. -func lexSkip(lx *lexer, nextState stateFn) stateFn { - return func(lx *lexer) stateFn { - lx.ignore() - return nextState - } -} - -// isWhitespace returns true if `r` is a whitespace character according -// to the spec. 
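One consequence of the digit states above worth calling out: datetimes are
matched byte-for-byte against the full Zulu template, so
`1979-05-27T07:32:00Z` lexes, while a bare date like `1979-05-27` is rejected
partway through the template. The parser later decodes the accepted shape
with the matching layout (see parse.go below); the standard-library
equivalent of that check is simply:

    t, err := time.Parse("2006-01-02T15:04:05Z", "1979-05-27T07:32:00Z")
    // err == nil; t is 1979-05-27 07:32:00 +0000 UTC

The small character classifiers shared by these states come next.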
-func isWhitespace(r rune) bool { - return r == '\t' || r == ' ' -} - -func isNL(r rune) bool { - return r == '\n' || r == '\r' -} - -func isDigit(r rune) bool { - return r >= '0' && r <= '9' -} - -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} - -func isBareKeyChar(r rune) bool { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || - r == '-' -} - -func (itype itemType) String() string { - switch itype { - case itemError: - return "Error" - case itemNIL: - return "NIL" - case itemEOF: - return "EOF" - case itemText: - return "Text" - case itemString: - return "String" - case itemRawString: - return "String" - case itemMultilineString: - return "String" - case itemRawMultilineString: - return "String" - case itemBool: - return "Bool" - case itemInteger: - return "Integer" - case itemFloat: - return "Float" - case itemDatetime: - return "DateTime" - case itemTableStart: - return "TableStart" - case itemTableEnd: - return "TableEnd" - case itemKeyStart: - return "KeyStart" - case itemArray: - return "Array" - case itemArrayEnd: - return "ArrayEnd" - case itemCommentStart: - return "CommentStart" - } - panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) -} - -func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) -} diff --git a/vendor/src/github.com/BurntSushi/toml/parse.go b/vendor/src/github.com/BurntSushi/toml/parse.go deleted file mode 100644 index c6069be1f..000000000 --- a/vendor/src/github.com/BurntSushi/toml/parse.go +++ /dev/null @@ -1,498 +0,0 @@ -package toml - -import ( - "fmt" - "log" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" -) - -type parser struct { - mapping map[string]interface{} - types map[string]tomlType - lx *lexer - - // A list of keys in the order that they appear in the TOML data. - ordered []Key - - // the full key for the current hash in scope - context Key - - // the base key name for everything except hashes - currentKey string - - // rough approximation of line number - approxLine int - - // A map of 'key.group.names' to whether they were created implicitly. 
- implicits map[string]bool -} - -type parseError string - -func (pe parseError) Error() string { - return string(pe) -} - -func parse(data string) (p *parser, err error) { - defer func() { - if r := recover(); r != nil { - var ok bool - if err, ok = r.(parseError); ok { - return - } - panic(r) - } - }() - - p = &parser{ - mapping: make(map[string]interface{}), - types: make(map[string]tomlType), - lx: lex(data), - ordered: make([]Key, 0), - implicits: make(map[string]bool), - } - for { - item := p.next() - if item.typ == itemEOF { - break - } - p.topLevel(item) - } - - return p, nil -} - -func (p *parser) panicf(format string, v ...interface{}) { - msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", - p.approxLine, p.current(), fmt.Sprintf(format, v...)) - panic(parseError(msg)) -} - -func (p *parser) next() item { - it := p.lx.nextItem() - if it.typ == itemError { - p.panicf("%s", it.val) - } - return it -} - -func (p *parser) bug(format string, v ...interface{}) { - log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...)) -} - -func (p *parser) expect(typ itemType) item { - it := p.next() - p.assertEqual(typ, it.typ) - return it -} - -func (p *parser) assertEqual(expected, got itemType) { - if expected != got { - p.bug("Expected '%s' but got '%s'.", expected, got) - } -} - -func (p *parser) topLevel(item item) { - switch item.typ { - case itemCommentStart: - p.approxLine = item.line - p.expect(itemText) - case itemTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemTableEnd, kg.typ) - - p.establishContext(key, false) - p.setType("", tomlHash) - p.ordered = append(p.ordered, key) - case itemArrayTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemArrayTableEnd, kg.typ) - - p.establishContext(key, true) - p.setType("", tomlArrayHash) - p.ordered = append(p.ordered, key) - case itemKeyStart: - kname := p.next() - p.approxLine = kname.line - p.currentKey = p.keyString(kname) - - val, typ := p.value(p.next()) - p.setValue(p.currentKey, val) - p.setType(p.currentKey, typ) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - p.currentKey = "" - default: - p.bug("Unexpected type at top level: %s", item.typ) - } -} - -// Gets a string for a key (or part of a key in a table name). -func (p *parser) keyString(it item) string { - switch it.typ { - case itemText: - return it.val - case itemString, itemMultilineString, - itemRawString, itemRawMultilineString: - s, _ := p.value(it) - return s.(string) - default: - p.bug("Unexpected key type: %s", it.typ) - panic("unreachable") - } -} - -// value translates an expected value from the lexer into a Go value wrapped -// as an empty interface. 
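In terms of Go types, value yields string for the string items, int64 for
itemInteger, float64 for itemFloat, bool for itemBool, time.Time for
itemDatetime, and []interface{} for itemArray. Through the public Decode
entry point (same imports as the encoder sketch earlier, plus fmt/log), that
looks like:

    var v map[string]interface{}
    if _, err := toml.Decode("answer = 42\n", &v); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%T\n", v["answer"]) // int64

The implementation of those conversions follows.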
-func (p *parser) value(it item) (interface{}, tomlType) { - switch it.typ { - case itemString: - return p.replaceEscapes(it.val), p.typeOfPrimitive(it) - case itemMultilineString: - trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) - return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) - case itemRawString: - return it.val, p.typeOfPrimitive(it) - case itemRawMultilineString: - return stripFirstNewline(it.val), p.typeOfPrimitive(it) - case itemBool: - switch it.val { - case "true": - return true, p.typeOfPrimitive(it) - case "false": - return false, p.typeOfPrimitive(it) - } - p.bug("Expected boolean value, but got '%s'.", it.val) - case itemInteger: - num, err := strconv.ParseInt(it.val, 10, 64) - if err != nil { - // See comment below for floats describing why we make a - // distinction between a bug and a user error. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Integer '%s' is out of the range of 64-bit "+ - "signed integers.", it.val) - } else { - p.bug("Expected integer value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemFloat: - num, err := strconv.ParseFloat(it.val, 64) - if err != nil { - // Distinguish float values. Normally, it'd be a bug if the lexer - // provides an invalid float, but it's possible that the float is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. - // - // This is also true for integers. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Float '%s' is out of the range of 64-bit "+ - "IEEE-754 floating-point numbers.", it.val) - } else { - p.bug("Expected float value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemDatetime: - t, err := time.Parse("2006-01-02T15:04:05Z", it.val) - if err != nil { - p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val) - } - return t, p.typeOfPrimitive(it) - case itemArray: - array := make([]interface{}, 0) - types := make([]tomlType, 0) - - for it = p.next(); it.typ != itemArrayEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - val, typ := p.value(it) - array = append(array, val) - types = append(types, typ) - } - return array, p.typeOfArray(types) - } - p.bug("Unexpected value type: %s", it.typ) - panic("unreachable") -} - -// establishContext sets the current context of the parser, -// where the context is either a hash or an array of hashes. Which one is -// set depends on the value of the `array` parameter. -// -// Establishing the context also makes sure that the key isn't a duplicate, and -// will create implicit hashes automatically. -func (p *parser) establishContext(key Key, array bool) { - var ok bool - - // Always start at the top level and drill down for our context. - hashContext := p.mapping - keyContext := make(Key, 0) - - // We only need implicit hashes for key[0:-1] - for _, k := range key[0 : len(key)-1] { - _, ok = hashContext[k] - keyContext = append(keyContext, k) - - // No key? Make an implicit hash and move on. - if !ok { - p.addImplicit(keyContext) - hashContext[k] = make(map[string]interface{}) - } - - // If the hash context is actually an array of tables, then set - // the hash context to the last element in that array. - // - // Otherwise, it better be a table, since this MUST be a key group (by - // virtue of it not being the last element in a key). 
- switch t := hashContext[k].(type) { - case []map[string]interface{}: - hashContext = t[len(t)-1] - case map[string]interface{}: - hashContext = t - default: - p.panicf("Key '%s' was already created as a hash.", keyContext) - } - } - - p.context = keyContext - if array { - // If this is the first element for this array, then allocate a new - // list of tables for it. - k := key[len(key)-1] - if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 5) - } - - // Add a new table. But make sure the key hasn't already been used - // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) - } else { - p.panicf("Key '%s' was already created and cannot be used as "+ - "an array.", keyContext) - } - } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) - } - p.context = append(p.context, key[len(key)-1]) -} - -// setValue sets the given key to the given value in the current context. -// It will make sure that the key hasn't already been defined, account for -// implicit key groups. -func (p *parser) setValue(key string, value interface{}) { - var tmpHash interface{} - var ok bool - - hash := p.mapping - keyContext := make(Key, 0) - for _, k := range p.context { - keyContext = append(keyContext, k) - if tmpHash, ok = hash[k]; !ok { - p.bug("Context for key '%s' has not been established.", keyContext) - } - switch t := tmpHash.(type) { - case []map[string]interface{}: - // The context is a table of hashes. Pick the most recent table - // defined as the current hash. - hash = t[len(t)-1] - case map[string]interface{}: - hash = t - default: - p.bug("Expected hash to have type 'map[string]interface{}', but "+ - "it has '%T' instead.", tmpHash) - } - } - keyContext = append(keyContext, key) - - if _, ok := hash[key]; ok { - // Typically, if the given key has already been set, then we have - // to raise an error since duplicate keys are disallowed. However, - // it's possible that a key was previously defined implicitly. In this - // case, it is allowed to be redefined concretely. (See the - // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) - // - // But we have to make sure to stop marking it as an implicit. (So that - // another redefinition provokes an error.) - // - // Note that since it has already been defined (as a hash), we don't - // want to overwrite it. So our business is done. - if p.isImplicit(keyContext) { - p.removeImplicit(keyContext) - return - } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. - p.panicf("Key '%s' has already been defined.", keyContext) - } - hash[key] = value -} - -// setType sets the type of a particular value at a given key. -// It should be called immediately AFTER setValue. -// -// Note that if `key` is empty, then the type given will be applied to the -// current context (which is either a table or an array of tables). -func (p *parser) setType(key string, typ tomlType) { - keyContext := make(Key, 0, len(p.context)+1) - for _, k := range p.context { - keyContext = append(keyContext, k) - } - if len(key) > 0 { // allow type setting for hashes - keyContext = append(keyContext, key) - } - p.types[keyContext.String()] = typ -} - -// addImplicit sets the given Key as having been created implicitly. 
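This implicit-key bookkeeping is what makes documents like the following
legal: `[a.b.c]` implicitly creates tables `a` and `a.b`, and a later
explicit `[a]` merely concretizes the implicit table rather than colliding
with it (the implicit-and-explicit-after.toml case cited above). A sketch via
the public API:

    const doc = "[a.b.c]\nanswer = 42\n\n[a]\nbetter = 43\n"
    var v map[string]interface{}
    _, err := toml.Decode(doc, &v) // err == nil; both keys land under "a"

addImplicit and its companions below maintain the flag that allows this.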
-func (p *parser) addImplicit(key Key) { - p.implicits[key.String()] = true -} - -// removeImplicit stops tagging the given key as having been implicitly -// created. -func (p *parser) removeImplicit(key Key) { - p.implicits[key.String()] = false -} - -// isImplicit returns true if the key group pointed to by the key was created -// implicitly. -func (p *parser) isImplicit(key Key) bool { - return p.implicits[key.String()] -} - -// current returns the full key name of the current context. -func (p *parser) current() string { - if len(p.currentKey) == 0 { - return p.context.String() - } - if len(p.context) == 0 { - return p.currentKey - } - return fmt.Sprintf("%s.%s", p.context, p.currentKey) -} - -func stripFirstNewline(s string) string { - if len(s) == 0 || s[0] != '\n' { - return s - } - return s[1:len(s)] -} - -func stripEscapedWhitespace(s string) string { - esc := strings.Split(s, "\\\n") - if len(esc) > 1 { - for i := 1; i < len(esc); i++ { - esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) - } - } - return strings.Join(esc, "") -} - -func (p *parser) replaceEscapes(str string) string { - var replaced []rune - s := []byte(str) - r := 0 - for r < len(s) { - if s[r] != '\\' { - c, size := utf8.DecodeRune(s[r:]) - r += size - replaced = append(replaced, c) - continue - } - r += 1 - if r >= len(s) { - p.bug("Escape sequence at end of string.") - return "" - } - switch s[r] { - default: - p.bug("Expected valid escape code after \\, but got %q.", s[r]) - return "" - case 'b': - replaced = append(replaced, rune(0x0008)) - r += 1 - case 't': - replaced = append(replaced, rune(0x0009)) - r += 1 - case 'n': - replaced = append(replaced, rune(0x000A)) - r += 1 - case 'f': - replaced = append(replaced, rune(0x000C)) - r += 1 - case 'r': - replaced = append(replaced, rune(0x000D)) - r += 1 - case '"': - replaced = append(replaced, rune(0x0022)) - r += 1 - case '\\': - replaced = append(replaced, rune(0x005C)) - r += 1 - case 'u': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+5). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) - replaced = append(replaced, escaped) - r += 5 - case 'U': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+9). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) - replaced = append(replaced, escaped) - r += 9 - } - } - return string(replaced) -} - -func (p *parser) asciiEscapeToUnicode(bs []byte) rune { - s := string(bs) - hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) - if err != nil { - p.bug("Could not parse '%s' as a hexadecimal number, but the "+ - "lexer claims it's OK: %s", s, err) - } - - // BUG(burntsushi) - // I honestly don't understand how this works. I can't seem - // to find a way to make this fail. I figured this would fail on invalid - // UTF-8 characters like U+DCFF, but it doesn't. 
- if !utf8.ValidString(string(rune(hex))) { - p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) - } - return rune(hex) -} - -func isStringType(ty itemType) bool { - return ty == itemString || ty == itemMultilineString || - ty == itemRawString || ty == itemRawMultilineString -} diff --git a/vendor/src/github.com/BurntSushi/toml/session.vim b/vendor/src/github.com/BurntSushi/toml/session.vim deleted file mode 100644 index 562164be0..000000000 --- a/vendor/src/github.com/BurntSushi/toml/session.vim +++ /dev/null @@ -1 +0,0 @@ -au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/src/github.com/BurntSushi/toml/type_check.go b/vendor/src/github.com/BurntSushi/toml/type_check.go deleted file mode 100644 index c73f8afc1..000000000 --- a/vendor/src/github.com/BurntSushi/toml/type_check.go +++ /dev/null @@ -1,91 +0,0 @@ -package toml - -// tomlType represents any Go type that corresponds to a TOML type. -// While the first draft of the TOML spec has a simplistic type system that -// probably doesn't need this level of sophistication, we seem to be militating -// toward adding real composite types. -type tomlType interface { - typeString() string -} - -// typeEqual accepts any two types and returns true if they are equal. -func typeEqual(t1, t2 tomlType) bool { - if t1 == nil || t2 == nil { - return false - } - return t1.typeString() == t2.typeString() -} - -func typeIsHash(t tomlType) bool { - return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) -} - -type tomlBaseType string - -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} - -var ( - tomlInteger tomlBaseType = "Integer" - tomlFloat tomlBaseType = "Float" - tomlDatetime tomlBaseType = "Datetime" - tomlString tomlBaseType = "String" - tomlBool tomlBaseType = "Bool" - tomlArray tomlBaseType = "Array" - tomlHash tomlBaseType = "Hash" - tomlArrayHash tomlBaseType = "ArrayHash" -) - -// typeOfPrimitive returns a tomlType of any primitive value in TOML. -// Primitive values are: Integer, Float, Datetime, String and Bool. -// -// Passing a lexer item other than the following will cause a BUG message -// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. -func (p *parser) typeOfPrimitive(lexItem item) tomlType { - switch lexItem.typ { - case itemInteger: - return tomlInteger - case itemFloat: - return tomlFloat - case itemDatetime: - return tomlDatetime - case itemString: - return tomlString - case itemMultilineString: - return tomlString - case itemRawString: - return tomlString - case itemRawMultilineString: - return tomlString - case itemBool: - return tomlBool - } - p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) - panic("unreachable") -} - -// typeOfArray returns a tomlType for an array given a list of types of its -// values. -// -// In the current spec, if an array is homogeneous, then its type is always -// "Array". If the array is not homogeneous, an error is generated. -func (p *parser) typeOfArray(types []tomlType) tomlType { - // Empty arrays are cool. 
- if len(types) == 0 { - return tomlArray - } - - theType := types[0] - for _, t := range types[1:] { - if !typeEqual(theType, t) { - p.panicf("Array contains values of type '%s' and '%s', but "+ - "arrays must be homogeneous.", theType, t) - } - } - return tomlArray -} diff --git a/vendor/src/github.com/BurntSushi/toml/type_fields.go b/vendor/src/github.com/BurntSushi/toml/type_fields.go deleted file mode 100644 index 7592f87a4..000000000 --- a/vendor/src/github.com/BurntSushi/toml/type_fields.go +++ /dev/null @@ -1,241 +0,0 @@ -package toml - -// Struct field handling is adapted from code in encoding/json: -// -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the Go distribution. - -import ( - "reflect" - "sort" - "sync" -) - -// A field represents a single field found in a struct. -type field struct { - name string // the name of the field (`toml` tag included) - tag bool // whether field has a `toml` tag - index []int // represents the depth of an anonymous field - typ reflect.Type // the type of the field -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from toml tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that TOML should recognize for the given -// type. The algorithm is breadth-first search over the set of structs to -// include - the top struct and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - name := sf.Tag.Get("toml") - if name == "-" { - continue - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. 
- if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, field{name, tagged, index, ft}) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - f := field{name: ft.Name(), index: index, typ: ft} - next = append(next, f) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with TOML tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// TOML tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. 
- f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} diff --git a/vendor/src/github.com/Graylog2/go-gelf/LICENSE b/vendor/src/github.com/Graylog2/go-gelf/LICENSE deleted file mode 100644 index bc756ae36..000000000 --- a/vendor/src/github.com/Graylog2/go-gelf/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright 2012 SocialCode - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/src/github.com/Graylog2/go-gelf/gelf/reader.go b/vendor/src/github.com/Graylog2/go-gelf/gelf/reader.go deleted file mode 100644 index ff719fc71..000000000 --- a/vendor/src/github.com/Graylog2/go-gelf/gelf/reader.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2012 SocialCode. All rights reserved. -// Use of this source code is governed by the MIT -// license that can be found in the LICENSE file. - -package gelf - -import ( - "bytes" - "compress/gzip" - "compress/zlib" - "encoding/json" - "fmt" - "io" - "net" - "strings" - "sync" -) - -type Reader struct { - mu sync.Mutex - conn net.Conn -} - -func NewReader(addr string) (*Reader, error) { - var err error - udpAddr, err := net.ResolveUDPAddr("udp", addr) - if err != nil { - return nil, fmt.Errorf("ResolveUDPAddr('%s'): %s", addr, err) - } - - conn, err := net.ListenUDP("udp", udpAddr) - if err != nil { - return nil, fmt.Errorf("ListenUDP: %s", err) - } - - r := new(Reader) - r.conn = conn - return r, nil -} - -func (r *Reader) Addr() string { - return r.conn.LocalAddr().String() -} - -// FIXME: this will discard data if p isn't big enough to hold the -// full message. 
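Before the method bodies, a minimal sketch of how this reader was typically
driven (loopback address and OS-chosen port picked for illustration; assumes
a main that imports the vendored gelf package and log):

    r, err := gelf.NewReader("127.0.0.1:0")
    if err != nil {
        log.Fatal(err)
    }
    log.Println("listening on", r.Addr())
    msg, err := r.ReadMessage() // blocks until one (possibly chunked) message arrives
    if err != nil {
        log.Fatal(err)
    }
    log.Println(msg.Short)

Read, below, is the io.Reader shim over ReadMessage that the FIXME refers to.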
-func (r *Reader) Read(p []byte) (int, error) { - msg, err := r.ReadMessage() - if err != nil { - return -1, err - } - - var data string - - if msg.Full == "" { - data = msg.Short - } else { - data = msg.Full - } - - return strings.NewReader(data).Read(p) -} - -func (r *Reader) ReadMessage() (*Message, error) { - cBuf := make([]byte, ChunkSize) - var ( - err error - n, length int - cid, ocid []byte - seq, total uint8 - cHead []byte - cReader io.Reader - chunks [][]byte - ) - - for got := 0; got < 128 && (total == 0 || got < int(total)); got++ { - if n, err = r.conn.Read(cBuf); err != nil { - return nil, fmt.Errorf("Read: %s", err) - } - cHead, cBuf = cBuf[:2], cBuf[:n] - - if bytes.Equal(cHead, magicChunked) { - //fmt.Printf("chunked %v\n", cBuf[:14]) - cid, seq, total = cBuf[2:2+8], cBuf[2+8], cBuf[2+8+1] - if ocid != nil && !bytes.Equal(cid, ocid) { - return nil, fmt.Errorf("out-of-band message %v (awaited %v)", cid, ocid) - } else if ocid == nil { - ocid = cid - chunks = make([][]byte, total) - } - n = len(cBuf) - chunkedHeaderLen - //fmt.Printf("setting chunks[%d]: %d\n", seq, n) - chunks[seq] = append(make([]byte, 0, n), cBuf[chunkedHeaderLen:]...) - length += n - } else { //not chunked - if total > 0 { - return nil, fmt.Errorf("out-of-band message (not chunked)") - } - break - } - } - //fmt.Printf("\nchunks: %v\n", chunks) - - if length > 0 { - if cap(cBuf) < length { - cBuf = append(cBuf, make([]byte, 0, length-cap(cBuf))...) - } - cBuf = cBuf[:0] - for i := range chunks { - //fmt.Printf("appending %d %v\n", i, chunks[i]) - cBuf = append(cBuf, chunks[i]...) - } - cHead = cBuf[:2] - } - - // the data we get from the wire is compressed - if bytes.Equal(cHead, magicGzip) { - cReader, err = gzip.NewReader(bytes.NewReader(cBuf)) - } else if cHead[0] == magicZlib[0] && - (int(cHead[0])*256+int(cHead[1]))%31 == 0 { - // zlib is slightly more complicated, but correct - cReader, err = zlib.NewReader(bytes.NewReader(cBuf)) - } else { - // compliance with https://github.com/Graylog2/graylog2-server - // treating all messages as uncompressed if they are not gzip, zlib or - // chunked - cReader = bytes.NewReader(cBuf) - } - - if err != nil { - return nil, fmt.Errorf("NewReader: %s", err) - } - - msg := new(Message) - if err := json.NewDecoder(cReader).Decode(&msg); err != nil { - return nil, fmt.Errorf("json.Unmarshal: %s", err) - } - - return msg, nil -} diff --git a/vendor/src/github.com/Graylog2/go-gelf/gelf/writer.go b/vendor/src/github.com/Graylog2/go-gelf/gelf/writer.go deleted file mode 100644 index 90cdb9921..000000000 --- a/vendor/src/github.com/Graylog2/go-gelf/gelf/writer.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright 2012 SocialCode. All rights reserved. -// Use of this source code is governed by the MIT -// license that can be found in the LICENSE file. - -package gelf - -import ( - "bytes" - "compress/flate" - "compress/gzip" - "compress/zlib" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "net" - "os" - "path" - "runtime" - "strings" - "sync" - "time" -) - -// Writer implements io.Writer and is used to send both discrete -// messages to a graylog2 server, or data from a stream-oriented -// interface (like the functions in log). 
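A sketch of the wiring that comment describes, duplicating standard-log
output to a Graylog server (the address is a placeholder; assumes imports of
the vendored gelf package, io, log and os):

    w, err := gelf.NewWriter("graylog.example.com:12201")
    if err != nil {
        log.Fatal(err)
    }
    log.SetOutput(io.MultiWriter(os.Stderr, w))
    log.Println("hello, graylog")

The type itself follows.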
-type Writer struct {
-	mu               sync.Mutex
-	conn             net.Conn
-	hostname         string
-	Facility         string // defaults to current process name
-	CompressionLevel int    // one of the consts from compress/flate
-	CompressionType  CompressType
-}
-
-// CompressType is the compression type the writer should use when
-// sending messages to the graylog2 server.
-type CompressType int
-
-const (
-	CompressGzip CompressType = iota
-	CompressZlib
-	CompressNone
-)
-
-// Message represents the contents of the GELF message. It is
-// compressed (per the writer's CompressionType) before sending.
-type Message struct {
-	Version  string                 `json:"version"`
-	Host     string                 `json:"host"`
-	Short    string                 `json:"short_message"`
-	Full     string                 `json:"full_message,omitempty"`
-	TimeUnix float64                `json:"timestamp"`
-	Level    int32                  `json:"level,omitempty"`
-	Facility string                 `json:"facility,omitempty"`
-	Extra    map[string]interface{} `json:"-"`
-	RawExtra json.RawMessage        `json:"-"`
-}
-
-// Used to control GELF chunking. Should be less than (MTU - len(UDP
-// header)).
-//
-// TODO: generate dynamically using Path MTU Discovery?
-const (
-	ChunkSize        = 1420
-	chunkedHeaderLen = 12
-	chunkedDataLen   = ChunkSize - chunkedHeaderLen
-)
-
-var (
-	magicChunked = []byte{0x1e, 0x0f}
-	magicZlib    = []byte{0x78}
-	magicGzip    = []byte{0x1f, 0x8b}
-)
-
-// Syslog severity levels
-const (
-	LOG_EMERG   = int32(0)
-	LOG_ALERT   = int32(1)
-	LOG_CRIT    = int32(2)
-	LOG_ERR     = int32(3)
-	LOG_WARNING = int32(4)
-	LOG_NOTICE  = int32(5)
-	LOG_INFO    = int32(6)
-	LOG_DEBUG   = int32(7)
-)
-
-// numChunks returns the number of GELF chunks necessary to transmit
-// the given compressed buffer.
-func numChunks(b []byte) int {
-	lenB := len(b)
-	if lenB <= ChunkSize {
-		return 1
-	}
-	return len(b)/chunkedDataLen + 1
-}
-
-// NewWriter returns a new GELF Writer. This writer can be used to send the
-// output of the standard Go log functions to a central GELF server by
-// passing it to log.SetOutput().
-func NewWriter(addr string) (*Writer, error) {
-	var err error
-	w := new(Writer)
-	w.CompressionLevel = flate.BestSpeed
-
-	if w.conn, err = net.Dial("udp", addr); err != nil {
-		return nil, err
-	}
-	if w.hostname, err = os.Hostname(); err != nil {
-		return nil, err
-	}
-
-	w.Facility = path.Base(os.Args[0])
-
-	return w, nil
-}
-
-// writeChunked writes the compressed byte array to the connection as a
-// series of GELF chunked messages. The header format is documented at
-// https://github.com/Graylog2/graylog2-docs/wiki/GELF as:
-//
-// 2-byte magic (0x1e 0x0f), 8 byte id, 1 byte sequence id, 1 byte
-// total, chunk-data
-func (w *Writer) writeChunked(zBytes []byte) (err error) {
-	b := make([]byte, 0, ChunkSize)
-	buf := bytes.NewBuffer(b)
-	nChunksI := numChunks(zBytes)
-	if nChunksI > 255 {
-		return fmt.Errorf("msg too large, would need %d chunks", nChunksI)
-	}
-	nChunks := uint8(nChunksI)
-	// use urandom to get a unique message id
-	msgId := make([]byte, 8)
-	n, err := io.ReadFull(rand.Reader, msgId)
-	if err != nil || n != 8 {
-		return fmt.Errorf("rand.Reader: %d/%s", n, err)
-	}
-
-	bytesLeft := len(zBytes)
-	for i := uint8(0); i < nChunks; i++ {
-		buf.Reset()
-		// manually write header. Don't care about
-		// host/network byte order, because the spec only
-		// deals in individual bytes.
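-		//
-		// Resulting layout (illustrative): bytes 0-1 magic
-		// (0x1e 0x0f), bytes 2-9 message id, byte 10 sequence
-		// number, byte 11 sequence count, then the chunk data.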
- buf.Write(magicChunked) //magic - buf.Write(msgId) - buf.WriteByte(i) - buf.WriteByte(nChunks) - // slice out our chunk from zBytes - chunkLen := chunkedDataLen - if chunkLen > bytesLeft { - chunkLen = bytesLeft - } - off := int(i) * chunkedDataLen - chunk := zBytes[off : off+chunkLen] - buf.Write(chunk) - - // write this chunk, and make sure the write was good - n, err := w.conn.Write(buf.Bytes()) - if err != nil { - return fmt.Errorf("Write (chunk %d/%d): %s", i, - nChunks, err) - } - if n != len(buf.Bytes()) { - return fmt.Errorf("Write len: (chunk %d/%d) (%d/%d)", - i, nChunks, n, len(buf.Bytes())) - } - - bytesLeft -= chunkLen - } - - if bytesLeft != 0 { - return fmt.Errorf("error: %d bytes left after sending", bytesLeft) - } - return nil -} - -// 1k bytes buffer by default -var bufPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(make([]byte, 0, 1024)) - }, -} - -func newBuffer() *bytes.Buffer { - b := bufPool.Get().(*bytes.Buffer) - if b != nil { - b.Reset() - return b - } - return bytes.NewBuffer(nil) -} - -// WriteMessage sends the specified message to the GELF server -// specified in the call to New(). It assumes all the fields are -// filled out appropriately. In general, clients will want to use -// Write, rather than WriteMessage. -func (w *Writer) WriteMessage(m *Message) (err error) { - mBuf := newBuffer() - defer bufPool.Put(mBuf) - if err = m.MarshalJSONBuf(mBuf); err != nil { - return err - } - mBytes := mBuf.Bytes() - - var ( - zBuf *bytes.Buffer - zBytes []byte - ) - - var zw io.WriteCloser - switch w.CompressionType { - case CompressGzip: - zBuf = newBuffer() - defer bufPool.Put(zBuf) - zw, err = gzip.NewWriterLevel(zBuf, w.CompressionLevel) - case CompressZlib: - zBuf = newBuffer() - defer bufPool.Put(zBuf) - zw, err = zlib.NewWriterLevel(zBuf, w.CompressionLevel) - case CompressNone: - zBytes = mBytes - default: - panic(fmt.Sprintf("unknown compression type %d", - w.CompressionType)) - } - if zw != nil { - if err != nil { - return - } - if _, err = zw.Write(mBytes); err != nil { - zw.Close() - return - } - zw.Close() - zBytes = zBuf.Bytes() - } - - if numChunks(zBytes) > 1 { - return w.writeChunked(zBytes) - } - n, err := w.conn.Write(zBytes) - if err != nil { - return - } - if n != len(zBytes) { - return fmt.Errorf("bad write (%d/%d)", n, len(zBytes)) - } - - return nil -} - -// Close connection and interrupt blocked Read or Write operations -func (w *Writer) Close() error { - return w.conn.Close() -} - -/* -func (w *Writer) Alert(m string) (err error) -func (w *Writer) Close() error -func (w *Writer) Crit(m string) (err error) -func (w *Writer) Debug(m string) (err error) -func (w *Writer) Emerg(m string) (err error) -func (w *Writer) Err(m string) (err error) -func (w *Writer) Info(m string) (err error) -func (w *Writer) Notice(m string) (err error) -func (w *Writer) Warning(m string) (err error) -*/ - -// getCaller returns the filename and the line info of a function -// further down in the call stack. Passing 0 in as callDepth would -// return info on the function calling getCallerIgnoringLog, 1 the -// parent function, and so on. Any suffixes passed to getCaller are -// path fragments like "/pkg/log/log.go", and functions in the call -// stack from that file are ignored. -func getCaller(callDepth int, suffixesToIgnore ...string) (file string, line int) { - // bump by 1 to ignore the getCaller (this) stackframe - callDepth++ -outer: - for { - var ok bool - _, file, line, ok = runtime.Caller(callDepth) - if !ok { - file = "???" 
-			line = 0
-			break
-		}
-
-		for _, s := range suffixesToIgnore {
-			if strings.HasSuffix(file, s) {
-				callDepth++
-				continue outer
-			}
-		}
-		break
-	}
-	return
-}
-
-func getCallerIgnoringLogMulti(callDepth int) (string, int) {
-	// the +1 is to ignore this (getCallerIgnoringLogMulti) frame
-	return getCaller(callDepth+1, "/pkg/log/log.go", "/pkg/io/multi.go")
-}
-
-// Write encodes the given data in a GELF message and sends it to
-// the server specified in NewWriter().
-func (w *Writer) Write(p []byte) (n int, err error) {
-
-	// 1 for the function that called us.
-	file, line := getCallerIgnoringLogMulti(1)
-
-	// remove trailing and leading whitespace
-	p = bytes.TrimSpace(p)
-
-	// If there are newlines in the message, use the first line
-	// for the short message and set the full message to the
-	// original input. If the input has no newlines, stick the
-	// whole thing in Short.
-	short := p
-	full := []byte("")
-	if i := bytes.IndexRune(p, '\n'); i > 0 {
-		short = p[:i]
-		full = p
-	}
-
-	m := Message{
-		Version:  "1.1",
-		Host:     w.hostname,
-		Short:    string(short),
-		Full:     string(full),
-		TimeUnix: float64(time.Now().Unix()),
-		Level:    6, // info
-		Facility: w.Facility,
-		Extra: map[string]interface{}{
-			"_file": file,
-			"_line": line,
-		},
-	}
-
-	if err = w.WriteMessage(&m); err != nil {
-		return 0, err
-	}
-
-	return len(p), nil
-}
-
-func (m *Message) MarshalJSONBuf(buf *bytes.Buffer) error {
-	b, err := json.Marshal(m)
-	if err != nil {
-		return err
-	}
-	// write up until the final }
-	if _, err = buf.Write(b[:len(b)-1]); err != nil {
-		return err
-	}
-	if len(m.Extra) > 0 {
-		eb, err := json.Marshal(m.Extra)
-		if err != nil {
-			return err
-		}
-		// merge serialized message + serialized extra map
-		if err = buf.WriteByte(','); err != nil {
-			return err
-		}
-		// write serialized extra bytes, without the enclosing braces
-		if _, err = buf.Write(eb[1 : len(eb)-1]); err != nil {
-			return err
-		}
-	}
-
-	if len(m.RawExtra) > 0 {
-		if err := buf.WriteByte(','); err != nil {
-			return err
-		}
-
-		// write serialized extra bytes, without the enclosing braces
-		if _, err = buf.Write(m.RawExtra[1 : len(m.RawExtra)-1]); err != nil {
-			return err
-		}
-	}
-
-	// write the final closing brace
-	return buf.WriteByte('}')
-}
-
-func (m *Message) UnmarshalJSON(data []byte) error {
-	i := make(map[string]interface{}, 16)
-	if err := json.Unmarshal(data, &i); err != nil {
-		return err
-	}
-	for k, v := range i {
-		if k[0] == '_' {
-			if m.Extra == nil {
-				m.Extra = make(map[string]interface{}, 1)
-			}
-			m.Extra[k] = v
-			continue
-		}
-		switch k {
-		case "version":
-			m.Version = v.(string)
-		case "host":
-			m.Host = v.(string)
-		case "short_message":
-			m.Short = v.(string)
-		case "full_message":
-			m.Full = v.(string)
-		case "timestamp":
-			m.TimeUnix = v.(float64)
-		case "level":
-			m.Level = int32(v.(float64))
-		case "facility":
-			m.Facility = v.(string)
-		}
-	}
-	return nil
-}
diff --git a/vendor/src/github.com/RackSec/srslog/.gitignore b/vendor/src/github.com/RackSec/srslog/.gitignore
deleted file mode 100644
index ebf0f2e4e..000000000
--- a/vendor/src/github.com/RackSec/srslog/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.cover
diff --git a/vendor/src/github.com/RackSec/srslog/.travis.yml b/vendor/src/github.com/RackSec/srslog/.travis.yml
deleted file mode 100644
index 4e5c4f075..000000000
--- a/vendor/src/github.com/RackSec/srslog/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-sudo: required
-dist: trusty
-group: edge
-language: go
-go:
-- 1.5
-before_install:
-  - pip install --user codecov
-script:
-- |
-  go get ./...
- go test -v -coverprofile=coverage.txt -covermode=atomic - go vet -after_success: - - codecov -notifications: - slack: - secure: dtDue9gP6CRR1jYjEf6raXXFak3QKGcCFvCf5mfvv5XScdpmc3udwgqc5TdyjC0goaC9OK/4jTcCD30dYZm/u6ux3E9mo3xwMl2xRLHx76p5r9rSQtloH19BDwA2+A+bpDfFQVz05k2YXuTiGSvNMMdwzx+Dr294Sl/z43RFB4+b9/R/6LlFpRW89IwftvpLAFnBy4K/ZcspQzKM+rQfQTL5Kk+iZ/KBsuR/VziDq6MoJ8t43i4ee8vwS06vFBKDbUiZ4FIZpLgc2RAL5qso5aWRKYXL6waXfoKHZWKPe0w4+9IY1rDJxG1jEb7YGgcbLaF9xzPRRs2b2yO/c87FKpkh6PDgYHfLjpgXotCoojZrL4p1x6MI1ldJr3NhARGPxS9r4liB9n6Y5nD+ErXi1IMf55fuUHcPY27Jc0ySeLFeM6cIWJ8OhFejCgGw6a5DnnmJo0PqopsaBDHhadpLejT1+K6bL2iGkT4SLcVNuRGLs+VyuNf1+5XpkWZvy32vquO7SZOngLLBv+GIem+t3fWm0Z9s/0i1uRCQei1iUutlYjoV/LBd35H2rhob4B5phIuJin9kb0zbHf6HnaoN0CtN8r0d8G5CZiInVlG5Xcid5Byb4dddf5U2EJTDuCMVyyiM7tcnfjqw9UbVYNxtYM9SzcqIq+uVqM8pYL9xSec= diff --git a/vendor/src/github.com/RackSec/srslog/CODE_OF_CONDUCT.md b/vendor/src/github.com/RackSec/srslog/CODE_OF_CONDUCT.md deleted file mode 100644 index 18ac49fc7..000000000 --- a/vendor/src/github.com/RackSec/srslog/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,50 +0,0 @@ -# Contributor Code of Conduct - -As contributors and maintainers of this project, and in the interest of -fostering an open and welcoming community, we pledge to respect all people who -contribute through reporting issues, posting feature requests, updating -documentation, submitting pull requests or patches, and other activities. - -We are committed to making participation in this project a harassment-free -experience for everyone, regardless of level of experience, gender, gender -identity and expression, sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, such as physical or electronic - addresses, without explicit permission -* Other unethical or unprofessional conduct - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -By adopting this Code of Conduct, project maintainers commit themselves to -fairly and consistently applying these principles to every aspect of managing -this project. Project maintainers who do not follow or enforce the Code of -Conduct may be permanently removed from the project team. - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting a project maintainer at [sirsean@gmail.com]. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. Maintainers are -obligated to maintain confidentiality with regard to the reporter of an -incident. 
- - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 1.3.0, available at -[http://contributor-covenant.org/version/1/3/0/][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/3/0/ diff --git a/vendor/src/github.com/RackSec/srslog/LICENSE b/vendor/src/github.com/RackSec/srslog/LICENSE deleted file mode 100644 index 9269338fb..000000000 --- a/vendor/src/github.com/RackSec/srslog/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015 Rackspace. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/RackSec/srslog/README.md b/vendor/src/github.com/RackSec/srslog/README.md deleted file mode 100644 index 1ae1fd4ef..000000000 --- a/vendor/src/github.com/RackSec/srslog/README.md +++ /dev/null @@ -1,131 +0,0 @@ -[![Build Status](https://travis-ci.org/RackSec/srslog.svg?branch=master)](https://travis-ci.org/RackSec/srslog) - -# srslog - -Go has a `syslog` package in the standard library, but it has the following -shortcomings: - -1. It doesn't have TLS support -2. [According to bradfitz on the Go team, it is no longer being maintained.](https://github.com/golang/go/issues/13449#issuecomment-161204716) - -I agree that it doesn't need to be in the standard library. So, I've -followed Brad's suggestion and have made a separate project to handle syslog. - -This code was taken directly from the Go project as a base to start from. - -However, this _does_ have TLS support. - -# Usage - -Basic usage retains the same interface as the original `syslog` package. We -only added to the interface where required to support new functionality. 
-
-Switch from the standard library:
-
-```
-import (
-    //"log/syslog"
-    syslog "github.com/RackSec/srslog"
-)
-```
-
-You can still use it for local syslog:
-
-```
-w, err := syslog.Dial("", "", syslog.LOG_ERR, "testtag")
-```
-
-Or to unencrypted UDP:
-
-```
-w, err := syslog.Dial("udp", "192.168.0.50:514", syslog.LOG_ERR, "testtag")
-```
-
-Or to unencrypted TCP:
-
-```
-w, err := syslog.Dial("tcp", "192.168.0.51:514", syslog.LOG_ERR, "testtag")
-```
-
-But now you can also send messages via TLS-encrypted TCP:
-
-```
-w, err := syslog.DialWithTLSCertPath("tcp+tls", "192.168.0.52:514", syslog.LOG_ERR, "testtag", "/path/to/servercert.pem")
-```
-
-And if you need more control over your TLS configuration:
-
-```
-pool := x509.NewCertPool()
-serverCert, err := ioutil.ReadFile("/path/to/servercert.pem")
-if err != nil {
-    return nil, err
-}
-pool.AppendCertsFromPEM(serverCert)
-config := tls.Config{
-    RootCAs: pool,
-}
-
-w, err := DialWithTLSConfig(network, raddr, priority, tag, &config)
-```
-
-(Note that in both TLS cases, this uses a self-signed certificate, where the
-remote syslog server has the keypair and the client has only the public key.)
-
-And then to write log messages, continue like so:
-
-```
-if err != nil {
-    log.Fatal("failed to connect to syslog:", err)
-}
-defer w.Close()
-
-w.Alert("this is an alert")
-w.Crit("this is critical")
-w.Err("this is an error")
-w.Warning("this is a warning")
-w.Notice("this is a notice")
-w.Info("this is info")
-w.Debug("this is debug")
-w.Write([]byte("these are some bytes"))
-```
-
-# Generating TLS Certificates
-
-We've provided a script that you can use to generate a self-signed keypair:
-
-```
-pip install cryptography
-python script/gen-certs.py
-```
-
-That outputs the public key and private key to standard out. Put those into
-`.pem` files. (And don't put them into any source control. The certificate in
-the `test` directory is used by the unit tests, and please do not actually use
-it anywhere else.)
-
-# Running Tests
-
-Run the tests as usual:
-
-```
-go test
-```
-
-But we've also provided a test coverage script that will show you which
-lines of code are not covered:
-
-```
-script/coverage --html
-```
-
-That will open a new browser tab showing coverage information.
-
-# License
-
-This project uses the New BSD License, the same as the Go project itself.
-
-# Code of Conduct
-
-Please note that this project is released with a Contributor Code of Conduct.
-By participating in this project you agree to abide by its terms.
diff --git a/vendor/src/github.com/RackSec/srslog/constants.go b/vendor/src/github.com/RackSec/srslog/constants.go
deleted file mode 100644
index 600801ee8..000000000
--- a/vendor/src/github.com/RackSec/srslog/constants.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package srslog
-
-import (
-	"errors"
-)
-
-// Priority is a combination of the syslog facility and
-// severity. For example, LOG_ALERT | LOG_FTP sends an alert severity
-// message from the FTP facility. The default severity is LOG_EMERG;
-// the default facility is LOG_KERN.
-type Priority int
-
-const severityMask = 0x07
-const facilityMask = 0xf8
-
-const (
-	// Severity.
-
-	// From /usr/include/sys/syslog.h.
-	// These are the same on Linux, BSD, and OS X.
-	LOG_EMERG Priority = iota
-	LOG_ALERT
-	LOG_CRIT
-	LOG_ERR
-	LOG_WARNING
-	LOG_NOTICE
-	LOG_INFO
-	LOG_DEBUG
-)
-
-const (
-	// Facility.
-
-	// From /usr/include/sys/syslog.h.
-	// These are the same up to LOG_FTP on Linux, BSD, and OS X.
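-	// For example (illustrative), LOG_LOCAL0|LOG_NOTICE addresses
-	// the local0 facility at notice severity.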
- LOG_KERN Priority = iota << 3 - LOG_USER - LOG_MAIL - LOG_DAEMON - LOG_AUTH - LOG_SYSLOG - LOG_LPR - LOG_NEWS - LOG_UUCP - LOG_CRON - LOG_AUTHPRIV - LOG_FTP - _ // unused - _ // unused - _ // unused - _ // unused - LOG_LOCAL0 - LOG_LOCAL1 - LOG_LOCAL2 - LOG_LOCAL3 - LOG_LOCAL4 - LOG_LOCAL5 - LOG_LOCAL6 - LOG_LOCAL7 -) - -func validatePriority(p Priority) error { - if p < 0 || p > LOG_LOCAL7|LOG_DEBUG { - return errors.New("log/syslog: invalid priority") - } else { - return nil - } -} diff --git a/vendor/src/github.com/RackSec/srslog/dialer.go b/vendor/src/github.com/RackSec/srslog/dialer.go deleted file mode 100644 index 47a7b2bea..000000000 --- a/vendor/src/github.com/RackSec/srslog/dialer.go +++ /dev/null @@ -1,87 +0,0 @@ -package srslog - -import ( - "crypto/tls" - "net" -) - -// dialerFunctionWrapper is a simple object that consists of a dialer function -// and its name. This is primarily for testing, so we can make sure that the -// getDialer method returns the correct dialer function. However, if you ever -// find that you need to check which dialer function you have, this would also -// be useful for you without having to use reflection. -type dialerFunctionWrapper struct { - Name string - Dialer func() (serverConn, string, error) -} - -// Call the wrapped dialer function and return its return values. -func (df dialerFunctionWrapper) Call() (serverConn, string, error) { - return df.Dialer() -} - -// getDialer returns a "dialer" function that can be called to connect to a -// syslog server. -// -// Each dialer function is responsible for dialing the remote host and returns -// a serverConn, the hostname (or a default if the Writer has not specified a -// hostname), and an error in case dialing fails. -// -// The reason for separate dialers is that different network types may need -// to dial their connection differently, yet still provide a net.Conn interface -// that you can use once they have dialed. Rather than an increasingly long -// conditional, we have a map of network -> dialer function (with a sane default -// value), and adding a new network type is as easy as writing the dialer -// function and adding it to the map. -func (w *Writer) getDialer() dialerFunctionWrapper { - dialers := map[string]dialerFunctionWrapper{ - "": dialerFunctionWrapper{"unixDialer", w.unixDialer}, - "tcp+tls": dialerFunctionWrapper{"tlsDialer", w.tlsDialer}, - } - dialer, ok := dialers[w.network] - if !ok { - dialer = dialerFunctionWrapper{"basicDialer", w.basicDialer} - } - return dialer -} - -// unixDialer uses the unixSyslog method to open a connection to the syslog -// daemon running on the local machine. -func (w *Writer) unixDialer() (serverConn, string, error) { - sc, err := unixSyslog() - hostname := w.hostname - if hostname == "" { - hostname = "localhost" - } - return sc, hostname, err -} - -// tlsDialer connects to TLS over TCP, and is used for the "tcp+tls" network -// type. -func (w *Writer) tlsDialer() (serverConn, string, error) { - c, err := tls.Dial("tcp", w.raddr, w.tlsConfig) - var sc serverConn - hostname := w.hostname - if err == nil { - sc = &netConn{conn: c} - if hostname == "" { - hostname = c.LocalAddr().String() - } - } - return sc, hostname, err -} - -// basicDialer is the most common dialer for syslog, and supports both TCP and -// UDP connections. 
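-// For example (illustrative), a Writer created with
-// Dial("udp", "192.168.0.50:514", LOG_ERR, "testtag") dials here,
-// since "udp" has no dedicated entry in the getDialer map.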
-func (w *Writer) basicDialer() (serverConn, string, error) {
-	c, err := net.Dial(w.network, w.raddr)
-	var sc serverConn
-	hostname := w.hostname
-	if err == nil {
-		sc = &netConn{conn: c}
-		if hostname == "" {
-			hostname = c.LocalAddr().String()
-		}
-	}
-	return sc, hostname, err
-}
diff --git a/vendor/src/github.com/RackSec/srslog/formatter.go b/vendor/src/github.com/RackSec/srslog/formatter.go
deleted file mode 100644
index 2a7462510..000000000
--- a/vendor/src/github.com/RackSec/srslog/formatter.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package srslog
-
-import (
-	"fmt"
-	"os"
-	"time"
-)
-
-// Formatter is a type of function that takes the constituent parts of a
-// syslog message and returns a formatted string. A different Formatter is
-// defined for each different syslog protocol we support.
-type Formatter func(p Priority, hostname, tag, content string) string
-
-// DefaultFormatter is the original format supported by the Go syslog package,
-// and is a non-compliant amalgamation of 3164 and 5424 that is intended to
-// maximize compatibility.
-func DefaultFormatter(p Priority, hostname, tag, content string) string {
-	timestamp := time.Now().Format(time.RFC3339)
-	msg := fmt.Sprintf("<%d> %s %s %s[%d]: %s",
-		p, timestamp, hostname, tag, os.Getpid(), content)
-	return msg
-}
-
-// UnixFormatter omits the hostname, because it is only used locally.
-func UnixFormatter(p Priority, hostname, tag, content string) string {
-	timestamp := time.Now().Format(time.Stamp)
-	msg := fmt.Sprintf("<%d>%s %s[%d]: %s",
-		p, timestamp, tag, os.Getpid(), content)
-	return msg
-}
-
-// RFC3164Formatter provides an RFC 3164 compliant message.
-func RFC3164Formatter(p Priority, hostname, tag, content string) string {
-	timestamp := time.Now().Format(time.Stamp)
-	msg := fmt.Sprintf("<%d> %s %s %s[%d]: %s",
-		p, timestamp, hostname, tag, os.Getpid(), content)
-	return msg
-}
-
-// RFC5424Formatter provides an RFC 5424 compliant message.
-func RFC5424Formatter(p Priority, hostname, tag, content string) string {
-	timestamp := time.Now().Format(time.RFC3339)
-	pid := os.Getpid()
-	appName := os.Args[0]
-	msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s",
-		p, 1, timestamp, hostname, appName, pid, tag, content)
-	return msg
-}
diff --git a/vendor/src/github.com/RackSec/srslog/framer.go b/vendor/src/github.com/RackSec/srslog/framer.go
deleted file mode 100644
index ab46f0de7..000000000
--- a/vendor/src/github.com/RackSec/srslog/framer.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package srslog
-
-import (
-	"fmt"
-)
-
-// Framer is a type of function that takes an input string (typically an
-// already-formatted syslog message) and applies "message framing" to it. We
-// have different framers because different versions of the syslog protocol
-// and its transport requirements define different framing behavior.
-type Framer func(in string) string
-
-// DefaultFramer does nothing, since there is no framing to apply. This is
-// the original behavior of the Go syslog package, and is also typically used
-// for UDP syslog.
-func DefaultFramer(in string) string {
-	return in
-}
-
-// RFC5425MessageLengthFramer prepends the message length to the front of the
-// provided message, as defined in RFC 5425.
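-// For example (illustrative):
-//
-//	RFC5425MessageLengthFramer("hello") // returns "5 hello"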
-func RFC5425MessageLengthFramer(in string) string {
-	return fmt.Sprintf("%d %s", len(in), in)
-}
diff --git a/vendor/src/github.com/RackSec/srslog/net_conn.go b/vendor/src/github.com/RackSec/srslog/net_conn.go
deleted file mode 100644
index 75e4c3ca1..000000000
--- a/vendor/src/github.com/RackSec/srslog/net_conn.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package srslog
-
-import (
-	"net"
-)
-
-// netConn has an internal net.Conn and adheres to the serverConn interface,
-// allowing us to send syslog messages over the network.
-type netConn struct {
-	conn net.Conn
-}
-
-// writeString formats a syslog message using time.RFC3339, includes the
-// hostname, and sends the message to the connection.
-func (n *netConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error {
-	if framer == nil {
-		framer = DefaultFramer
-	}
-	if formatter == nil {
-		formatter = DefaultFormatter
-	}
-	formattedMessage := framer(formatter(p, hostname, tag, msg))
-	_, err := n.conn.Write([]byte(formattedMessage))
-	return err
-}
-
-// close the network connection
-func (n *netConn) close() error {
-	return n.conn.Close()
-}
diff --git a/vendor/src/github.com/RackSec/srslog/srslog.go b/vendor/src/github.com/RackSec/srslog/srslog.go
deleted file mode 100644
index 4469d720c..000000000
--- a/vendor/src/github.com/RackSec/srslog/srslog.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package srslog
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"io/ioutil"
-	"log"
-	"os"
-)
-
-// This interface allows us to work with both local and network connections,
-// and enables Solaris support (see srslog_unix.go).
-type serverConn interface {
-	writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, s string) error
-	close() error
-}
-
-// New establishes a new connection to the system log daemon. Each
-// write to the returned Writer sends a log message with the given
-// priority and prefix.
-func New(priority Priority, tag string) (w *Writer, err error) {
-	return Dial("", "", priority, tag)
-}
-
-// Dial establishes a connection to a log daemon by connecting to
-// address raddr on the specified network. Each write to the returned
-// Writer sends a log message with the given facility, severity and
-// tag.
-// If network is empty, Dial will connect to the local syslog server.
-func Dial(network, raddr string, priority Priority, tag string) (*Writer, error) {
-	return DialWithTLSConfig(network, raddr, priority, tag, nil)
-}
-
-// DialWithTLSCertPath establishes a secure connection to a log daemon by connecting to
-// address raddr on the specified network. It uses certPath to load TLS certificates and configure
-// the secure connection.
-func DialWithTLSCertPath(network, raddr string, priority Priority, tag, certPath string) (*Writer, error) {
-	serverCert, err := ioutil.ReadFile(certPath)
-	if err != nil {
-		return nil, err
-	}
-
-	return DialWithTLSCert(network, raddr, priority, tag, serverCert)
-}
-
-// DialWithTLSCert establishes a secure connection to a log daemon by connecting to
-// address raddr on the specified network. It uses serverCert to load a TLS certificate
-// and configure the secure connection.
-func DialWithTLSCert(network, raddr string, priority Priority, tag string, serverCert []byte) (*Writer, error) { - pool := x509.NewCertPool() - pool.AppendCertsFromPEM(serverCert) - config := tls.Config{ - RootCAs: pool, - } - - return DialWithTLSConfig(network, raddr, priority, tag, &config) -} - -// DialWithTLSConfig establishes a secure connection to a log daemon by connecting to -// address raddr on the specified network. It uses tlsConfig to configure the secure connection. -func DialWithTLSConfig(network, raddr string, priority Priority, tag string, tlsConfig *tls.Config) (*Writer, error) { - if err := validatePriority(priority); err != nil { - return nil, err - } - - if tag == "" { - tag = os.Args[0] - } - hostname, _ := os.Hostname() - - w := &Writer{ - priority: priority, - tag: tag, - hostname: hostname, - network: network, - raddr: raddr, - tlsConfig: tlsConfig, - } - - w.Lock() - defer w.Unlock() - - err := w.connect() - if err != nil { - return nil, err - } - return w, err -} - -// NewLogger creates a log.Logger whose output is written to -// the system log service with the specified priority. The logFlag -// argument is the flag set passed through to log.New to create -// the Logger. -func NewLogger(p Priority, logFlag int) (*log.Logger, error) { - s, err := New(p, "") - if err != nil { - return nil, err - } - return log.New(s, "", logFlag), nil -} diff --git a/vendor/src/github.com/RackSec/srslog/srslog_unix.go b/vendor/src/github.com/RackSec/srslog/srslog_unix.go deleted file mode 100644 index a04d9396f..000000000 --- a/vendor/src/github.com/RackSec/srslog/srslog_unix.go +++ /dev/null @@ -1,54 +0,0 @@ -package srslog - -import ( - "errors" - "io" - "net" -) - -// unixSyslog opens a connection to the syslog daemon running on the -// local machine using a Unix domain socket. This function exists because of -// Solaris support as implemented by gccgo. On Solaris you can not -// simply open a TCP connection to the syslog daemon. The gccgo -// sources have a syslog_solaris.go file that implements unixSyslog to -// return a type that satisfies the serverConn interface and simply calls the C -// library syslog function. -func unixSyslog() (conn serverConn, err error) { - logTypes := []string{"unixgram", "unix"} - logPaths := []string{"/dev/log", "/var/run/syslog", "/var/run/log"} - for _, network := range logTypes { - for _, path := range logPaths { - conn, err := net.Dial(network, path) - if err != nil { - continue - } else { - return &localConn{conn: conn}, nil - } - } - } - return nil, errors.New("Unix syslog delivery error") -} - -// localConn adheres to the serverConn interface, allowing us to send syslog -// messages to the local syslog daemon over a Unix domain socket. -type localConn struct { - conn io.WriteCloser -} - -// writeString formats syslog messages using time.Stamp instead of time.RFC3339, -// and omits the hostname (because it is expected to be used locally). 
-func (n *localConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error { - if framer == nil { - framer = DefaultFramer - } - if formatter == nil { - formatter = UnixFormatter - } - _, err := n.conn.Write([]byte(framer(formatter(p, hostname, tag, msg)))) - return err -} - -// close the (local) network connection -func (n *localConn) close() error { - return n.conn.Close() -} diff --git a/vendor/src/github.com/RackSec/srslog/writer.go b/vendor/src/github.com/RackSec/srslog/writer.go deleted file mode 100644 index fdecaf61f..000000000 --- a/vendor/src/github.com/RackSec/srslog/writer.go +++ /dev/null @@ -1,164 +0,0 @@ -package srslog - -import ( - "crypto/tls" - "strings" - "sync" -) - -// A Writer is a connection to a syslog server. -type Writer struct { - sync.Mutex // guards conn - - priority Priority - tag string - hostname string - network string - raddr string - tlsConfig *tls.Config - framer Framer - formatter Formatter - - conn serverConn -} - -// connect makes a connection to the syslog server. -// It must be called with w.mu held. -func (w *Writer) connect() (err error) { - if w.conn != nil { - // ignore err from close, it makes sense to continue anyway - w.conn.close() - w.conn = nil - } - - var conn serverConn - var hostname string - dialer := w.getDialer() - conn, hostname, err = dialer.Call() - if err == nil { - w.conn = conn - w.hostname = hostname - } - - return -} - -// SetFormatter changes the formatter function for subsequent messages. -func (w *Writer) SetFormatter(f Formatter) { - w.formatter = f -} - -// SetFramer changes the framer function for subsequent messages. -func (w *Writer) SetFramer(f Framer) { - w.framer = f -} - -// Write sends a log message to the syslog daemon using the default priority -// passed into `srslog.New` or the `srslog.Dial*` functions. -func (w *Writer) Write(b []byte) (int, error) { - return w.writeAndRetry(w.priority, string(b)) -} - -// Close closes a connection to the syslog daemon. -func (w *Writer) Close() error { - w.Lock() - defer w.Unlock() - - if w.conn != nil { - err := w.conn.close() - w.conn = nil - return err - } - return nil -} - -// Emerg logs a message with severity LOG_EMERG; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Emerg(m string) (err error) { - _, err = w.writeAndRetry(LOG_EMERG, m) - return err -} - -// Alert logs a message with severity LOG_ALERT; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Alert(m string) (err error) { - _, err = w.writeAndRetry(LOG_ALERT, m) - return err -} - -// Crit logs a message with severity LOG_CRIT; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Crit(m string) (err error) { - _, err = w.writeAndRetry(LOG_CRIT, m) - return err -} - -// Err logs a message with severity LOG_ERR; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Err(m string) (err error) { - _, err = w.writeAndRetry(LOG_ERR, m) - return err -} - -// Warning logs a message with severity LOG_WARNING; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. 
-func (w *Writer) Warning(m string) (err error) { - _, err = w.writeAndRetry(LOG_WARNING, m) - return err -} - -// Notice logs a message with severity LOG_NOTICE; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Notice(m string) (err error) { - _, err = w.writeAndRetry(LOG_NOTICE, m) - return err -} - -// Info logs a message with severity LOG_INFO; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Info(m string) (err error) { - _, err = w.writeAndRetry(LOG_INFO, m) - return err -} - -// Debug logs a message with severity LOG_DEBUG; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Debug(m string) (err error) { - _, err = w.writeAndRetry(LOG_DEBUG, m) - return err -} - -func (w *Writer) writeAndRetry(p Priority, s string) (int, error) { - pr := (w.priority & facilityMask) | (p & severityMask) - - w.Lock() - defer w.Unlock() - - if w.conn != nil { - if n, err := w.write(pr, s); err == nil { - return n, err - } - } - if err := w.connect(); err != nil { - return 0, err - } - return w.write(pr, s) -} - -// write generates and writes a syslog formatted string. It formats the -// message based on the current Formatter and Framer. -func (w *Writer) write(p Priority, msg string) (int, error) { - // ensure it ends in a \n - if !strings.HasSuffix(msg, "\n") { - msg += "\n" - } - - err := w.conn.writeString(w.framer, w.formatter, p, w.hostname, w.tag, msg) - if err != nil { - return 0, err - } - // Note: return the length of the input, not the number of - // bytes printed by Fprintf, because this must behave like - // an io.Writer. - return len(msg), nil -} diff --git a/vendor/src/github.com/agl/ed25519/LICENSE b/vendor/src/github.com/agl/ed25519/LICENSE deleted file mode 100644 index 744875676..000000000 --- a/vendor/src/github.com/agl/ed25519/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/src/github.com/agl/ed25519/ed25519.go b/vendor/src/github.com/agl/ed25519/ed25519.go deleted file mode 100644 index 700938ddd..000000000 --- a/vendor/src/github.com/agl/ed25519/ed25519.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. See -// http://ed25519.cr.yp.to/. -package ed25519 - -// This code is a port of the public domain, "ref10" implementation of ed25519 -// from SUPERCOP. - -import ( - "crypto/sha512" - "crypto/subtle" - "io" - - "github.com/agl/ed25519/edwards25519" -) - -const ( - PublicKeySize = 32 - PrivateKeySize = 64 - SignatureSize = 64 -) - -// GenerateKey generates a public/private key pair using randomness from rand. -func GenerateKey(rand io.Reader) (publicKey *[PublicKeySize]byte, privateKey *[PrivateKeySize]byte, err error) { - privateKey = new([64]byte) - publicKey = new([32]byte) - _, err = io.ReadFull(rand, privateKey[:32]) - if err != nil { - return nil, nil, err - } - - h := sha512.New() - h.Write(privateKey[:32]) - digest := h.Sum(nil) - - digest[0] &= 248 - digest[31] &= 127 - digest[31] |= 64 - - var A edwards25519.ExtendedGroupElement - var hBytes [32]byte - copy(hBytes[:], digest) - edwards25519.GeScalarMultBase(&A, &hBytes) - A.ToBytes(publicKey) - - copy(privateKey[32:], publicKey[:]) - return -} - -// Sign signs the message with privateKey and returns a signature. -func Sign(privateKey *[PrivateKeySize]byte, message []byte) *[SignatureSize]byte { - h := sha512.New() - h.Write(privateKey[:32]) - - var digest1, messageDigest, hramDigest [64]byte - var expandedSecretKey [32]byte - h.Sum(digest1[:0]) - copy(expandedSecretKey[:], digest1[:]) - expandedSecretKey[0] &= 248 - expandedSecretKey[31] &= 63 - expandedSecretKey[31] |= 64 - - h.Reset() - h.Write(digest1[32:]) - h.Write(message) - h.Sum(messageDigest[:0]) - - var messageDigestReduced [32]byte - edwards25519.ScReduce(&messageDigestReduced, &messageDigest) - var R edwards25519.ExtendedGroupElement - edwards25519.GeScalarMultBase(&R, &messageDigestReduced) - - var encodedR [32]byte - R.ToBytes(&encodedR) - - h.Reset() - h.Write(encodedR[:]) - h.Write(privateKey[32:]) - h.Write(message) - h.Sum(hramDigest[:0]) - var hramDigestReduced [32]byte - edwards25519.ScReduce(&hramDigestReduced, &hramDigest) - - var s [32]byte - edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) - - signature := new([64]byte) - copy(signature[:], encodedR[:]) - copy(signature[32:], s[:]) - return signature -} - -// Verify returns true iff sig is a valid signature of message by publicKey. 
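-//
-// A round-trip sketch (illustrative; error handling elided):
-//
-//	pub, priv, _ := ed25519.GenerateKey(rand.Reader)
-//	sig := ed25519.Sign(priv, message)
-//	ok := ed25519.Verify(pub, message, sig) // true for an untampered message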
-func Verify(publicKey *[PublicKeySize]byte, message []byte, sig *[SignatureSize]byte) bool { - if sig[63]&224 != 0 { - return false - } - - var A edwards25519.ExtendedGroupElement - if !A.FromBytes(publicKey) { - return false - } - - h := sha512.New() - h.Write(sig[:32]) - h.Write(publicKey[:]) - h.Write(message) - var digest [64]byte - h.Sum(digest[:0]) - - var hReduced [32]byte - edwards25519.ScReduce(&hReduced, &digest) - - var R edwards25519.ProjectiveGroupElement - var b [32]byte - copy(b[:], sig[32:]) - edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b) - - var checkR [32]byte - R.ToBytes(&checkR) - return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1 -} diff --git a/vendor/src/github.com/agl/ed25519/edwards25519/const.go b/vendor/src/github.com/agl/ed25519/edwards25519/const.go deleted file mode 100644 index ea5b77a71..000000000 --- a/vendor/src/github.com/agl/ed25519/edwards25519/const.go +++ /dev/null @@ -1,1411 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -var d = FieldElement{ - -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, -} - -var d2 = FieldElement{ - -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, -} - -var SqrtM1 = FieldElement{ - -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, -} - -var A = FieldElement{ - 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, -} - -var bi = [8]PreComputedGroupElement{ - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, - FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, - FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, - }, - { - FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, 
- FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, - FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, - }, - { - FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{14499471, -2729599, -33191113, 
-4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { - FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, - }, - { - 
[... several hundred further deleted lines of machine-generated, precomputed FieldElement constant-table data (nested groups of three FieldElement literals) trimmed ...]
-11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, - FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, - FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, - }, - { - FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, - FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, - FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, - }, - { - FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, - FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, - FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, - }, - { - FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, - FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, - FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, - }, - { - FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, - FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, - FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, - }, - { - FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, - FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, - FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, - }, - { - FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, - FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, - FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, - }, - }, - { - { - FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, - FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, - FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, - }, - { - FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, - FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, - FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, - }, - { - FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, - FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, - FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, - }, - { - 
FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, - FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, - FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, - }, - { - FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, - FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, - FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, - }, - { - FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, - FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, - FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, - }, - { - FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, - FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, - FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, - }, - { - FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, - FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, - FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, - FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, - FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, - }, - { - FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, - FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, - FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, - }, - { - FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, - FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, - FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, - }, - { - FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, - FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, - FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, - }, - { - FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, - FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, - FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, 
-4388953, -8067909, 2276718}, - }, - { - FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, - FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, - FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, - }, - { - FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, - FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, - FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, - }, - { - FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, - FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, - FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, - FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, - FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, - }, - { - FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, - FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, - FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, - }, - { - FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, - FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, - FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, - }, - { - FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, - FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, - FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, - }, - { - FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, - FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, - FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, - }, - { - FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, - FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, - FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, - }, - { - FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, - FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, - FieldElement{-6957757, -15594337, 31696059, 
334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, - }, - { - FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, - FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, - FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, - }, - }, -} diff --git a/vendor/src/github.com/agl/ed25519/edwards25519/edwards25519.go b/vendor/src/github.com/agl/ed25519/edwards25519/edwards25519.go deleted file mode 100644 index 184b4a859..000000000 --- a/vendor/src/github.com/agl/ed25519/edwards25519/edwards25519.go +++ /dev/null @@ -1,2127 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package edwards25519 implements operations in GF(2**255-19) and on an -// Edwards curve that is isomorphic to curve25519. See -// http://ed25519.cr.yp.to/. -package edwards25519 - -// This code is a port of the public domain, "ref10" implementation of ed25519 -// from SUPERCOP. - -// FieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type FieldElement [10]int32 - -func FeZero(fe *FieldElement) { - for i := range fe { - fe[i] = 0 - } -} - -func FeOne(fe *FieldElement) { - FeZero(fe) - fe[0] = 1 -} - -func FeAdd(dst, a, b *FieldElement) { - for i := range dst { - dst[i] = a[i] + b[i] - } -} - -func FeSub(dst, a, b *FieldElement) { - for i := range dst { - dst[i] = a[i] - b[i] - } -} - -func FeCopy(dst, src *FieldElement) { - for i := range dst { - dst[i] = src[i] - } -} - -// Replace (f,g) with (g,g) if b == 1; -// replace (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. 
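The conditional move implemented below leans entirely on that precondition: when b is 0 or 1, negating it yields an all-zeros or all-ones mask, so f is overwritten by g (or left alone) with no secret-dependent branch and an identical memory-access pattern either way. A minimal standalone sketch of the same masking trick on a single int32 (cmove32 is an illustrative name, not part of this package):

	package main

	import "fmt"

	// cmove32 sets *f = g when b == 1 and leaves *f unchanged when b == 0,
	// without branching on b: -1 is all ones in two's complement, so the
	// mask selects either (*f ^ g) or 0.
	func cmove32(f *int32, g int32, b int32) {
		mask := -b // 0 if b == 0, all ones if b == 1
		*f ^= mask & (*f ^ g)
	}

	func main() {
		x := int32(7)
		cmove32(&x, 42, 0)
		fmt.Println(x) // 7: b == 0 leaves x alone
		cmove32(&x, 42, 1)
		fmt.Println(x) // 42: b == 1 copies g into x
	}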
-func FeCMove(f, g *FieldElement, b int32) { - var x FieldElement - b = -b - for i := range x { - x[i] = b & (f[i] ^ g[i]) - } - - for i := range f { - f[i] ^= x[i] - } -} - -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func FeFromBytes(dst *FieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 8388607) << 2 - - var carry [10]int64 - carry[9] = (h9 + 1<<24) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + 1<<24) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + 1<<24) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + 1<<24) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + 1<<24) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + 1<<25) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + 1<<25) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + 1<<25) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + 1<<25) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + 1<<25) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - dst[0] = int32(h0) - dst[1] = int32(h1) - dst[2] = int32(h2) - dst[3] = int32(h3) - dst[4] = int32(h4) - dst[5] = int32(h5) - dst[6] = int32(h6) - dst[7] = int32(h7) - dst[8] = int32(h8) - dst[9] = int32(h9) -} - -// FeToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0<y<1. -// -// Write r=h-pq. -// Have 0<=r<=p-1=2^255-20. -// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. -// -// Write x=r+19(2^-255)r+y. -// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. -// -// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) -// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. -func FeToBytes(s *[32]byte, h *FieldElement) { - var carry [10]int32 - - q := (19*h[9] + (1 << 24)) >> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. - - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9].
- - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -func FeIsNegative(f *FieldElement) byte { - var s [32]byte - FeToBytes(&s, f) - return s[0] & 1 -} - -func FeIsNonZero(f *FieldElement) int32 { - var s [32]byte - FeToBytes(&s, f) - var x uint8 - for _, b := range s { - x |= b - } - x |= x >> 4 - x |= x >> 2 - x |= x >> 1 - return int32(x & 1) -} - -// FeNeg sets h = -f -// -// Preconditions: -// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeNeg(h, f *FieldElement) { - for i := range h { - h[i] = -f[i] - } -} - -// FeMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs can squeeze carries into int32. 
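The factor 19 that recurs in the precomputed terms of the multiplication that follows (g1_19, g2_19, ..., and the carry*19 foldbacks) comes from the shape of the prime: since p = 2^255 - 19, we have 2^255 ≡ 19 (mod p), so any partial product that overflows past the top limb re-enters at limb zero scaled by 19. A standalone math/big sanity check of that identity (a sketch, not part of this package):

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		// p = 2^255 - 19, the field prime used throughout this file.
		p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

		// A carry out of the top limb represents a multiple of 2^255,
		// and 2^255 mod p is 19 -- hence the *19 foldbacks.
		two255 := new(big.Int).Lsh(big.NewInt(1), 255)
		fmt.Println(new(big.Int).Mod(two255, p)) // prints 19
	}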
-func FeMul(h, f, g *FieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - g0 := g[0] - g1 := g[1] - g2 := g[2] - g3 := g[3] - g4 := g[4] - g5 := g[5] - g6 := g[6] - g7 := g[7] - g8 := g[8] - g9 := g[9] - g1_19 := 19 * g1 /* 1.4*2^29 */ - g2_19 := 19 * g2 /* 1.4*2^30; still ok */ - g3_19 := 19 * g3 - g4_19 := 19 * g4 - g5_19 := 19 * g5 - g6_19 := 19 * g6 - g7_19 := 19 * g7 - g8_19 := 19 * g8 - g9_19 := 19 * g9 - f1_2 := 2 * f1 - f3_2 := 2 * f3 - f5_2 := 2 * f5 - f7_2 := 2 * f7 - f9_2 := 2 * f9 - f0g0 := int64(f0) * int64(g0) - f0g1 := int64(f0) * int64(g1) - f0g2 := int64(f0) * int64(g2) - f0g3 := int64(f0) * int64(g3) - f0g4 := int64(f0) * int64(g4) - f0g5 := int64(f0) * int64(g5) - f0g6 := int64(f0) * int64(g6) - f0g7 := int64(f0) * int64(g7) - f0g8 := int64(f0) * int64(g8) - f0g9 := int64(f0) * int64(g9) - f1g0 := int64(f1) * int64(g0) - f1g1_2 := int64(f1_2) * int64(g1) - f1g2 := int64(f1) * int64(g2) - f1g3_2 := int64(f1_2) * int64(g3) - f1g4 := int64(f1) * int64(g4) - f1g5_2 := int64(f1_2) * int64(g5) - f1g6 := int64(f1) * int64(g6) - f1g7_2 := int64(f1_2) * int64(g7) - f1g8 := int64(f1) * int64(g8) - f1g9_38 := int64(f1_2) * int64(g9_19) - f2g0 := int64(f2) * int64(g0) - f2g1 := int64(f2) * int64(g1) - f2g2 := int64(f2) * int64(g2) - f2g3 := int64(f2) * int64(g3) - f2g4 := int64(f2) * int64(g4) - f2g5 := int64(f2) * int64(g5) - f2g6 := int64(f2) * int64(g6) - f2g7 := int64(f2) * int64(g7) - f2g8_19 := int64(f2) * int64(g8_19) - f2g9_19 := int64(f2) * int64(g9_19) - f3g0 := int64(f3) * int64(g0) - f3g1_2 := int64(f3_2) * int64(g1) - f3g2 := int64(f3) * int64(g2) - f3g3_2 := int64(f3_2) * int64(g3) - f3g4 := int64(f3) * int64(g4) - f3g5_2 := int64(f3_2) * int64(g5) - f3g6 := int64(f3) * int64(g6) - f3g7_38 := int64(f3_2) * int64(g7_19) - f3g8_19 := int64(f3) * int64(g8_19) - f3g9_38 := int64(f3_2) * int64(g9_19) - f4g0 := int64(f4) * int64(g0) - f4g1 := int64(f4) * int64(g1) - f4g2 := int64(f4) * int64(g2) - f4g3 := int64(f4) * int64(g3) - f4g4 := int64(f4) * int64(g4) - f4g5 := int64(f4) * int64(g5) - f4g6_19 := int64(f4) * int64(g6_19) - f4g7_19 := int64(f4) * int64(g7_19) - f4g8_19 := int64(f4) * int64(g8_19) - f4g9_19 := int64(f4) * int64(g9_19) - f5g0 := int64(f5) * int64(g0) - f5g1_2 := int64(f5_2) * int64(g1) - f5g2 := int64(f5) * int64(g2) - f5g3_2 := int64(f5_2) * int64(g3) - f5g4 := int64(f5) * int64(g4) - f5g5_38 := int64(f5_2) * int64(g5_19) - f5g6_19 := int64(f5) * int64(g6_19) - f5g7_38 := int64(f5_2) * int64(g7_19) - f5g8_19 := int64(f5) * int64(g8_19) - f5g9_38 := int64(f5_2) * int64(g9_19) - f6g0 := int64(f6) * int64(g0) - f6g1 := int64(f6) * int64(g1) - f6g2 := int64(f6) * int64(g2) - f6g3 := int64(f6) * int64(g3) - f6g4_19 := int64(f6) * int64(g4_19) - f6g5_19 := int64(f6) * int64(g5_19) - f6g6_19 := int64(f6) * int64(g6_19) - f6g7_19 := int64(f6) * int64(g7_19) - f6g8_19 := int64(f6) * int64(g8_19) - f6g9_19 := int64(f6) * int64(g9_19) - f7g0 := int64(f7) * int64(g0) - f7g1_2 := int64(f7_2) * int64(g1) - f7g2 := int64(f7) * int64(g2) - f7g3_38 := int64(f7_2) * int64(g3_19) - f7g4_19 := int64(f7) * int64(g4_19) - f7g5_38 := int64(f7_2) * int64(g5_19) - f7g6_19 := int64(f7) * int64(g6_19) - f7g7_38 := int64(f7_2) * int64(g7_19) - f7g8_19 := int64(f7) * int64(g8_19) - f7g9_38 := int64(f7_2) * int64(g9_19) - f8g0 := int64(f8) * int64(g0) - f8g1 := int64(f8) * int64(g1) - f8g2_19 := int64(f8) * int64(g2_19) - f8g3_19 := int64(f8) * int64(g3_19) - f8g4_19 := int64(f8) * 
int64(g4_19) - f8g5_19 := int64(f8) * int64(g5_19) - f8g6_19 := int64(f8) * int64(g6_19) - f8g7_19 := int64(f8) * int64(g7_19) - f8g8_19 := int64(f8) * int64(g8_19) - f8g9_19 := int64(f8) * int64(g9_19) - f9g0 := int64(f9) * int64(g0) - f9g1_38 := int64(f9_2) * int64(g1_19) - f9g2_19 := int64(f9) * int64(g2_19) - f9g3_38 := int64(f9_2) * int64(g3_19) - f9g4_19 := int64(f9) * int64(g4_19) - f9g5_38 := int64(f9_2) * int64(g5_19) - f9g6_19 := int64(f9) * int64(g6_19) - f9g7_38 := int64(f9_2) * int64(g7_19) - f9g8_19 := int64(f9) * int64(g8_19) - f9g9_38 := int64(f9_2) * int64(g9_19) - h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 - h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 - h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 - h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 - h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 - h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 - h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 - h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 - h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 - h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 - var carry [10]int64 - - /* - |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - */ - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.51*2^58 */ - /* |h5| <= 1.51*2^58 */ - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.21*2^59 */ - /* |h6| <= 1.21*2^59 */ - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.51*2^58 */ - /* |h7| <= 1.51*2^58 */ - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.52*2^33 */ - /* |h8| <= 1.52*2^33 */ - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.51*2^58 */ - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.8*2^37 */ - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] 
<< 26 - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeSquare(h, f *FieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) * int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - 
h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeSquare2 sets h = 2 * f * f -// -// Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. -// See fe_mul.c for discussion of implementation strategy. -func FeSquare2(h, f *FieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.959375*2^30 - f6_19 := 19 * f6 // 1.959375*2^30 - f7_38 := 38 * f7 // 1.959375*2^30 - f8_19 := 19 * f8 // 1.959375*2^30 - f9_38 := 38 * f9 // 1.959375*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) 
* int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - 
FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. -// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, 
v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, &vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) == (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<<uint(b)) <= 15 { - r[i] += r[i+b] << uint(b) - r[i+b] = 0 - } else if r[i]-(r[i+b]<<uint(b)) >= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31].
-// B is the Ed25519 base point (x,4/5) with x positive. -func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise. -func negative(b int32) int32 { - return (b >> 31) & 1 -} - -func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { - FeCMove(&t.yPlusX, &u.yPlusX, b) - FeCMove(&t.yMinusX, &u.yMinusX, b) - FeCMove(&t.xy2d, &u.xy2d, b) -} - -func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { - var minusT PreComputedGroupElement - bNegative := negative(b) - bAbs := b - (((-bNegative) & b) << 1) - - t.Zero() - for i := int32(0); i < 8; i++ { - PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) - } - FeCopy(&minusT.yPlusX, &t.yMinusX) - FeCopy(&minusT.yMinusX, &t.yPlusX) - FeNeg(&minusT.xy2d, &t.xy2d) - PreComputedGroupElementCMove(t, &minusT, bNegative) -} - -// GeScalarMultBase computes h = a*B, where -// a = a[0]+256*a[1]+...+256^31 a[31] -// B is the Ed25519 base point (x,4/5) with x positive. -// -// Preconditions: -// a[31] <= 127 -func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { - var e [64]int8 - - for i, v := range a { - e[2*i] = int8(v & 15) - e[2*i+1] = int8((v >> 4) & 15) - } - - // each e[i] is between 0 and 15 and e[63] is between 0 and 7. - - carry := int8(0) - for i := 0; i < 63; i++ { - e[i] += carry - carry = (e[i] + 8) >> 4 - e[i] -= carry << 4 - } - e[63] += carry - // each e[i] is between -8 and 8. - - h.Zero() - var t PreComputedGroupElement - var r CompletedGroupElement - for i := int32(1); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } - - var s ProjectiveGroupElement - - h.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToExtended(h) - - for i := int32(0); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } -} - -// The scalars are GF(2^252 + 27742317777372353535851937790883648493). - -// Input: -// a[0]+256*a[1]+...+256^31*a[31] = a -// b[0]+256*b[1]+...+256^31*b[31] = b -// c[0]+256*c[1]+...+256^31*c[31] = c -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l -// where l = 2^252 + 27742317777372353535851937790883648493. 
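The limb-by-limb reduction that follows is easiest to audit against the arbitrary-precision statement of the contract: interpret each 32-byte array as a little-endian integer and compute (a*b + c) mod l. A reference sketch using math/big (scMulAddRef is a name invented here for comparison only, not this package's API):

	package main

	import (
		"fmt"
		"math/big"
	)

	// l = 2^252 + 27742317777372353535851937790883648493, the group order.
	var l, _ = new(big.Int).SetString(
		"7237005577332262213973186563042994240857116359379907606001950938285454250989", 10)

	// scMulAddRef computes (a*b + c) mod l on little-endian 32-byte scalars.
	func scMulAddRef(a, b, c *[32]byte) [32]byte {
		le := func(x *[32]byte) *big.Int {
			buf := make([]byte, 32)
			for i, v := range x {
				buf[31-i] = v // big.Int expects big-endian bytes
			}
			return new(big.Int).SetBytes(buf)
		}
		s := new(big.Int).Mul(le(a), le(b))
		s.Add(s, le(c))
		s.Mod(s, l)
		var out [32]byte
		for i, v := range s.FillBytes(make([]byte, 32)) {
			out[31-i] = v // back to little-endian
		}
		return out
	}

	func main() {
		var a, b, c [32]byte
		a[0], b[0], c[0] = 3, 5, 7
		fmt.Println(scMulAddRef(&a, &b, &c)[0]) // 22 == 3*5+7
	}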
-func ScMulAdd(s, a, b, c *[32]byte) { - a0 := 2097151 & load3(a[:]) - a1 := 2097151 & (load4(a[2:]) >> 5) - a2 := 2097151 & (load3(a[5:]) >> 2) - a3 := 2097151 & (load4(a[7:]) >> 7) - a4 := 2097151 & (load4(a[10:]) >> 4) - a5 := 2097151 & (load3(a[13:]) >> 1) - a6 := 2097151 & (load4(a[15:]) >> 6) - a7 := 2097151 & (load3(a[18:]) >> 3) - a8 := 2097151 & load3(a[21:]) - a9 := 2097151 & (load4(a[23:]) >> 5) - a10 := 2097151 & (load3(a[26:]) >> 2) - a11 := (load4(a[28:]) >> 7) - b0 := 2097151 & load3(b[:]) - b1 := 2097151 & (load4(b[2:]) >> 5) - b2 := 2097151 & (load3(b[5:]) >> 2) - b3 := 2097151 & (load4(b[7:]) >> 7) - b4 := 2097151 & (load4(b[10:]) >> 4) - b5 := 2097151 & (load3(b[13:]) >> 1) - b6 := 2097151 & (load4(b[15:]) >> 6) - b7 := 2097151 & (load3(b[18:]) >> 3) - b8 := 2097151 & load3(b[21:]) - b9 := 2097151 & (load4(b[23:]) >> 5) - b10 := 2097151 & (load3(b[26:]) >> 2) - b11 := (load4(b[28:]) >> 7) - c0 := 2097151 & load3(c[:]) - c1 := 2097151 & (load4(c[2:]) >> 5) - c2 := 2097151 & (load3(c[5:]) >> 2) - c3 := 2097151 & (load4(c[7:]) >> 7) - c4 := 2097151 & (load4(c[10:]) >> 4) - c5 := 2097151 & (load3(c[13:]) >> 1) - c6 := 2097151 & (load4(c[15:]) >> 6) - c7 := 2097151 & (load3(c[18:]) >> 3) - c8 := 2097151 & load3(c[21:]) - c9 := 2097151 & (load4(c[23:]) >> 5) - c10 := 2097151 & (load3(c[26:]) >> 2) - c11 := (load4(c[28:]) >> 7) - var carry [23]int64 - - s0 := c0 + a0*b0 - s1 := c1 + a0*b1 + a1*b0 - s2 := c2 + a0*b2 + a1*b1 + a2*b0 - s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 - s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 - s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 - s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 - s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 - s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 - s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 - s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 - s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 - s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 - s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 - s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 - s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 - s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 - s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 - s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 - s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 - s20 := a9*b11 + a10*b10 + a11*b9 - s21 := a10*b11 + a11*b10 - s22 := a11 * b11 - s23 := int64(0) - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - carry[18] = (s18 + (1 
<< 20)) >> 21 - s19 += carry[18] - s18 -= carry[18] << 21 - carry[20] = (s20 + (1 << 20)) >> 21 - s21 += carry[20] - s20 -= carry[20] << 21 - carry[22] = (s22 + (1 << 20)) >> 21 - s23 += carry[22] - s22 -= carry[22] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - carry[17] = (s17 + (1 << 20)) >> 21 - s18 += carry[17] - s17 -= carry[17] << 21 - carry[19] = (s19 + (1 << 20)) >> 21 - s20 += carry[19] - s19 -= carry[19] << 21 - carry[21] = (s21 + (1 << 20)) >> 21 - s22 += carry[21] - s21 -= carry[21] << 21 - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 
+= s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - s[0] = byte(s0 >> 0) - s[1] = byte(s0 >> 8) - s[2] = byte((s0 >> 16) | (s1 << 5)) - s[3] = byte(s1 >> 3) - s[4] = byte(s1 >> 11) - s[5] = byte((s1 >> 19) | (s2 << 2)) - s[6] = byte(s2 >> 6) - s[7] = byte((s2 >> 14) | (s3 << 7)) - s[8] = byte(s3 >> 1) - s[9] = byte(s3 >> 9) - s[10] = byte((s3 >> 17) | (s4 << 4)) - s[11] = byte(s4 >> 4) - s[12] = byte(s4 >> 12) - s[13] = byte((s4 >> 20) | (s5 << 1)) - s[14] = byte(s5 >> 7) - s[15] = byte((s5 >> 15) | (s6 << 6)) - s[16] = byte(s6 >> 2) - s[17] = byte(s6 >> 10) - s[18] = byte((s6 >> 18) | (s7 << 3)) - s[19] = byte(s7 >> 5) - s[20] = byte(s7 >> 13) - s[21] = byte(s8 >> 0) - s[22] = byte(s8 >> 8) - s[23] = byte((s8 >> 16) | (s9 << 5)) - s[24] = byte(s9 >> 3) - s[25] = byte(s9 >> 11) - s[26] = byte((s9 >> 19) | (s10 << 2)) - s[27] 
= byte(s10 >> 6) - s[28] = byte((s10 >> 14) | (s11 << 7)) - s[29] = byte(s11 >> 1) - s[30] = byte(s11 >> 9) - s[31] = byte(s11 >> 17) -} - -// Input: -// s[0]+256*s[1]+...+256^63*s[63] = s -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = s mod l -// where l = 2^252 + 27742317777372353535851937790883648493. -func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 
- s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 
3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} diff --git a/vendor/src/github.com/armon/go-metrics/.gitignore b/vendor/src/github.com/armon/go-metrics/.gitignore deleted file mode 100755 index 00268614f..000000000 --- a/vendor/src/github.com/armon/go-metrics/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/src/github.com/armon/go-metrics/LICENSE b/vendor/src/github.com/armon/go-metrics/LICENSE deleted file mode 100644 index 106569e54..000000000 --- a/vendor/src/github.com/armon/go-metrics/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/src/github.com/armon/go-metrics/README.md b/vendor/src/github.com/armon/go-metrics/README.md deleted file mode 100644 index d9f46e85b..000000000 --- a/vendor/src/github.com/armon/go-metrics/README.md +++ /dev/null @@ -1,68 +0,0 @@ -go-metrics -========== - -This library provides a `metrics` package which can be used to instrument code, -expose application metrics, and profile runtime performance in a flexible manner. - -Sinks -===== - -The `metrics` package makes use of a `MetricSink` interface to support delivery -to any type of backend. Currently the following sinks are provided: - -* StatsiteSink : Sinks to a statsite instance (TCP) -* StatsdSink: Sinks to a statsd / statsite instance (UDP) -* InmemSink : Provides in-memory aggregation, can be used to export stats -* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example. -* BlackholeSink : Sinks to nowhere - -In addition to the sinks, the `InmemSignal` can be used to catch a signal, -and dump a formatted output of recent metrics. For example, when a process gets -a SIGUSR1, it can dump to stderr recent performance metrics for debugging. 
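Since the `MetricSink` interface (reproduced in full in the deleted sink.go further down) has only four methods, a custom backend is a few lines. A hypothetical logging sink, shown purely to illustrate the interface and not part of the library, could look like this:

```go
package main

import "log"

// LogSink writes every metric to the standard logger. It satisfies the
// four-method MetricSink interface from sink.go; the type itself is
// illustrative and does not exist in go-metrics.
type LogSink struct{}

func (LogSink) SetGauge(key []string, val float32)    { log.Printf("[G] %v = %f", key, val) }
func (LogSink) EmitKey(key []string, val float32)     { log.Printf("[P] %v = %f", key, val) }
func (LogSink) IncrCounter(key []string, val float32) { log.Printf("[C] %v += %f", key, val) }
func (LogSink) AddSample(key []string, val float32)   { log.Printf("[S] %v = %f", key, val) }
```

Because `FanoutSink` is just a slice of sinks, such a sink composes with the in-memory aggregator, e.g. `metrics.NewGlobal(metrics.DefaultConfig("service-name"), metrics.FanoutSink{inm, LogSink{}})`.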
- -Examples -======== - -Here is an example of using the package: - - func SlowMethod() { - // Profiling the runtime of a method - defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now()) - } - - // Configure a statsite sink as the global metrics sink - sink, _ := metrics.NewStatsiteSink("statsite:8125") - metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) - - // Emit a Key/Value pair - metrics.EmitKey([]string{"questions", "meaning of life"}, 42) - - -Here is an example of setting up an signal handler: - - // Setup the inmem sink and signal handler - inm := NewInmemSink(10*time.Second, time.Minute) - sig := DefaultInmemSignal(inm) - metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm) - - // Run some code - inm.SetGauge([]string{"foo"}, 42) - inm.EmitKey([]string{"bar"}, 30) - - inm.IncrCounter([]string{"baz"}, 42) - inm.IncrCounter([]string{"baz"}, 1) - inm.IncrCounter([]string{"baz"}, 80) - - inm.AddSample([]string{"method", "wow"}, 42) - inm.AddSample([]string{"method", "wow"}, 100) - inm.AddSample([]string{"method", "wow"}, 22) - - .... - -When a signal comes in, output like the following will be dumped to stderr: - - [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 - [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 - [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 - [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 - diff --git a/vendor/src/github.com/armon/go-metrics/const_unix.go b/vendor/src/github.com/armon/go-metrics/const_unix.go deleted file mode 100644 index 31098dd57..000000000 --- a/vendor/src/github.com/armon/go-metrics/const_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows - -package metrics - -import ( - "syscall" -) - -const ( - // DefaultSignal is used with DefaultInmemSignal - DefaultSignal = syscall.SIGUSR1 -) diff --git a/vendor/src/github.com/armon/go-metrics/const_windows.go b/vendor/src/github.com/armon/go-metrics/const_windows.go deleted file mode 100644 index 38136af3e..000000000 --- a/vendor/src/github.com/armon/go-metrics/const_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package metrics - -import ( - "syscall" -) - -const ( - // DefaultSignal is used with DefaultInmemSignal - // Windows has no SIGUSR1, use SIGBREAK - DefaultSignal = syscall.Signal(21) -) diff --git a/vendor/src/github.com/armon/go-metrics/inmem.go b/vendor/src/github.com/armon/go-metrics/inmem.go deleted file mode 100644 index 0749229bf..000000000 --- a/vendor/src/github.com/armon/go-metrics/inmem.go +++ /dev/null @@ -1,239 +0,0 @@ -package metrics - -import ( - "fmt" - "math" - "strings" - "sync" - "time" -) - -// InmemSink provides a MetricSink that does in-memory aggregation -// without sending metrics over a network. It can be embedded within -// an application to provide profiling information. -type InmemSink struct { - // How long is each aggregation interval - interval time.Duration - - // Retain controls how many metrics interval we keep - retain time.Duration - - // maxIntervals is the maximum length of intervals. - // It is retain / interval. 
- maxIntervals int - - // intervals is a slice of the retained intervals - intervals []*IntervalMetrics - intervalLock sync.RWMutex -} - -// IntervalMetrics stores the aggregated metrics -// for a specific interval -type IntervalMetrics struct { - sync.RWMutex - - // The start time of the interval - Interval time.Time - - // Gauges maps the key to the last set value - Gauges map[string]float32 - - // Points maps the string to the list of emitted values - // from EmitKey - Points map[string][]float32 - - // Counters maps the string key to a sum of the counter - // values - Counters map[string]*AggregateSample - - // Samples maps the key to an AggregateSample, - // which has the rolled up view of a sample - Samples map[string]*AggregateSample -} - -// NewIntervalMetrics creates a new IntervalMetrics for a given interval -func NewIntervalMetrics(intv time.Time) *IntervalMetrics { - return &IntervalMetrics{ - Interval: intv, - Gauges: make(map[string]float32), - Points: make(map[string][]float32), - Counters: make(map[string]*AggregateSample), - Samples: make(map[string]*AggregateSample), - } -} - -// AggregateSample is used to hold aggregate metrics -// about a sample -type AggregateSample struct { - Count int // The count of emitted pairs - Sum float64 // The sum of values - SumSq float64 // The sum of squared values - Min float64 // Minimum value - Max float64 // Maximum value -} - -// Computes a Stddev of the values -func (a *AggregateSample) Stddev() float64 { - num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) - div := float64(a.Count * (a.Count - 1)) - if div == 0 { - return 0 - } - return math.Sqrt(num / div) -} - -// Computes a mean of the values -func (a *AggregateSample) Mean() float64 { - if a.Count == 0 { - return 0 - } - return a.Sum / float64(a.Count) -} - -// Ingest is used to update a sample -func (a *AggregateSample) Ingest(v float64) { - a.Count++ - a.Sum += v - a.SumSq += (v * v) - if v < a.Min || a.Count == 1 { - a.Min = v - } - if v > a.Max || a.Count == 1 { - a.Max = v - } -} - -func (a *AggregateSample) String() string { - if a.Count == 0 { - return "Count: 0" - } else if a.Stddev() == 0 { - return fmt.Sprintf("Count: %d Sum: %0.3f", a.Count, a.Sum) - } else { - return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f", - a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum) - } -} - -// NewInmemSink is used to construct a new in-memory sink. -// Uses an aggregation interval and maximum retention period. 
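Before the constructor itself (which follows immediately), a minimal sketch of the retention behavior; the key name is an illustrative assumption, and the interval arithmetic matches the maxIntervals = retain / interval rule above:

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// retain / interval = 6, so at most six ten-second intervals are kept.
	inm := metrics.NewInmemSink(10*time.Second, time.Minute)
	inm.IncrCounter([]string{"hits"}, 1)

	for _, intv := range inm.Data() {
		intv.RLock() // the newest interval may still be receiving writes
		fmt.Println(intv.Interval, intv.Counters["hits"])
		intv.RUnlock()
	}
}
```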
-func NewInmemSink(interval, retain time.Duration) *InmemSink { - i := &InmemSink{ - interval: interval, - retain: retain, - maxIntervals: int(retain / interval), - } - i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) - return i -} - -func (i *InmemSink) SetGauge(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - intv.Gauges[k] = val -} - -func (i *InmemSink) EmitKey(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - vals := intv.Points[k] - intv.Points[k] = append(vals, val) -} - -func (i *InmemSink) IncrCounter(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - - agg := intv.Counters[k] - if agg == nil { - agg = &AggregateSample{} - intv.Counters[k] = agg - } - agg.Ingest(float64(val)) -} - -func (i *InmemSink) AddSample(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - - agg := intv.Samples[k] - if agg == nil { - agg = &AggregateSample{} - intv.Samples[k] = agg - } - agg.Ingest(float64(val)) -} - -// Data is used to retrieve all the aggregated metrics -// Intervals may be in use, and a read lock should be acquired -func (i *InmemSink) Data() []*IntervalMetrics { - // Get the current interval, forces creation - i.getInterval() - - i.intervalLock.RLock() - defer i.intervalLock.RUnlock() - - intervals := make([]*IntervalMetrics, len(i.intervals)) - copy(intervals, i.intervals) - return intervals -} - -func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics { - i.intervalLock.RLock() - defer i.intervalLock.RUnlock() - - n := len(i.intervals) - if n > 0 && i.intervals[n-1].Interval == intv { - return i.intervals[n-1] - } - return nil -} - -func (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics { - i.intervalLock.Lock() - defer i.intervalLock.Unlock() - - // Check for an existing interval - n := len(i.intervals) - if n > 0 && i.intervals[n-1].Interval == intv { - return i.intervals[n-1] - } - - // Add the current interval - current := NewIntervalMetrics(intv) - i.intervals = append(i.intervals, current) - n++ - - // Truncate the intervals if they are too long - if n >= i.maxIntervals { - copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) - i.intervals = i.intervals[:i.maxIntervals] - } - return current -} - -// getInterval returns the current interval to write to -func (i *InmemSink) getInterval() *IntervalMetrics { - intv := time.Now().Truncate(i.interval) - if m := i.getExistingInterval(intv); m != nil { - return m - } - return i.createInterval(intv) -} - -// Flattens the key for formatting, removes spaces -func (i *InmemSink) flattenKey(parts []string) string { - joined := strings.Join(parts, ".") - return strings.Replace(joined, " ", "_", -1) -} diff --git a/vendor/src/github.com/armon/go-metrics/inmem_signal.go b/vendor/src/github.com/armon/go-metrics/inmem_signal.go deleted file mode 100644 index 95d08ee10..000000000 --- a/vendor/src/github.com/armon/go-metrics/inmem_signal.go +++ /dev/null @@ -1,100 +0,0 @@ -package metrics - -import ( - "bytes" - "fmt" - "io" - "os" - "os/signal" - "sync" - "syscall" -) - -// InmemSignal is used to listen for a given signal, and when received, -// to dump the current metrics from the InmemSink to an io.Writer -type InmemSignal struct { - signal syscall.Signal - inm *InmemSink - w io.Writer - sigCh chan os.Signal - - stop bool - stopCh 
chan struct{} - stopLock sync.Mutex -} - -// NewInmemSignal creates a new InmemSignal which listens for a given signal, -// and dumps the current metrics out to a writer -func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { - i := &InmemSignal{ - signal: sig, - inm: inmem, - w: w, - sigCh: make(chan os.Signal, 1), - stopCh: make(chan struct{}), - } - signal.Notify(i.sigCh, sig) - go i.run() - return i -} - -// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 -// and writes output to stderr. Windows uses SIGBREAK -func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { - return NewInmemSignal(inmem, DefaultSignal, os.Stderr) -} - -// Stop is used to stop the InmemSignal from listening -func (i *InmemSignal) Stop() { - i.stopLock.Lock() - defer i.stopLock.Unlock() - - if i.stop { - return - } - i.stop = true - close(i.stopCh) - signal.Stop(i.sigCh) -} - -// run is a long running routine that handles signals -func (i *InmemSignal) run() { - for { - select { - case <-i.sigCh: - i.dumpStats() - case <-i.stopCh: - return - } - } -} - -// dumpStats is used to dump the data to output writer -func (i *InmemSignal) dumpStats() { - buf := bytes.NewBuffer(nil) - - data := i.inm.Data() - // Skip the last period which is still being aggregated - for i := 0; i < len(data)-1; i++ { - intv := data[i] - intv.RLock() - for name, val := range intv.Gauges { - fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val) - } - for name, vals := range intv.Points { - for _, val := range vals { - fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) - } - } - for name, agg := range intv.Counters { - fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg) - } - for name, agg := range intv.Samples { - fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg) - } - intv.RUnlock() - } - - // Write out the bytes - i.w.Write(buf.Bytes()) -} diff --git a/vendor/src/github.com/armon/go-metrics/metrics.go b/vendor/src/github.com/armon/go-metrics/metrics.go deleted file mode 100755 index b818e4182..000000000 --- a/vendor/src/github.com/armon/go-metrics/metrics.go +++ /dev/null @@ -1,115 +0,0 @@ -package metrics - -import ( - "runtime" - "time" -) - -func (m *Metrics) SetGauge(key []string, val float32) { - if m.HostName != "" && m.EnableHostname { - key = insert(0, m.HostName, key) - } - if m.EnableTypePrefix { - key = insert(0, "gauge", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - m.sink.SetGauge(key, val) -} - -func (m *Metrics) EmitKey(key []string, val float32) { - if m.EnableTypePrefix { - key = insert(0, "kv", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - m.sink.EmitKey(key, val) -} - -func (m *Metrics) IncrCounter(key []string, val float32) { - if m.EnableTypePrefix { - key = insert(0, "counter", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - m.sink.IncrCounter(key, val) -} - -func (m *Metrics) AddSample(key []string, val float32) { - if m.EnableTypePrefix { - key = insert(0, "sample", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - m.sink.AddSample(key, val) -} - -func (m *Metrics) MeasureSince(key []string, start time.Time) { - if m.EnableTypePrefix { - key = insert(0, "timer", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - now := time.Now() - elapsed := now.Sub(start) - msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) - 
m.sink.AddSample(key, msec) -} - -// Periodically collects runtime stats to publish -func (m *Metrics) collectStats() { - for { - time.Sleep(m.ProfileInterval) - m.emitRuntimeStats() - } -} - -// Emits various runtime statsitics -func (m *Metrics) emitRuntimeStats() { - // Export number of Goroutines - numRoutines := runtime.NumGoroutine() - m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) - - // Export memory stats - var stats runtime.MemStats - runtime.ReadMemStats(&stats) - m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) - m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) - m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) - m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) - m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) - m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) - m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) - - // Export info about the last few GC runs - num := stats.NumGC - - // Handle wrap around - if num < m.lastNumGC { - m.lastNumGC = 0 - } - - // Ensure we don't scan more than 256 - if num-m.lastNumGC >= 256 { - m.lastNumGC = num - 255 - } - - for i := m.lastNumGC; i < num; i++ { - pause := stats.PauseNs[i%256] - m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) - } - m.lastNumGC = num -} - -// Inserts a string value at an index into the slice -func insert(i int, v string, s []string) []string { - s = append(s, "") - copy(s[i+1:], s[i:]) - s[i] = v - return s -} diff --git a/vendor/src/github.com/armon/go-metrics/sink.go b/vendor/src/github.com/armon/go-metrics/sink.go deleted file mode 100755 index 0c240c2c4..000000000 --- a/vendor/src/github.com/armon/go-metrics/sink.go +++ /dev/null @@ -1,52 +0,0 @@ -package metrics - -// The MetricSink interface is used to transmit metrics information -// to an external system -type MetricSink interface { - // A Gauge should retain the last value it is set to - SetGauge(key []string, val float32) - - // Should emit a Key/Value pair for each call - EmitKey(key []string, val float32) - - // Counters should accumulate values - IncrCounter(key []string, val float32) - - // Samples are for timing information, where quantiles are used - AddSample(key []string, val float32) -} - -// BlackholeSink is used to just blackhole messages -type BlackholeSink struct{} - -func (*BlackholeSink) SetGauge(key []string, val float32) {} -func (*BlackholeSink) EmitKey(key []string, val float32) {} -func (*BlackholeSink) IncrCounter(key []string, val float32) {} -func (*BlackholeSink) AddSample(key []string, val float32) {} - -// FanoutSink is used to sink to fanout values to multiple sinks -type FanoutSink []MetricSink - -func (fh FanoutSink) SetGauge(key []string, val float32) { - for _, s := range fh { - s.SetGauge(key, val) - } -} - -func (fh FanoutSink) EmitKey(key []string, val float32) { - for _, s := range fh { - s.EmitKey(key, val) - } -} - -func (fh FanoutSink) IncrCounter(key []string, val float32) { - for _, s := range fh { - s.IncrCounter(key, val) - } -} - -func (fh FanoutSink) AddSample(key []string, val float32) { - for _, s := range fh { - s.AddSample(key, val) - } -} diff --git a/vendor/src/github.com/armon/go-metrics/start.go b/vendor/src/github.com/armon/go-metrics/start.go deleted file mode 100755 index 44113f100..000000000 --- a/vendor/src/github.com/armon/go-metrics/start.go +++ /dev/null @@ -1,95 +0,0 @@ -package metrics - -import ( - 
"os" - "time" -) - -// Config is used to configure metrics settings -type Config struct { - ServiceName string // Prefixed with keys to seperate services - HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname - EnableHostname bool // Enable prefixing gauge values with hostname - EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) - EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") - TimerGranularity time.Duration // Granularity of timers. - ProfileInterval time.Duration // Interval to profile runtime metrics -} - -// Metrics represents an instance of a metrics sink that can -// be used to emit -type Metrics struct { - Config - lastNumGC uint32 - sink MetricSink -} - -// Shared global metrics instance -var globalMetrics *Metrics - -func init() { - // Initialize to a blackhole sink to avoid errors - globalMetrics = &Metrics{sink: &BlackholeSink{}} -} - -// DefaultConfig provides a sane default configuration -func DefaultConfig(serviceName string) *Config { - c := &Config{ - ServiceName: serviceName, // Use client provided service - HostName: "", - EnableHostname: true, // Enable hostname prefix - EnableRuntimeMetrics: true, // Enable runtime profiling - EnableTypePrefix: false, // Disable type prefix - TimerGranularity: time.Millisecond, // Timers are in milliseconds - ProfileInterval: time.Second, // Poll runtime every second - } - - // Try to get the hostname - name, _ := os.Hostname() - c.HostName = name - return c -} - -// New is used to create a new instance of Metrics -func New(conf *Config, sink MetricSink) (*Metrics, error) { - met := &Metrics{} - met.Config = *conf - met.sink = sink - - // Start the runtime collector - if conf.EnableRuntimeMetrics { - go met.collectStats() - } - return met, nil -} - -// NewGlobal is the same as New, but it assigns the metrics object to be -// used globally as well as returning it. -func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { - metrics, err := New(conf, sink) - if err == nil { - globalMetrics = metrics - } - return metrics, err -} - -// Proxy all the methods to the globalMetrics instance -func SetGauge(key []string, val float32) { - globalMetrics.SetGauge(key, val) -} - -func EmitKey(key []string, val float32) { - globalMetrics.EmitKey(key, val) -} - -func IncrCounter(key []string, val float32) { - globalMetrics.IncrCounter(key, val) -} - -func AddSample(key []string, val float32) { - globalMetrics.AddSample(key, val) -} - -func MeasureSince(key []string, start time.Time) { - globalMetrics.MeasureSince(key, start) -} diff --git a/vendor/src/github.com/armon/go-metrics/statsd.go b/vendor/src/github.com/armon/go-metrics/statsd.go deleted file mode 100644 index 65a5021a0..000000000 --- a/vendor/src/github.com/armon/go-metrics/statsd.go +++ /dev/null @@ -1,154 +0,0 @@ -package metrics - -import ( - "bytes" - "fmt" - "log" - "net" - "strings" - "time" -) - -const ( - // statsdMaxLen is the maximum size of a packet - // to send to statsd - statsdMaxLen = 1400 -) - -// StatsdSink provides a MetricSink that can be used -// with a statsite or statsd metrics server. It uses -// only UDP packets, while StatsiteSink uses TCP. 
-type StatsdSink struct { - addr string - metricQueue chan string -} - -// NewStatsdSink is used to create a new StatsdSink -func NewStatsdSink(addr string) (*StatsdSink, error) { - s := &StatsdSink{ - addr: addr, - metricQueue: make(chan string, 4096), - } - go s.flushMetrics() - return s, nil -} - -// Close is used to stop flushing to statsd -func (s *StatsdSink) Shutdown() { - close(s.metricQueue) -} - -func (s *StatsdSink) SetGauge(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsdSink) EmitKey(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) -} - -func (s *StatsdSink) IncrCounter(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsdSink) AddSample(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -// Flattens the key for formatting, removes spaces -func (s *StatsdSink) flattenKey(parts []string) string { - joined := strings.Join(parts, ".") - return strings.Map(func(r rune) rune { - switch r { - case ':': - fallthrough - case ' ': - return '_' - default: - return r - } - }, joined) -} - -// Does a non-blocking push to the metrics queue -func (s *StatsdSink) pushMetric(m string) { - select { - case s.metricQueue <- m: - default: - } -} - -// Flushes metrics -func (s *StatsdSink) flushMetrics() { - var sock net.Conn - var err error - var wait <-chan time.Time - ticker := time.NewTicker(flushInterval) - defer ticker.Stop() - -CONNECT: - // Create a buffer - buf := bytes.NewBuffer(nil) - - // Attempt to connect - sock, err = net.Dial("udp", s.addr) - if err != nil { - log.Printf("[ERR] Error connecting to statsd! Err: %s", err) - goto WAIT - } - - for { - select { - case metric, ok := <-s.metricQueue: - // Get a metric from the queue - if !ok { - goto QUIT - } - - // Check if this would overflow the packet size - if len(metric)+buf.Len() > statsdMaxLen { - _, err := sock.Write(buf.Bytes()) - buf.Reset() - if err != nil { - log.Printf("[ERR] Error writing to statsd! Err: %s", err) - goto WAIT - } - } - - // Append to the buffer - buf.WriteString(metric) - - case <-ticker.C: - if buf.Len() == 0 { - continue - } - - _, err := sock.Write(buf.Bytes()) - buf.Reset() - if err != nil { - log.Printf("[ERR] Error flushing to statsd! Err: %s", err) - goto WAIT - } - } - } - -WAIT: - // Wait for a while - wait = time.After(time.Duration(5) * time.Second) - for { - select { - // Dequeue the messages to avoid backlog - case _, ok := <-s.metricQueue: - if !ok { - goto QUIT - } - case <-wait: - goto CONNECT - } - } -QUIT: - s.metricQueue = nil -} diff --git a/vendor/src/github.com/armon/go-metrics/statsite.go b/vendor/src/github.com/armon/go-metrics/statsite.go deleted file mode 100755 index 68730139a..000000000 --- a/vendor/src/github.com/armon/go-metrics/statsite.go +++ /dev/null @@ -1,142 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "strings" - "time" -) - -const ( - // We force flush the statsite metrics after this period of - // inactivity. Prevents stats from getting stuck in a buffer - // forever. 
- flushInterval = 100 * time.Millisecond -) - -// StatsiteSink provides a MetricSink that can be used with a -// statsite metrics server -type StatsiteSink struct { - addr string - metricQueue chan string -} - -// NewStatsiteSink is used to create a new StatsiteSink -func NewStatsiteSink(addr string) (*StatsiteSink, error) { - s := &StatsiteSink{ - addr: addr, - metricQueue: make(chan string, 4096), - } - go s.flushMetrics() - return s, nil -} - -// Close is used to stop flushing to statsite -func (s *StatsiteSink) Shutdown() { - close(s.metricQueue) -} - -func (s *StatsiteSink) SetGauge(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsiteSink) EmitKey(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) -} - -func (s *StatsiteSink) IncrCounter(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsiteSink) AddSample(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -// Flattens the key for formatting, removes spaces -func (s *StatsiteSink) flattenKey(parts []string) string { - joined := strings.Join(parts, ".") - return strings.Map(func(r rune) rune { - switch r { - case ':': - fallthrough - case ' ': - return '_' - default: - return r - } - }, joined) -} - -// Does a non-blocking push to the metrics queue -func (s *StatsiteSink) pushMetric(m string) { - select { - case s.metricQueue <- m: - default: - } -} - -// Flushes metrics -func (s *StatsiteSink) flushMetrics() { - var sock net.Conn - var err error - var wait <-chan time.Time - var buffered *bufio.Writer - ticker := time.NewTicker(flushInterval) - defer ticker.Stop() - -CONNECT: - // Attempt to connect - sock, err = net.Dial("tcp", s.addr) - if err != nil { - log.Printf("[ERR] Error connecting to statsite! Err: %s", err) - goto WAIT - } - - // Create a buffered writer - buffered = bufio.NewWriter(sock) - - for { - select { - case metric, ok := <-s.metricQueue: - // Get a metric from the queue - if !ok { - goto QUIT - } - - // Try to send to statsite - _, err := buffered.Write([]byte(metric)) - if err != nil { - log.Printf("[ERR] Error writing to statsite! Err: %s", err) - goto WAIT - } - case <-ticker.C: - if err := buffered.Flush(); err != nil { - log.Printf("[ERR] Error flushing to statsite! 
Err: %s", err) - goto WAIT - } - } - } - -WAIT: - // Wait for a while - wait = time.After(time.Duration(5) * time.Second) - for { - select { - // Dequeue the messages to avoid backlog - case _, ok := <-s.metricQueue: - if !ok { - goto QUIT - } - case <-wait: - goto CONNECT - } - } -QUIT: - s.metricQueue = nil -} diff --git a/vendor/src/github.com/armon/go-radix/.gitignore b/vendor/src/github.com/armon/go-radix/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/src/github.com/armon/go-radix/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/src/github.com/armon/go-radix/.travis.yml b/vendor/src/github.com/armon/go-radix/.travis.yml deleted file mode 100644 index 1a0bbea6c..000000000 --- a/vendor/src/github.com/armon/go-radix/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go -go: - - tip diff --git a/vendor/src/github.com/armon/go-radix/LICENSE b/vendor/src/github.com/armon/go-radix/LICENSE deleted file mode 100644 index a5df10e67..000000000 --- a/vendor/src/github.com/armon/go-radix/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/src/github.com/armon/go-radix/README.md b/vendor/src/github.com/armon/go-radix/README.md deleted file mode 100644 index c054fe86c..000000000 --- a/vendor/src/github.com/armon/go-radix/README.md +++ /dev/null @@ -1,36 +0,0 @@ -go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) -========= - -Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). -The package only provides a single `Tree` implementation, optimized for sparse nodes. - -As a radix tree, it provides the following: - * O(k) operations. In many cases, this can be faster than a hash table since - the hash function is an O(k) operation, and hash tables have very poor cache locality. - * Minimum / Maximum value lookups - * Ordered iteration - -Documentation -============= - -The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). 
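In addition to the point lookups shown in the Example section just below, the deleted radix.go supports ordered, prefix-scoped traversal. A small sketch, with made-up keys, in the same snippet style as the README's own example:

```go
// Build a tree from an existing map and walk one prefix of it.
r := radix.NewFromMap(map[string]interface{}{
	"users/alice": 1,
	"users/bob":   2,
	"zones/us":    3,
})
r.WalkPrefix("users/", func(k string, v interface{}) bool {
	fmt.Println(k, v) // visits users/alice then users/bob, in key order
	return false      // a true return would stop the walk early
})
```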
- -Example -======= - -Below is a simple example of usage - -```go -// Create a tree -r := radix.New() -r.Insert("foo", 1) -r.Insert("bar", 2) -r.Insert("foobar", 2) - -// Find the longest prefix match -m, _, _ := r.LongestPrefix("foozip") -if m != "foo" { - panic("should be foo") -} -``` - diff --git a/vendor/src/github.com/armon/go-radix/radix.go b/vendor/src/github.com/armon/go-radix/radix.go deleted file mode 100644 index 8c963c914..000000000 --- a/vendor/src/github.com/armon/go-radix/radix.go +++ /dev/null @@ -1,467 +0,0 @@ -package radix - -import ( - "sort" - "strings" -) - -// WalkFn is used when walking the tree. Takes a -// key and value, returning if iteration should -// be terminated. -type WalkFn func(s string, v interface{}) bool - -// leafNode is used to represent a value -type leafNode struct { - key string - val interface{} -} - -// edge is used to represent an edge node -type edge struct { - label byte - node *node -} - -type node struct { - // leaf is used to store possible leaf - leaf *leafNode - - // prefix is the common prefix we ignore - prefix string - - // Edges should be stored in-order for iteration. - // We avoid a fully materialized slice to save memory, - // since in most cases we expect to be sparse - edges edges -} - -func (n *node) isLeaf() bool { - return n.leaf != nil -} - -func (n *node) addEdge(e edge) { - n.edges = append(n.edges, e) - n.edges.Sort() -} - -func (n *node) replaceEdge(e edge) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= e.label - }) - if idx < num && n.edges[idx].label == e.label { - n.edges[idx].node = e.node - return - } - panic("replacing missing edge") -} - -func (n *node) getEdge(label byte) *node { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - return n.edges[idx].node - } - return nil -} - -type edges []edge - -func (e edges) Len() int { - return len(e) -} - -func (e edges) Less(i, j int) bool { - return e[i].label < e[j].label -} - -func (e edges) Swap(i, j int) { - e[i], e[j] = e[j], e[i] -} - -func (e edges) Sort() { - sort.Sort(e) -} - -// Tree implements a radix tree. This can be treated as a -// Dictionary abstract data type. The main advantage over -// a standard hash map is prefix-based lookups and -// ordered iteration, -type Tree struct { - root *node - size int -} - -// New returns an empty Tree -func New() *Tree { - return NewFromMap(nil) -} - -// NewFromMap returns a new tree containing the keys -// from an existing map -func NewFromMap(m map[string]interface{}) *Tree { - t := &Tree{root: &node{}} - for k, v := range m { - t.Insert(k, v) - } - return t -} - -// Len is used to return the number of elements in the tree -func (t *Tree) Len() int { - return t.size -} - -// longestPrefix finds the length of the shared prefix -// of two strings -func longestPrefix(k1, k2 string) int { - max := len(k1) - if l := len(k2); l < max { - max = l - } - var i int - for i = 0; i < max; i++ { - if k1[i] != k2[i] { - break - } - } - return i -} - -// Insert is used to add a newentry or update -// an existing entry. Returns if updated. 
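A short sketch of the return contract documented above, before the implementation that follows; keys and values are illustrative:

```go
r := radix.New()
old, updated := r.Insert("foo", 1)
fmt.Println(old, updated) // <nil> false: a new entry was created
old, updated = r.Insert("foo", 2)
fmt.Println(old, updated) // 1 true: the existing entry was replaced
```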
-func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { - var parent *node - n := t.root - search := s - for { - // Handle key exhaution - if len(search) == 0 { - if n.isLeaf() { - old := n.leaf.val - n.leaf.val = v - return old, true - } else { - n.leaf = &leafNode{ - key: s, - val: v, - } - t.size++ - return nil, false - } - } - - // Look for the edge - parent = n - n = n.getEdge(search[0]) - - // No edge, create one - if n == nil { - e := edge{ - label: search[0], - node: &node{ - leaf: &leafNode{ - key: s, - val: v, - }, - prefix: search, - }, - } - parent.addEdge(e) - t.size++ - return nil, false - } - - // Determine longest prefix of the search key on match - commonPrefix := longestPrefix(search, n.prefix) - if commonPrefix == len(n.prefix) { - search = search[commonPrefix:] - continue - } - - // Split the node - t.size++ - child := &node{ - prefix: search[:commonPrefix], - } - parent.replaceEdge(edge{ - label: search[0], - node: child, - }) - - // Restore the existing node - child.addEdge(edge{ - label: n.prefix[commonPrefix], - node: n, - }) - n.prefix = n.prefix[commonPrefix:] - - // Create a new leaf node - leaf := &leafNode{ - key: s, - val: v, - } - - // If the new key is a subset, add to to this node - search = search[commonPrefix:] - if len(search) == 0 { - child.leaf = leaf - return nil, false - } - - // Create a new edge for the node - child.addEdge(edge{ - label: search[0], - node: &node{ - leaf: leaf, - prefix: search, - }, - }) - return nil, false - } - return nil, false -} - -// Delete is used to delete a key, returning the previous -// value and if it was deleted -func (t *Tree) Delete(s string) (interface{}, bool) { - n := t.root - search := s - for { - // Check for key exhaution - if len(search) == 0 { - if !n.isLeaf() { - break - } - goto DELETE - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false - -DELETE: - // Delete the leaf - leaf := n.leaf - n.leaf = nil - t.size-- - - // Check if we should merge this node - if len(n.edges) == 1 { - e := n.edges[0] - child := e.node - n.prefix = n.prefix + child.prefix - n.leaf = child.leaf - n.edges = child.edges - } - return leaf.val, true -} - -// Get is used to lookup a specific key, returning -// the value and if it was found -func (t *Tree) Get(s string) (interface{}, bool) { - n := t.root - search := s - for { - // Check for key exhaution - if len(search) == 0 { - if n.isLeaf() { - return n.leaf.val, true - } - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false -} - -// LongestPrefix is like Get, but instead of an -// exact match, it will return the longest prefix match. 
-func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { - var last *leafNode - n := t.root - search := s - for { - // Look for a leaf node - if n.isLeaf() { - last = n.leaf - } - - // Check for key exhaution - if len(search) == 0 { - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - if last != nil { - return last.key, last.val, true - } - return "", nil, false -} - -// Minimum is used to return the minimum value in the tree -func (t *Tree) Minimum() (string, interface{}, bool) { - n := t.root - for { - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } - if len(n.edges) > 0 { - n = n.edges[0].node - } else { - break - } - } - return "", nil, false -} - -// Maximum is used to return the maximum value in the tree -func (t *Tree) Maximum() (string, interface{}, bool) { - n := t.root - for { - if num := len(n.edges); num > 0 { - n = n.edges[num-1].node - continue - } - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } else { - break - } - } - return "", nil, false -} - -// Walk is used to walk the tree -func (t *Tree) Walk(fn WalkFn) { - recursiveWalk(t.root, fn) -} - -// WalkPrefix is used to walk the tree under a prefix -func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { - n := t.root - search := prefix - for { - // Check for key exhaution - if len(search) == 0 { - recursiveWalk(n, fn) - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - - } else if strings.HasPrefix(n.prefix, search) { - // Child may be under our search prefix - recursiveWalk(n, fn) - return - } else { - break - } - } - -} - -// WalkPath is used to walk the tree, but only visiting nodes -// from the root down to a given leaf. Where WalkPrefix walks -// all the entries *under* the given prefix, this walks the -// entries *above* the given prefix. -func (t *Tree) WalkPath(path string, fn WalkFn) { - n := t.root - search := path - for { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return - } - - // Check for key exhaution - if len(search) == 0 { - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - return - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } -} - -// recursiveWalk is used to do a pre-order walk of a node -// recursively. 
Returns true if the walk should be aborted -func recursiveWalk(n *node, fn WalkFn) bool { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return true - } - - // Recurse on the children - for _, e := range n.edges { - if recursiveWalk(e.node, fn) { - return true - } - } - return false -} - -// ToMap is used to walk the tree and convert it into a map -func (t *Tree) ToMap() map[string]interface{} { - out := make(map[string]interface{}, t.size) - t.Walk(func(k string, v interface{}) bool { - out[k] = v - return false - }) - return out -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!) The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go
deleted file mode 100644
index e50771f80..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Package awserr represents API error interface accessors for the SDK.
-package awserr
-
-// An Error wraps lower level errors with code, message and an original error.
-// The underlying concrete error type may also satisfy other interfaces which
-// can be used to obtain more specific information about the error.
-//
-// Calling Error() or String() will always include the full information about
-// an error based on its underlying type.
-//
-// Example:
-//
-//     output, err := s3manage.Upload(svc, input, opts)
-//     if err != nil {
-//         if awsErr, ok := err.(awserr.Error); ok {
-//             // Get error details
-//             log.Println("Error:", awsErr.Code(), awsErr.Message())
-//
-//             // Prints out full error message, including original error if there was one.
-//             log.Println("Error:", awsErr.Error())
-//
-//             // Get original error
-//             if origErr := awsErr.OrigErr(); origErr != nil {
-//                 // operate on original error.
-//             }
-//         } else {
-//             fmt.Println(err.Error())
-//         }
-//     }
-//
-type Error interface {
-	// Satisfy the generic error interface.
-	error
-
-	// Returns the short phrase depicting the classification of the error.
-	Code() string
-
-	// Returns the error details message.
-	Message() string
-
-	// Returns the original error if one was set. Nil is returned if not set.
-	OrigErr() error
-}
-
-// BatchError is a batch of errors which also wraps lower level errors with
-// code, message, and original errors. Calling Error() will include all errors
-// that occurred in the batch.
-//
-// Deprecated: Replaced with BatchedErrors. Only defined for backwards
-// compatibility.
-type BatchError interface {
-	// Satisfy the generic error interface.
-	error
-
-	// Returns the short phrase depicting the classification of the error.
-	Code() string
-
-	// Returns the error details message.
-	Message() string
-
-	// Returns the original error if one was set. Nil is returned if not set.
-	OrigErrs() []error
-}
-
-// BatchedErrors is a batch of errors which also wraps lower level errors with
-// code, message, and original errors. Calling Error() will include all errors
-// that occurred in the batch.
-//
-// Replaces BatchError
type BatchedErrors interface {
-	// Satisfy the base Error interface.
-	Error
-
-	// Returns the original error if one was set. Nil is returned if not set.
-	OrigErrs() []error
-}
-
-// New returns an Error object described by the code, message, and origErr.
-//
-// If origErr satisfies the Error interface it will not be wrapped within a new
-// Error object and will instead be returned.
-func New(code, message string, origErr error) Error {
-	var errs []error
-	if origErr != nil {
-		errs = append(errs, origErr)
-	}
-	return newBaseError(code, message, errs)
-}
-
-// NewBatchError returns a BatchedErrors with a collection of errors as an
-// array of errors.
-func NewBatchError(code, message string, errs []error) BatchedErrors {
-	return newBaseError(code, message, errs)
-}
-
-// A RequestFailure is an interface to extract request failure information from
-// an Error such as the request ID of the failed request returned by a service.
-// RequestFailures may not always have a requestID value if the request failed
-// prior to reaching the service such as a connection error.
-//
-// Example:
-//
-//     output, err := s3manage.Upload(svc, input, opts)
-//     if err != nil {
-//         if reqerr, ok := err.(RequestFailure); ok {
-//             log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
-//         } else {
-//             log.Println("Error:", err.Error())
-//         }
-//     }
-//
-// Combined with awserr.Error:
-//
-//     output, err := s3manage.Upload(svc, input, opts)
-//     if err != nil {
-//         if awsErr, ok := err.(awserr.Error); ok {
-//             // Generic AWS Error with Code, Message, and original error (if any)
-//             fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
-//
-//             if reqErr, ok := err.(awserr.RequestFailure); ok {
-//                 // A service error occurred
-//                 fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
-//             }
-//         } else {
-//             fmt.Println(err.Error())
-//         }
-//     }
-//
-type RequestFailure interface {
-	Error
-
-	// The status code of the HTTP response.
-	StatusCode() int
-
-	// The request ID returned by the service for a request failure. This will
-	// be empty if no request ID is available such as the request failed due
-	// to a connection error.
-	RequestID() string
-}
-
-// NewRequestFailure returns a new request error wrapper for the given Error
-// provided.
-func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
-	return newRequestError(err, statusCode, reqID)
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go
deleted file mode 100644
index e2d333b84..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package awserr
-
-import "fmt"
-
-// SprintError returns a string of the formatted error code.
-//
-// Both extra and origErr are optional. If they are included their lines
-// will be added, but if they are not included their lines will be ignored.
-func SprintError(code, message, extra string, origErr error) string {
-	msg := fmt.Sprintf("%s: %s", code, message)
-	if extra != "" {
-		msg = fmt.Sprintf("%s\n\t%s", msg, extra)
-	}
-	if origErr != nil {
-		msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
-	}
-	return msg
-}
-
-// A baseError wraps the code and message which defines an error. It also
-// can be used to wrap an original error object.
-//
-// Should be used as the root for errors satisfying the awserr.Error. Also
-// for any error which does not fit into a specific error wrapper type.
-type baseError struct {
-	// Classification of error
-	code string
-
-	// Detailed information about error
-	message string
-
-	// Optional original error this error is based off of. Allows building
-	// chained errors.
-	errs []error
-}
-
-// newBaseError returns an error object for the code, message, and errors.
-//
-// code is a short, no-whitespace phrase depicting the classification of
-// the error that is being created.
-//
-// message is the free flow string containing detailed information about the
-// error.
-//
-// origErrs are the error objects which will be nested under the new errors to
-// be returned.
-func newBaseError(code, message string, origErrs []error) *baseError {
-	b := &baseError{
-		code:    code,
-		message: message,
-		errs:    origErrs,
-	}
-
-	return b
-}
-
-// Error returns the string representation of the error.
-//
-// See ErrorWithExtra for formatting.
-//
-// Satisfies the error interface.
-func (b baseError) Error() string {
-	size := len(b.errs)
-	if size > 0 {
-		return SprintError(b.code, b.message, "", errorList(b.errs))
-	}
-
-	return SprintError(b.code, b.message, "", nil)
-}
-
-// String returns the string representation of the error.
-// Alias for Error to satisfy the stringer interface.
-func (b baseError) String() string {
-	return b.Error()
-}
-
-// Code returns the short phrase depicting the classification of the error.
-func (b baseError) Code() string {
-	return b.code
-}
-
-// Message returns the error details message.
-func (b baseError) Message() string {
-	return b.message
-}
-
-// OrigErr returns the original error if one was set. Nil is returned if no
-// error was set. This only returns the first element in the list. If the full
-// list is needed, use BatchedErrors.
-func (b baseError) OrigErr() error {
-	switch len(b.errs) {
-	case 0:
-		return nil
-	case 1:
-		return b.errs[0]
-	default:
-		if err, ok := b.errs[0].(Error); ok {
-			return NewBatchError(err.Code(), err.Message(), b.errs[1:])
-		}
-		return NewBatchError("BatchedErrors",
-			"multiple errors occurred", b.errs)
-	}
-}
-
-// OrigErrs returns the original errors if one was set. An empty slice is
-// returned if no error was set.
-func (b baseError) OrigErrs() []error {
-	return b.errs
-}
-
-// So that the Error interface type can be included as an anonymous field
-// in the requestError struct and not conflict with the error.Error() method.
-type awsError Error
-
-// A requestError wraps a request or service error.
-//
-// Composed of baseError for code, message, and original error.
-type requestError struct {
-	awsError
-	statusCode int
-	requestID  string
-}
-
-// newRequestError returns a wrapped error with additional information for
-// request status code, and service requestID.
-//
-// Should be used to wrap all requests which involve service requests. Even if
-// the request failed without a service response, but had an HTTP status code
-// that may be meaningful.
-//
-// Also wraps original errors via the baseError.
-func newRequestError(err Error, statusCode int, requestID string) *requestError {
-	return &requestError{
-		awsError:   err,
-		statusCode: statusCode,
-		requestID:  requestID,
-	}
-}
-
-// Error returns the string representation of the error.
-// Satisfies the error interface.
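For context, a small sketch of how these constructors compose; the error code, message, and request ID values here are made up:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	cause := errors.New("connection reset")
	err := awserr.New("RequestError", "send request failed", cause)

	// Error() renders via SprintError, chaining the cause:
	//   RequestError: send request failed
	//   caused by: connection reset
	fmt.Println(err.Error())

	// Wrapping with HTTP metadata yields a RequestFailure.
	rf := awserr.NewRequestFailure(err, 503, "req-123")
	fmt.Println(rf.StatusCode(), rf.RequestID()) // 503 req-123
}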
-func (r requestError) Error() string {
-	extra := fmt.Sprintf("status code: %d, request id: %s",
-		r.statusCode, r.requestID)
-	return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
-}
-
-// String returns the string representation of the error.
-// Alias for Error to satisfy the stringer interface.
-func (r requestError) String() string {
-	return r.Error()
-}
-
-// StatusCode returns the wrapped status code for the error
-func (r requestError) StatusCode() int {
-	return r.statusCode
-}
-
-// RequestID returns the wrapped requestID
-func (r requestError) RequestID() string {
-	return r.requestID
-}
-
-// OrigErrs returns the original errors if one was set. An empty slice is
-// returned if no error was set.
-func (r requestError) OrigErrs() []error {
-	if b, ok := r.awsError.(BatchedErrors); ok {
-		return b.OrigErrs()
-	}
-	return []error{r.OrigErr()}
-}
-
-// An error list that satisfies the golang interface
-type errorList []error
-
-// Error returns the string representation of the error.
-//
-// Satisfies the error interface.
-func (e errorList) Error() string {
-	msg := ""
-	// How do we want to handle the array size being zero
-	if size := len(e); size > 0 {
-		for i := 0; i < size; i++ {
-			msg += fmt.Sprintf("%s", e[i].Error())
-			// We check the next index to see if it is within the slice.
-			// If it is, then we append a newline. We do this because unit tests
-			// could be broken with the additional '\n'
-			if i+1 < size {
-				msg += "\n"
-			}
-		}
-	}
-	return msg
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
deleted file mode 100644
index 8429470b9..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package awsutil
-
-import (
-	"io"
-	"reflect"
-)
-
-// Copy deeply copies a src structure to dst. Useful for copying request and
-// response structures.
-//
-// Can copy between structs of different type, but will only copy fields which
-// are assignable, and exist in both structs. Fields which are not assignable,
-// or do not exist in both structs are ignored.
-func Copy(dst, src interface{}) {
-	dstval := reflect.ValueOf(dst)
-	if !dstval.IsValid() {
-		panic("Copy dst cannot be nil")
-	}
-
-	rcopy(dstval, reflect.ValueOf(src), true)
-}
-
-// CopyOf returns a copy of src while also allocating the memory for dst.
-// src must be a pointer type or this operation will fail.
-func CopyOf(src interface{}) (dst interface{}) {
-	dsti := reflect.New(reflect.TypeOf(src).Elem())
-	dst = dsti.Interface()
-	rcopy(dsti, reflect.ValueOf(src), true)
-	return
-}
-
-// rcopy performs a recursive copy of values from the source to destination.
-//
-// root is used to skip certain aspects of the copy which are not valid
-// for the root node of an object.
-func rcopy(dst, src reflect.Value, root bool) {
-	if !src.IsValid() {
-		return
-	}
-
-	switch src.Kind() {
-	case reflect.Ptr:
-		if _, ok := src.Interface().(io.Reader); ok {
-			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
-				dst.Elem().Set(src)
-			} else if dst.CanSet() {
-				dst.Set(src)
-			}
-		} else {
-			e := src.Type().Elem()
-			if dst.CanSet() && !src.IsNil() {
-				dst.Set(reflect.New(e))
-			}
-			if src.Elem().IsValid() {
-				// Keep the current root state since the depth hasn't changed
-				rcopy(dst.Elem(), src.Elem(), root)
-			}
-		}
-	case reflect.Struct:
-		t := dst.Type()
-		for i := 0; i < t.NumField(); i++ {
-			name := t.Field(i).Name
-			srcVal := src.FieldByName(name)
-			dstVal := dst.FieldByName(name)
-			if srcVal.IsValid() && dstVal.CanSet() {
-				rcopy(dstVal, srcVal, false)
-			}
-		}
-	case reflect.Slice:
-		if src.IsNil() {
-			break
-		}
-
-		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
-		dst.Set(s)
-		for i := 0; i < src.Len(); i++ {
-			rcopy(dst.Index(i), src.Index(i), false)
-		}
-	case reflect.Map:
-		if src.IsNil() {
-			break
-		}
-
-		s := reflect.MakeMap(src.Type())
-		dst.Set(s)
-		for _, k := range src.MapKeys() {
-			v := src.MapIndex(k)
-			v2 := reflect.New(v.Type()).Elem()
-			rcopy(v2, v, false)
-			dst.SetMapIndex(k, v2)
-		}
-	default:
-		// Assign the value if possible. If its not assignable, the value would
-		// need to be converted and the impact of that may be unexpected, or is
-		// not compatible with the dst type.
-		if src.Type().AssignableTo(dst.Type()) {
-			dst.Set(src)
-		}
-	}
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
deleted file mode 100644
index 59fa4a558..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package awsutil
-
-import (
-	"reflect"
-)
-
-// DeepEqual returns whether the two values are deeply equal like reflect.DeepEqual.
-// In addition to this, this method will also dereference the input values if
-// possible so the DeepEqual performed will not fail if one parameter is a
-// pointer and the other is not.
-//
-// DeepEqual will not perform indirection of nested values of the input parameters.
-func DeepEqual(a, b interface{}) bool {
-	ra := reflect.Indirect(reflect.ValueOf(a))
-	rb := reflect.Indirect(reflect.ValueOf(b))
-
-	if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
-		// If the elements are both nil, and of the same type they are equal
-		// If they are of different types they are not equal
-		return reflect.TypeOf(a) == reflect.TypeOf(b)
-	} else if raValid != rbValid {
-		// Both values must be valid to be equal
-		return false
-	}
-
-	return reflect.DeepEqual(ra.Interface(), rb.Interface())
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
deleted file mode 100644
index 4d2a01e8c..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package awsutil
-
-import (
-	"reflect"
-	"regexp"
-	"strconv"
-	"strings"
-
-	"github.com/jmespath/go-jmespath"
-)
-
-var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
-
-// rValuesAtPath returns a slice of values found in value v. The values
-// in v are explored recursively so all nested values are collected.
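The practical difference between DeepEqual above and reflect.DeepEqual is the top-level indirection; a minimal sketch, with illustrative values:

package main

import (
	"fmt"
	"reflect"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
)

func main() {
	s := "hello"

	// awsutil.DeepEqual dereferences both arguments first, so a *string
	// and a string holding the same text compare equal.
	fmt.Println(awsutil.DeepEqual(aws.String(s), s)) // true

	// reflect.DeepEqual sees different types and reports false.
	fmt.Println(reflect.DeepEqual(aws.String(s), s)) // false
}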
-func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. - value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, value := range values { - value := reflect.Indirect(value) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. 
-func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. -func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - dstVal.Set(srcVal) - } - -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go deleted file mode 100644 index fc38172fe..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ /dev/null @@ -1,107 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Prettify returns the string representation of a value. -func Prettify(i interface{}) string { - var buf bytes.Buffer - prettify(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -// prettify will recursively walk value v to build a textual -// representation of the value. 
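A sketch of the two path accessors above; the outer/inner types and the path are hypothetical, and it assumes the JMESPath engine resolves struct fields as the SDK's waiters rely on:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type inner struct{ Name string }
type outer struct{ Items []inner }

func main() {
	v := outer{Items: []inner{{Name: "a"}, {Name: "b"}}}

	// ValuesAtPath delegates to jmespath.Search and normalizes the result
	// into a slice of values.
	vals, err := awsutil.ValuesAtPath(v, "Items[0].Name")
	fmt.Println(vals, err) // [a] <nil>

	// SetValueAtPath walks with rValuesAtPath, creating intermediate
	// values along the path as needed.
	awsutil.SetValueAtPath(&v, "Items[0].Name", "z")
	fmt.Println(v.Items[0].Name) // z
}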
-func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
-	for v.Kind() == reflect.Ptr {
-		v = v.Elem()
-	}
-
-	switch v.Kind() {
-	case reflect.Struct:
-		strtype := v.Type().String()
-		if strtype == "time.Time" {
-			fmt.Fprintf(buf, "%s", v.Interface())
-			break
-		} else if strings.HasPrefix(strtype, "io.") {
-			buf.WriteString("<buffer>")
-			break
-		}
-
-		buf.WriteString("{\n")
-
-		names := []string{}
-		for i := 0; i < v.Type().NumField(); i++ {
-			name := v.Type().Field(i).Name
-			f := v.Field(i)
-			if name[0:1] == strings.ToLower(name[0:1]) {
-				continue // ignore unexported fields
-			}
-			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
-				continue // ignore unset fields
-			}
-			names = append(names, name)
-		}
-
-		for i, n := range names {
-			val := v.FieldByName(n)
-			buf.WriteString(strings.Repeat(" ", indent+2))
-			buf.WriteString(n + ": ")
-			prettify(val, indent+2, buf)
-
-			if i < len(names)-1 {
-				buf.WriteString(",\n")
-			}
-		}
-
-		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
-	case reflect.Slice:
-		nl, id, id2 := "", "", ""
-		if v.Len() > 3 {
-			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
-		}
-		buf.WriteString("[" + nl)
-		for i := 0; i < v.Len(); i++ {
-			buf.WriteString(id2)
-			prettify(v.Index(i), indent+2, buf)
-
-			if i < v.Len()-1 {
-				buf.WriteString("," + nl)
-			}
-		}
-
-		buf.WriteString(nl + id + "]")
-	case reflect.Map:
-		buf.WriteString("{\n")
-
-		for i, k := range v.MapKeys() {
-			buf.WriteString(strings.Repeat(" ", indent+2))
-			buf.WriteString(k.String() + ": ")
-			prettify(v.MapIndex(k), indent+2, buf)
-
-			if i < v.Len()-1 {
-				buf.WriteString(",\n")
-			}
-		}
-
-		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
-	default:
-		if !v.IsValid() {
-			fmt.Fprint(buf, "<invalid value>")
-			return
-		}
-		format := "%v"
-		switch v.Interface().(type) {
-		case string:
-			format = "%q"
-		case io.ReadSeeker, io.Reader:
-			format = "buffer(%p)"
-		}
-		fmt.Fprintf(buf, format, v.Interface())
-	}
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
deleted file mode 100644
index b6432f1a1..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package awsutil
-
-import (
-	"bytes"
-	"fmt"
-	"reflect"
-	"strings"
-)
-
-// StringValue returns the string representation of a value.
-func StringValue(i interface{}) string { - var buf bytes.Buffer - stringValue(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - stringValue(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - stringValue(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - stringValue(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go deleted file mode 100644 index c8d0564d8..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go +++ /dev/null @@ -1,120 +0,0 @@ -package client - -import ( - "fmt" - "io/ioutil" - "net/http/httputil" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides configuration to a service client instance. -type Config struct { - Config *aws.Config - Handlers request.Handlers - Endpoint, SigningRegion string -} - -// ConfigProvider provides a generic way for a service client to receive -// the ClientConfig without circular dependencies. -type ConfigProvider interface { - ClientConfig(serviceName string, cfgs ...*aws.Config) Config -} - -// A Client implements the base client request and response handling -// used by all service clients. -type Client struct { - request.Retryer - metadata.ClientInfo - - Config aws.Config - Handlers request.Handlers -} - -// New will return a pointer to a new initialized service client. 
-func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { - svc := &Client{ - Config: cfg, - ClientInfo: info, - Handlers: handlers, - } - - switch retryer, ok := cfg.Retryer.(request.Retryer); { - case ok: - svc.Retryer = retryer - case cfg.Retryer != nil && cfg.Logger != nil: - s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) - cfg.Logger.Log(s) - fallthrough - default: - maxRetries := aws.IntValue(cfg.MaxRetries) - if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = 3 - } - svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} - } - - svc.AddDebugHandlers() - - for _, option := range options { - option(svc) - } - - return svc -} - -// NewRequest returns a new Request pointer for the service API -// operation and parameters. -func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { - return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) -} - -// AddDebugHandlers injects debug logging handlers into the service to log request -// debug information. -func (c *Client) AddDebugHandlers() { - if !c.Config.LogLevel.AtLeast(aws.LogDebug) { - return - } - - c.Handlers.Send.PushFront(logRequest) - c.Handlers.Send.PushBack(logResponse) -} - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -func logRequest(r *request.Request) { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) - - if logBody { - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. - r.Body.Seek(r.BodyStart, 0) - r.HTTPRequest.Body = ioutil.NopCloser(r.Body) - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -func logResponse(r *request.Request) { - var msg = "no response data" - if r.HTTPResponse != nil { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) - msg = string(dumpedBody) - } else if r.Error != nil { - msg = r.Error.Error() - } - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go deleted file mode 100644 index 43a3676b7..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ /dev/null @@ -1,90 +0,0 @@ -package client - -import ( - "math/rand" - "sync" - "time" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// DefaultRetryer implements basic retry logic using exponential backoff for -// most services. If you want to implement custom retry logic, implement the -// request.Retryer interface or create a structure type that composes this -// struct and override the specific methods. 
For example, to override only
-// the MaxRetries method:
-//
-//    type retryer struct {
-//        service.DefaultRetryer
-//    }
-//
-//    // This implementation always has 100 max retries
-//    func (d retryer) MaxRetries() uint { return 100 }
-type DefaultRetryer struct {
-	NumMaxRetries int
-}
-
-// MaxRetries returns the maximum number of retries the service will use to make
-// an individual API request.
-func (d DefaultRetryer) MaxRetries() int {
-	return d.NumMaxRetries
-}
-
-var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
-
-// RetryRules returns the delay duration before retrying this request again
-func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
-	// Set the upper limit of delay in retrying at ~five minutes
-	minTime := 30
-	throttle := d.shouldThrottle(r)
-	if throttle {
-		minTime = 500
-	}
-
-	retryCount := r.RetryCount
-	if retryCount > 13 {
-		retryCount = 13
-	} else if throttle && retryCount > 8 {
-		retryCount = 8
-	}
-
-	delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
-	return time.Duration(delay) * time.Millisecond
-}
-
-// ShouldRetry returns true if the request should be retried.
-func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
-	if r.HTTPResponse.StatusCode >= 500 {
-		return true
-	}
-	return r.IsErrorRetryable() || d.shouldThrottle(r)
-}
-
-// shouldThrottle returns true if the request should be throttled.
-func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
-	if r.HTTPResponse.StatusCode == 502 ||
-		r.HTTPResponse.StatusCode == 503 ||
-		r.HTTPResponse.StatusCode == 504 {
-		return true
-	}
-	return r.IsErrorThrottle()
-}
-
-// lockedSource is a thread-safe implementation of rand.Source
-type lockedSource struct {
-	lk  sync.Mutex
-	src rand.Source
-}
-
-func (r *lockedSource) Int63() (n int64) {
-	r.lk.Lock()
-	n = r.src.Int63()
-	r.lk.Unlock()
-	return
-}
-
-func (r *lockedSource) Seed(seed int64) {
-	r.lk.Lock()
-	r.src.Seed(seed)
-	r.lk.Unlock()
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
deleted file mode 100644
index 4778056dd..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package metadata
-
-// ClientInfo wraps immutable data from the client.Client structure.
-type ClientInfo struct {
-	ServiceName   string
-	APIVersion    string
-	Endpoint      string
-	SigningName   string
-	SigningRegion string
-	JSONVersion   string
-	TargetPrefix  string
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/config.go b/vendor/src/github.com/aws/aws-sdk-go/aws/config.go
deleted file mode 100644
index bfaa15203..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/config.go
+++ /dev/null
@@ -1,358 +0,0 @@
-package aws
-
-import (
-	"net/http"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws/credentials"
-)
-
-// UseServiceDefaultRetries instructs the config to use the service's own default
-// number of retries. This will be the default action if Config.MaxRetries
-// is nil also.
-const UseServiceDefaultRetries = -1
-
-// RequestRetryer is an alias for a type that implements the request.Retryer interface.
-type RequestRetryer interface{}
-
-// A Config provides service configuration for service clients. By default,
-// all clients will use the {defaults.DefaultConfig} structure.
-type Config struct {
-	// Enables verbose error printing of all credential chain errors.
-	// Should be used when wanting to see all errors while attempting to retrieve
-	// credentials.
-	CredentialsChainVerboseErrors *bool
-
-	// The credentials object to use when signing requests. Defaults to
-	// a chain of credential providers to search for credentials in environment
-	// variables, shared credential file, and EC2 Instance Roles.
-	Credentials *credentials.Credentials
-
-	// An optional endpoint URL (hostname only or fully qualified URI)
-	// that overrides the default generated endpoint for a client. Set this
-	// to `""` to use the default generated endpoint.
-	//
-	// @note You must still provide a `Region` value when specifying an
-	//   endpoint for a client.
-	Endpoint *string
-
-	// The region to send requests to. This parameter is required and must
-	// be configured globally or on a per-client basis unless otherwise
-	// noted. A full list of regions is found in the "Regions and Endpoints"
-	// document.
-	//
-	// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
-	//   AWS Regions and Endpoints
-	Region *string
-
-	// Set this to `true` to disable SSL when sending requests. Defaults
-	// to `false`.
-	DisableSSL *bool
-
-	// The HTTP client to use when sending requests. Defaults to
-	// `http.DefaultClient`.
-	HTTPClient *http.Client
-
-	// An integer value representing the logging level. The default log level
-	// is zero (LogOff), which represents no logging. To enable logging set
-	// to a LogLevel Value.
-	LogLevel *LogLevelType
-
-	// The logger writer interface to write logging messages to. Defaults to
-	// standard out.
-	Logger Logger
-
-	// The maximum number of times that a request will be retried for failures.
-	// Defaults to -1, which defers the max retry setting to the service specific
-	// configuration.
-	MaxRetries *int
-
-	// Retryer guides how HTTP requests should be retried in case of recoverable failures.
-	//
-	// When nil or the value does not implement the request.Retryer interface,
-	// the request.DefaultRetryer will be used.
-	//
-	// When both Retryer and MaxRetries are non-nil, the former is used and
-	// the latter ignored.
-	//
-	// To set the Retryer field in a type-safe manner and with chaining, use
-	// the request.WithRetryer helper function:
-	//
-	//   cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
-	//
-	Retryer RequestRetryer
-
-	// Disables semantic parameter validation, which validates input for missing
-	// required fields and/or other semantic request input errors.
-	DisableParamValidation *bool
-
-	// Disables the computation of request and response checksums, e.g.,
-	// CRC32 checksums in Amazon DynamoDB.
-	DisableComputeChecksums *bool
-
-	// Set this to `true` to force the request to use path-style addressing,
-	// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will
-	// use virtual hosted bucket addressing when possible
-	// (`http://BUCKET.s3.amazonaws.com/KEY`).
-	//
-	// @note This configuration option is specific to the Amazon S3 service.
-	// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
-	//   Amazon S3: Virtual Hosting of Buckets
-	S3ForcePathStyle *bool
-
-	// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
-	// header to PUT requests over 2MB of content. 100-Continue instructs the
-	// HTTP client not to send the body until the service responds with a
-	// `continue` status. This is useful to prevent sending the request body
-	// until after the request is authenticated, and validated.
-	//
-	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
-	//
-	// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
-	// `ExpectContinueTimeout` for information on adjusting the continue wait timeout.
-	// https://golang.org/pkg/net/http/#Transport
-	//
-	// You should use this flag to disable 100-Continue if you experience issues
-	// with proxies or third party S3 compatible services.
-	S3Disable100Continue *bool
-
-	// Set this to `true` to enable the S3 Accelerate feature. All operations compatible
-	// with S3 Accelerate will use the accelerate endpoint for requests. Requests not compatible
-	// will fall back to normal S3 requests.
-	//
-	// The bucket must be enabled for accelerate to be used with the S3 client with accelerate
-	// enabled. If the bucket is not enabled for accelerate an error will be returned.
-	// The bucket name must be DNS compatible to also work with accelerate.
-	S3UseAccelerate *bool
-
-	// Set this to `true` to disable the EC2Metadata client from overriding the
-	// default http.Client's Timeout. This is helpful if you do not want the EC2Metadata
-	// client to create a new http.Client. This option is only meaningful if you're not
-	// already using a custom HTTP client with the SDK. Enabled by default.
-	//
-	// Must be set and provided to the session.New() in order to disable the EC2Metadata
-	// overriding the timeout for default credentials chain.
-	//
-	// Example:
-	//    sess := session.New(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))
-	//    svc := s3.New(sess)
-	//
-	EC2MetadataDisableTimeoutOverride *bool
-
-	SleepDelay func(time.Duration)
-}
-
-// NewConfig returns a new Config pointer that can be chained with builder methods to
-// set multiple configuration values inline without using pointers.
-//
-//     svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
-//
-func NewConfig() *Config {
-	return &Config{}
-}
-
-// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returns
-// a Config pointer.
-func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
-	c.CredentialsChainVerboseErrors = &verboseErrs
-	return c
-}
-
-// WithCredentials sets a config Credentials value returning a Config pointer
-// for chaining.
-func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
-	c.Credentials = creds
-	return c
-}
-
-// WithEndpoint sets a config Endpoint value returning a Config pointer for
-// chaining.
-func (c *Config) WithEndpoint(endpoint string) *Config {
-	c.Endpoint = &endpoint
-	return c
-}
-
-// WithRegion sets a config Region value returning a Config pointer for
-// chaining.
-func (c *Config) WithRegion(region string) *Config {
-	c.Region = &region
-	return c
-}
-
-// WithDisableSSL sets a config DisableSSL value returning a Config pointer
-// for chaining.
-func (c *Config) WithDisableSSL(disable bool) *Config {
-	c.DisableSSL = &disable
-	return c
-}
-
-// WithHTTPClient sets a config HTTPClient value returning a Config pointer
-// for chaining.
-func (c *Config) WithHTTPClient(client *http.Client) *Config {
-	c.HTTPClient = client
-	return c
-}
-
-// WithMaxRetries sets a config MaxRetries value returning a Config pointer
-// for chaining.
-func (c *Config) WithMaxRetries(max int) *Config {
-	c.MaxRetries = &max
-	return c
-}
-
-// WithDisableParamValidation sets a config DisableParamValidation value
-// returning a Config pointer for chaining.
-func (c *Config) WithDisableParamValidation(disable bool) *Config { - c.DisableParamValidation = &disable - return c -} - -// WithDisableComputeChecksums sets a config DisableComputeChecksums value -// returning a Config pointer for chaining. -func (c *Config) WithDisableComputeChecksums(disable bool) *Config { - c.DisableComputeChecksums = &disable - return c -} - -// WithLogLevel sets a config LogLevel value returning a Config pointer for -// chaining. -func (c *Config) WithLogLevel(level LogLevelType) *Config { - c.LogLevel = &level - return c -} - -// WithLogger sets a config Logger value returning a Config pointer for -// chaining. -func (c *Config) WithLogger(logger Logger) *Config { - c.Logger = logger - return c -} - -// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config -// pointer for chaining. -func (c *Config) WithS3ForcePathStyle(force bool) *Config { - c.S3ForcePathStyle = &force - return c -} - -// WithS3Disable100Continue sets a config S3Disable100Continue value returning -// a Config pointer for chaining. -func (c *Config) WithS3Disable100Continue(disable bool) *Config { - c.S3Disable100Continue = &disable - return c -} - -// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config -// pointer for chaining. -func (c *Config) WithS3UseAccelerate(enable bool) *Config { - c.S3UseAccelerate = &enable - return c -} - -// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value -// returning a Config pointer for chaining. -func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { - c.EC2MetadataDisableTimeoutOverride = &enable - return c -} - -// WithSleepDelay overrides the function used to sleep while waiting for the -// next retry. Defaults to time.Sleep. -func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { - c.SleepDelay = fn - return c -} - -// MergeIn merges the passed in configs into the existing config object. 
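A short sketch of how the chaining and merge semantics above combine; the region and retry values are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	base := aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)

	// MergeIn copies only the fields that are set on the other Config,
	// so Region survives and only MaxRetries is overridden.
	base.MergeIn(aws.NewConfig().WithMaxRetries(3))
	fmt.Println(aws.StringValue(base.Region), aws.IntValue(base.MaxRetries)) // us-west-2 3

	// Copy leaves the receiver untouched and merges extras into the result.
	c2 := base.Copy(aws.NewConfig().WithDisableSSL(true))
	fmt.Println(aws.BoolValue(c2.DisableSSL)) // true
}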
-func (c *Config) MergeIn(cfgs ...*Config) {
-	for _, other := range cfgs {
-		mergeInConfig(c, other)
-	}
-}
-
-func mergeInConfig(dst *Config, other *Config) {
-	if other == nil {
-		return
-	}
-
-	if other.CredentialsChainVerboseErrors != nil {
-		dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
-	}
-
-	if other.Credentials != nil {
-		dst.Credentials = other.Credentials
-	}
-
-	if other.Endpoint != nil {
-		dst.Endpoint = other.Endpoint
-	}
-
-	if other.Region != nil {
-		dst.Region = other.Region
-	}
-
-	if other.DisableSSL != nil {
-		dst.DisableSSL = other.DisableSSL
-	}
-
-	if other.HTTPClient != nil {
-		dst.HTTPClient = other.HTTPClient
-	}
-
-	if other.LogLevel != nil {
-		dst.LogLevel = other.LogLevel
-	}
-
-	if other.Logger != nil {
-		dst.Logger = other.Logger
-	}
-
-	if other.MaxRetries != nil {
-		dst.MaxRetries = other.MaxRetries
-	}
-
-	if other.Retryer != nil {
-		dst.Retryer = other.Retryer
-	}
-
-	if other.DisableParamValidation != nil {
-		dst.DisableParamValidation = other.DisableParamValidation
-	}
-
-	if other.DisableComputeChecksums != nil {
-		dst.DisableComputeChecksums = other.DisableComputeChecksums
-	}
-
-	if other.S3ForcePathStyle != nil {
-		dst.S3ForcePathStyle = other.S3ForcePathStyle
-	}
-
-	if other.S3Disable100Continue != nil {
-		dst.S3Disable100Continue = other.S3Disable100Continue
-	}
-
-	if other.S3UseAccelerate != nil {
-		dst.S3UseAccelerate = other.S3UseAccelerate
-	}
-
-	if other.EC2MetadataDisableTimeoutOverride != nil {
-		dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
-	}
-
-	if other.SleepDelay != nil {
-		dst.SleepDelay = other.SleepDelay
-	}
-}
-
-// Copy will return a shallow copy of the Config object. If any additional
-// configurations are provided they will be merged into the new config returned.
-func (c *Config) Copy(cfgs ...*Config) *Config {
-	dst := &Config{}
-	dst.MergeIn(c)
-
-	for _, cfg := range cfgs {
-		dst.MergeIn(cfg)
-	}
-
-	return dst
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go
deleted file mode 100644
index cff5c5c8a..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go
+++ /dev/null
@@ -1,369 +0,0 @@
-package aws
-
-import "time"
-
-// String returns a pointer to the string value passed in.
-func String(v string) *string {
-	return &v
-}
-
-// StringValue returns the value of the string pointer passed in or
-// "" if the pointer is nil.
-func StringValue(v *string) string {
-	if v != nil {
-		return *v
-	}
-	return ""
-}
-
-// StringSlice converts a slice of string values into a slice of
-// string pointers
-func StringSlice(src []string) []*string {
-	dst := make([]*string, len(src))
-	for i := 0; i < len(src); i++ {
-		dst[i] = &(src[i])
-	}
-	return dst
-}
-
-// StringValueSlice converts a slice of string pointers into a slice of
-// string values
-func StringValueSlice(src []*string) []string {
-	dst := make([]string, len(src))
-	for i := 0; i < len(src); i++ {
-		if src[i] != nil {
-			dst[i] = *(src[i])
-		}
-	}
-	return dst
-}
-
-// StringMap converts a string map of string values into a string
-// map of string pointers
-func StringMap(src map[string]string) map[string]*string {
-	dst := make(map[string]*string)
-	for k, val := range src {
-		v := val
-		dst[k] = &v
-	}
-	return dst
-}
-
-// StringValueMap converts a string map of string pointers into a string
-// map of string values
-func StringValueMap(src map[string]*string) map[string]string {
-	dst := make(map[string]string)
-	for k, val := range src {
-		if val != nil {
-			dst[k] = *val
-		}
-	}
-	return dst
-}
-
-// Bool returns a pointer to the bool value passed in.
-func Bool(v bool) *bool {
-	return &v
-}
-
-// BoolValue returns the value of the bool pointer passed in or
-// false if the pointer is nil.
-func BoolValue(v *bool) bool {
-	if v != nil {
-		return *v
-	}
-	return false
-}
-
-// BoolSlice converts a slice of bool values into a slice of
-// bool pointers
-func BoolSlice(src []bool) []*bool {
-	dst := make([]*bool, len(src))
-	for i := 0; i < len(src); i++ {
-		dst[i] = &(src[i])
-	}
-	return dst
-}
-
-// BoolValueSlice converts a slice of bool pointers into a slice of
-// bool values
-func BoolValueSlice(src []*bool) []bool {
-	dst := make([]bool, len(src))
-	for i := 0; i < len(src); i++ {
-		if src[i] != nil {
-			dst[i] = *(src[i])
-		}
-	}
-	return dst
-}
-
-// BoolMap converts a string map of bool values into a string
-// map of bool pointers
-func BoolMap(src map[string]bool) map[string]*bool {
-	dst := make(map[string]*bool)
-	for k, val := range src {
-		v := val
-		dst[k] = &v
-	}
-	return dst
-}
-
-// BoolValueMap converts a string map of bool pointers into a string
-// map of bool values
-func BoolValueMap(src map[string]*bool) map[string]bool {
-	dst := make(map[string]bool)
-	for k, val := range src {
-		if val != nil {
-			dst[k] = *val
-		}
-	}
-	return dst
-}
-
-// Int returns a pointer to the int value passed in.
-func Int(v int) *int {
-	return &v
-}
-
-// IntValue returns the value of the int pointer passed in or
-// 0 if the pointer is nil.
-func IntValue(v *int) int {
-	if v != nil {
-		return *v
-	}
-	return 0
-}
-
-// IntSlice converts a slice of int values into a slice of
-// int pointers
-func IntSlice(src []int) []*int {
-	dst := make([]*int, len(src))
-	for i := 0; i < len(src); i++ {
-		dst[i] = &(src[i])
-	}
-	return dst
-}
-
-// IntValueSlice converts a slice of int pointers into a slice of
-// int values
-func IntValueSlice(src []*int) []int {
-	dst := make([]int, len(src))
-	for i := 0; i < len(src); i++ {
-		if src[i] != nil {
-			dst[i] = *(src[i])
-		}
-	}
-	return dst
-}
-
-// IntMap converts a string map of int values into a string
-// map of int pointers
-func IntMap(src map[string]int) map[string]*int {
-	dst := make(map[string]*int)
-	for k, val := range src {
-		v := val
-		dst[k] = &v
-	}
-	return dst
-}
-
-// IntValueMap converts a string map of int pointers into a string
-// map of int values
-func IntValueMap(src map[string]*int) map[string]int {
-	dst := make(map[string]int)
-	for k, val := range src {
-		if val != nil {
-			dst[k] = *val
-		}
-	}
-	return dst
-}
-
-// Int64 returns a pointer to the int64 value passed in.
-func Int64(v int64) *int64 {
-	return &v
-}
-
-// Int64Value returns the value of the int64 pointer passed in or
-// 0 if the pointer is nil.
-func Int64Value(v *int64) int64 {
-	if v != nil {
-		return *v
-	}
-	return 0
-}
-
-// Int64Slice converts a slice of int64 values into a slice of
-// int64 pointers
-func Int64Slice(src []int64) []*int64 {
-	dst := make([]*int64, len(src))
-	for i := 0; i < len(src); i++ {
-		dst[i] = &(src[i])
-	}
-	return dst
-}
-
-// Int64ValueSlice converts a slice of int64 pointers into a slice of
-// int64 values
-func Int64ValueSlice(src []*int64) []int64 {
-	dst := make([]int64, len(src))
-	for i := 0; i < len(src); i++ {
-		if src[i] != nil {
-			dst[i] = *(src[i])
-		}
-	}
-	return dst
-}
-
-// Int64Map converts a string map of int64 values into a string
-// map of int64 pointers
-func Int64Map(src map[string]int64) map[string]*int64 {
-	dst := make(map[string]*int64)
-	for k, val := range src {
-		v := val
-		dst[k] = &v
-	}
-	return dst
-}
-
-// Int64ValueMap converts a string map of int64 pointers into a string
-// map of int64 values
-func Int64ValueMap(src map[string]*int64) map[string]int64 {
-	dst := make(map[string]int64)
-	for k, val := range src {
-		if val != nil {
-			dst[k] = *val
-		}
-	}
-	return dst
-}
-
-// Float64 returns a pointer to the float64 value passed in.
-func Float64(v float64) *float64 {
-	return &v
-}
-
-// Float64Value returns the value of the float64 pointer passed in or
-// 0 if the pointer is nil.
-func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to of the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. -func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". -// The result is undefined if the Unix time cannot be represented by an int64. -// Which includes calling TimeUnixMilli on a zero Time is undefined. -// -// This utility is useful for service API's such as CloudWatch Logs which require -// their unix time values to be in milliseconds. -// -// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
-func TimeUnixMilli(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go deleted file mode 100644 index 8456e29b5..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ /dev/null @@ -1,152 +0,0 @@ -package corehandlers - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "runtime" - "strconv" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Interface for matching types which also have a Len method. -type lener interface { - Len() int -} - -// BuildContentLengthHandler builds the content length of a request based on the body, -// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable -// to determine request body length and no "Content-Length" was specified it will panic. -// -// The Content-Length will only be aded to the request if the length of the body -// is greater than 0. If the body is empty or the current `Content-Length` -// header is <= 0, the header will also be stripped. -var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { - var length int64 - - if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { - length, _ = strconv.ParseInt(slength, 10, 64) - } else { - switch body := r.Body.(type) { - case nil: - length = 0 - case lener: - length = int64(body.Len()) - case io.Seeker: - r.BodyStart, _ = body.Seek(0, 1) - end, _ := body.Seek(0, 2) - body.Seek(r.BodyStart, 0) // make sure to seek back to original location - length = end - r.BodyStart - default: - panic("Cannot get length of body, must provide `ContentLength`") - } - } - - if length > 0 { - r.HTTPRequest.ContentLength = length - r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) - } else { - r.HTTPRequest.ContentLength = 0 - r.HTTPRequest.Header.Del("Content-Length") - } -}} - -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. 
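A minimal sketch of how the conversion helpers above are typically used (illustrative only, not part of the patch; it assumes the vendored "github.com/aws/aws-sdk-go/aws" import path):

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/aws/aws-sdk-go/aws"
    )

    func main() {
    	// Wrap plain values in pointers for SDK input structs.
    	name := aws.String("example")
    	size := aws.Int64(1024)

    	// Unwrap pointers safely; nil yields the zero value.
    	fmt.Println(aws.StringValue(name), aws.Int64Value(size))
    	fmt.Println(aws.StringValue(nil) == "") // true

    	// Millisecond timestamps for services such as CloudWatch Logs.
    	fmt.Println(aws.TimeUnixMilli(time.Now()))
    }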
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
deleted file mode 100644
index 8456e29b5..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package corehandlers
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"regexp"
-	"runtime"
-	"strconv"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/request"
-)
-
-// Interface for matching types which also have a Len method.
-type lener interface {
-	Len() int
-}
-
-// BuildContentLengthHandler builds the content length of a request based on the body,
-// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
-// to determine the request body length and no "Content-Length" was specified it will panic.
-//
-// The Content-Length will only be added to the request if the length of the body
-// is greater than 0. If the body is empty or the current `Content-Length`
-// header is <= 0, the header will also be stripped.
-var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
-	var length int64
-
-	if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
-		length, _ = strconv.ParseInt(slength, 10, 64)
-	} else {
-		switch body := r.Body.(type) {
-		case nil:
-			length = 0
-		case lener:
-			length = int64(body.Len())
-		case io.Seeker:
-			r.BodyStart, _ = body.Seek(0, 1)
-			end, _ := body.Seek(0, 2)
-			body.Seek(r.BodyStart, 0) // make sure to seek back to original location
-			length = end - r.BodyStart
-		default:
-			panic("Cannot get length of body, must provide `ContentLength`")
-		}
-	}
-
-	if length > 0 {
-		r.HTTPRequest.ContentLength = length
-		r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
-	} else {
-		r.HTTPRequest.ContentLength = 0
-		r.HTTPRequest.Header.Del("Content-Length")
-	}
-}}
-
-// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
-var SDKVersionUserAgentHandler = request.NamedHandler{
-	Name: "core.SDKVersionUserAgentHandler",
-	Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
-		runtime.Version(), runtime.GOOS, runtime.GOARCH),
-}
-
-var reStatusCode = regexp.MustCompile(`^(\d{3})`)
-
-// SendHandler is a request handler to send a service request using the HTTP client.
-var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) {
-	var err error
-	r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
-	if err != nil {
-		// Prevent leaking if an HTTPResponse was returned. Clean up
-		// the body.
-		if r.HTTPResponse != nil {
-			r.HTTPResponse.Body.Close()
-		}
-		// Capture the case where url.Error is returned for error processing
-		// response. e.g. 301 without location header comes back as string
-		// error and r.HTTPResponse is nil. Other url redirect errors will
-		// come back in a similar way.
-		if e, ok := err.(*url.Error); ok && e.Err != nil {
-			if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
-				code, _ := strconv.ParseInt(s[1], 10, 64)
-				r.HTTPResponse = &http.Response{
-					StatusCode: int(code),
-					Status:     http.StatusText(int(code)),
-					Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
-				}
-				return
-			}
-		}
-		if r.HTTPResponse == nil {
-			// Add a dummy request response object to ensure the HTTPResponse
-			// value is consistent.
-			r.HTTPResponse = &http.Response{
-				StatusCode: int(0),
-				Status:     http.StatusText(int(0)),
-				Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
-			}
-		}
-		// Catch all other request errors.
-		r.Error = awserr.New("RequestError", "send request failed", err)
-		r.Retryable = aws.Bool(true) // network errors are retryable
-	}
-}}
-
-// ValidateResponseHandler is a request handler to validate a service response.
-var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
-	if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
-		// this may be replaced by an UnmarshalError handler
-		r.Error = awserr.New("UnknownError", "unknown error", nil)
-	}
-}}
-
-// AfterRetryHandler performs final checks to determine if the request should
-// be retried and how long to delay.
-var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
-	// If one of the other handlers already set the retry state
-	// we don't want to override it based on the service's state
-	if r.Retryable == nil {
-		r.Retryable = aws.Bool(r.ShouldRetry(r))
-	}
-
-	if r.WillRetry() {
-		r.RetryDelay = r.RetryRules(r)
-		r.Config.SleepDelay(r.RetryDelay)
-
-		// when the expired token exception occurs the credentials
-		// need to be expired locally so that the next request to
-		// get credentials will trigger a credentials refresh.
-		if r.IsErrorExpired() {
-			r.Config.Credentials.Expire()
-		}
-
-		r.RetryCount++
-		r.Error = nil
-	}
-}}
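For context, a sketch of how callers hook into these handler lists — here logging each attempt just before core.SendHandler runs. The handler name is hypothetical; the types come from the request package deleted later in this patch:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/request"
    )

    func addSendLogging(h *request.Handlers) {
    	h.Send.PushFrontNamed(request.NamedHandler{
    		Name: "example.LogSendHandler", // hypothetical name
    		Fn: func(r *request.Request) {
    			fmt.Printf("sending %s %s\n", r.Operation.Name, r.HTTPRequest.URL)
    		},
    	})
    }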
-// ValidateEndpointHandler is a request handler to validate a request had the
-// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
-// region is not valid.
-var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
-	if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
-		r.Error = aws.ErrMissingRegion
-	} else if r.ClientInfo.Endpoint == "" {
-		r.Error = aws.ErrMissingEndpoint
-	}
-}}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
deleted file mode 100644
index 7d50b1557..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package corehandlers
-
-import "github.com/aws/aws-sdk-go/aws/request"
-
-// ValidateParametersHandler is a request handler to validate the input parameters.
-// Validating parameters only has meaning if done prior to the request being sent.
-var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
-	if !r.ParamsFilled() {
-		return
-	}
-
-	if v, ok := r.Params.(request.Validator); ok {
-		if err := v.Validate(); err != nil {
-			r.Error = err
-		}
-	}
-}}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
deleted file mode 100644
index 857311f64..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package credentials
-
-import (
-	"github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-var (
-	// ErrNoValidProvidersFoundInChain is returned when there are no valid
-	// providers in the ChainProvider.
-	//
-	// This has been deprecated. For verbose error messaging set
-	// aws.Config.CredentialsChainVerboseErrors to true.
-	//
-	// @readonly
-	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
-		`no valid providers in chain. Deprecated.
-	For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
-		nil)
-)
-
-// A ChainProvider will search for a provider which returns credentials
-// and cache that provider until Retrieve is called again.
-//
-// The ChainProvider provides a way of chaining multiple providers together
-// which will pick the first available using the priority order of the Providers
-// in the list.
-//
-// If none of the Providers retrieve valid credentials Value, ChainProvider's
-// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
-//
-// If a Provider is found which returns valid credentials Value ChainProvider
-// will cache that Provider for all calls to IsExpired(), until Retrieve is
-// called again.
-//
-// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
-// In this example EnvProvider will first check if any credentials are available
-// via the environment variables. If there are none ChainProvider will check
-// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
-// does not return any credentials ChainProvider will return the error
-// ErrNoValidProvidersFoundInChain.
-//
-//     creds := NewChainCredentials(
-//         []Provider{
-//             &EnvProvider{},
-//             &EC2RoleProvider{
-//                 Client: ec2metadata.New(sess),
-//             },
-//         })
-//
-//     // Usage of ChainCredentials with aws.Config
-//     svc := ec2.New(&aws.Config{Credentials: creds})
-//
-type ChainProvider struct {
-	Providers     []Provider
-	curr          Provider
-	VerboseErrors bool
-}
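A sketch of building a chain directly with verbose errors enabled, using only the types defined in this file (provider order is the priority order):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
    	creds := credentials.NewCredentials(&credentials.ChainProvider{
    		VerboseErrors: true, // report every provider's error on failure
    		Providers: []credentials.Provider{
    			&credentials.EnvProvider{},
    			&credentials.SharedCredentialsProvider{},
    		},
    	})

    	if v, err := creds.Get(); err != nil {
    		fmt.Println("no credentials:", err)
    	} else {
    		fmt.Println("resolved via", v.ProviderName)
    	}
    }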
-// NewChainCredentials returns a pointer to a new Credentials object
-// wrapping a chain of providers.
-func NewChainCredentials(providers []Provider) *Credentials {
-	return NewCredentials(&ChainProvider{
-		Providers: append([]Provider{}, providers...),
-	})
-}
-
-// Retrieve returns the credentials value, or an error if no provider returned
-// one without error.
-//
-// If a provider is found it will be cached and any calls to IsExpired()
-// will return the expired state of the cached provider.
-func (c *ChainProvider) Retrieve() (Value, error) {
-	var errs []error
-	for _, p := range c.Providers {
-		creds, err := p.Retrieve()
-		if err == nil {
-			c.curr = p
-			return creds, nil
-		}
-		errs = append(errs, err)
-	}
-	c.curr = nil
-
-	var err error
-	err = ErrNoValidProvidersFoundInChain
-	if c.VerboseErrors {
-		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
-	}
-	return Value{}, err
-}
-
-// IsExpired will return the expired state of the currently cached provider
-// if there is one. If there is no current provider, true will be returned.
-func (c *ChainProvider) IsExpired() bool {
-	if c.curr != nil {
-		return c.curr.IsExpired()
-	}
-
-	return true
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
deleted file mode 100644
index 7b8ebf5f9..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Package credentials provides credential retrieval and management.
-//
-// The Credentials type is the primary method of getting access to and managing
-// credential Values. Using dependency injection, retrieval of the credential
-// values is handled by an object which satisfies the Provider interface.
-//
-// By default the Credentials.Get() will cache the successful result of a
-// Provider's Retrieve() until Provider.IsExpired() returns true, at which
-// point Credentials will call the Provider's Retrieve() to get a new credential Value.
-//
-// The Provider is responsible for determining when the credentials Value has expired.
-// It is also important to note that Credentials will always call Retrieve the
-// first time Credentials.Get() is called.
-//
-// Example of using the environment variable credentials:
-//
-//     creds := NewEnvCredentials()
-//
-//     // Retrieve the credentials value
-//     credValue, err := creds.Get()
-//     if err != nil {
-//         // handle error
-//     }
-//
-// Example of forcing credentials to expire and be refreshed on the next Get().
-// This may be helpful to proactively expire credentials and refresh them sooner
-// than they would naturally expire on their own.
-//
-//     creds := NewCredentials(&EC2RoleProvider{})
-//     creds.Expire()
-//     credsValue, err := creds.Get()
-//     // New credentials will be retrieved instead of from cache.
-//
-// Custom Provider
-//
-// Each Provider built into this package also provides a helper method to generate
-// a Credentials pointer set up with the provider. To use a custom Provider just
-// create a type which satisfies the Provider interface and pass it to the
-// NewCredentials method.
-//
-//     type MyProvider struct{}
-//     func (m *MyProvider) Retrieve() (Value, error) {...}
-//     func (m *MyProvider) IsExpired() bool {...}
-//
-//     creds := NewCredentials(&MyProvider{})
-//     credValue, err := creds.Get()
-//
-package credentials
-
-import (
-	"sync"
-	"time"
-)
-
-// AnonymousCredentials is an empty Credentials object that can be used as
-// dummy placeholder credentials for requests that do not need to be signed.
-//
-// This Credentials can be used to configure a service to not sign requests
-// when making service API calls. For example, when accessing public
-// s3 buckets.
-//
-//     svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
-//     // Access public S3 buckets.
-//
-// @readonly
-var AnonymousCredentials = NewStaticCredentials("", "", "")
-
-// A Value is the AWS credentials value for individual credential fields.
-type Value struct {
-	// AWS Access key ID
-	AccessKeyID string
-
-	// AWS Secret Access Key
-	SecretAccessKey string
-
-	// AWS Session Token
-	SessionToken string
-
-	// Provider used to get credentials
-	ProviderName string
-}
-
-// A Provider is the interface for any component which will provide credentials
-// Value. A provider is required to manage its own Expired state, and what
-// being expired means.
-//
-// The Provider should not need to implement its own mutexes, because
-// that will be managed by Credentials.
-type Provider interface {
-	// Retrieve returns nil if it successfully retrieved the value.
-	// An error is returned if the value was not obtainable, or empty.
-	Retrieve() (Value, error)
-
-	// IsExpired returns if the credentials are no longer valid, and need
-	// to be retrieved.
-	IsExpired() bool
-}
-
-// An Expiry provides shared expiration logic to be used by credentials
-// providers to implement expiry functionality.
-//
-// The best method to use this struct is as an anonymous field within the
-// provider's struct.
-//
-// Example:
-//     type EC2RoleProvider struct {
-//         Expiry
-//         ...
-//     }
-type Expiry struct {
-	// The date/time when to expire on
-	expiration time.Time
-
-	// If set will be used by IsExpired to determine the current time.
-	// Defaults to time.Now if CurrentTime is not set. Available for testing
-	// to be able to mock out the current time.
-	CurrentTime func() time.Time
-}
-
-// SetExpiration sets the expiration IsExpired will check when called.
-//
-// If window is greater than 0 the expiration time will be reduced by the
-// window value.
-//
-// Using a window is helpful to trigger credentials to expire sooner than
-// the expiration time given to ensure no requests are made with expired
-// tokens.
-func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
-	e.expiration = expiration
-	if window > 0 {
-		e.expiration = e.expiration.Add(-window)
-	}
-}
-
-// IsExpired returns if the credentials are expired.
-func (e *Expiry) IsExpired() bool {
-	if e.CurrentTime == nil {
-		e.CurrentTime = time.Now
-	}
-	return e.expiration.Before(e.CurrentTime())
-}
-
-// A Credentials provides concurrency-safe retrieval of AWS credentials Value.
-// Credentials will cache the credentials value until they expire. Once the value
-// expires the next Get will attempt to retrieve valid credentials.
-//
-// Credentials is safe to use across multiple goroutines and will manage the
-// synchronization state so the Providers do not need to implement their own
-// synchronization.
-//
-// The first Credentials.Get() will always call Provider.Retrieve() to get the
-// first instance of the credentials Value. All calls to Get() after that
-// will return the cached credentials Value until IsExpired() returns true.
-type Credentials struct {
-	creds        Value
-	forceRefresh bool
-	m            sync.Mutex
-
-	provider Provider
-}
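A sketch of the caching behavior described above, using the Get and Expire methods defined below (the static keys are placeholders):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
    	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

    	// The first Get always calls the provider's Retrieve.
    	v, err := creds.Get()
    	if err != nil {
    		fmt.Println("retrieve failed:", err)
    		return
    	}
    	fmt.Println("cached value from", v.ProviderName)

    	// Force a refresh; the next Get calls Retrieve again.
    	creds.Expire()
    	_, _ = creds.Get()
    }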
-// NewCredentials returns a pointer to a new Credentials with the provider set.
-func NewCredentials(provider Provider) *Credentials {
-	return &Credentials{
-		provider:     provider,
-		forceRefresh: true,
-	}
-}
-
-// Get returns the credentials value, or an error if the credentials Value failed
-// to be retrieved.
-//
-// Will return the cached credentials Value if it has not expired. If the
-// credentials Value has expired the Provider's Retrieve() will be called
-// to refresh the credentials.
-//
-// If Credentials.Expire() was called the credentials Value will be force
-// expired, and the next call to Get() will cause them to be refreshed.
-func (c *Credentials) Get() (Value, error) {
-	c.m.Lock()
-	defer c.m.Unlock()
-
-	if c.isExpired() {
-		creds, err := c.provider.Retrieve()
-		if err != nil {
-			return Value{}, err
-		}
-		c.creds = creds
-		c.forceRefresh = false
-	}
-
-	return c.creds, nil
-}
-
-// Expire expires the credentials and forces them to be retrieved on the
-// next call to Get().
-//
-// This will override the Provider's expired state, and force Credentials
-// to call the Provider's Retrieve().
-func (c *Credentials) Expire() {
-	c.m.Lock()
-	defer c.m.Unlock()
-
-	c.forceRefresh = true
-}
-
-// IsExpired returns if the credentials are no longer valid, and need
-// to be retrieved.
-//
-// If the Credentials were forced to be expired with Expire() this will
-// reflect that override.
-func (c *Credentials) IsExpired() bool {
-	c.m.Lock()
-	defer c.m.Unlock()
-
-	return c.isExpired()
-}
-
-// isExpired helper method wrapping the definition of expired credentials.
-func (c *Credentials) isExpired() bool {
-	return c.forceRefresh || c.provider.IsExpired()
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
deleted file mode 100644
index aa9d689a0..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package ec2rolecreds
-
-import (
-	"bufio"
-	"encoding/json"
-	"fmt"
-	"path"
-	"strings"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/client"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/ec2metadata"
-)
-
-// ProviderName provides the name of the EC2Role provider.
-const ProviderName = "EC2RoleProvider"
-
-// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
-// those credentials are expired.
-//
-// Example of how to configure the EC2RoleProvider with a custom http Client, Endpoint,
-// or ExpiryWindow:
-//
-//     p := &ec2rolecreds.EC2RoleProvider{
-//         // Pass in a custom timeout to be used when requesting
-//         // IAM EC2 Role credentials.
-//         Client: ec2metadata.New(sess, aws.Config{
-//             HTTPClient: &http.Client{Timeout: 10 * time.Second},
-//         }),
-//
-//         // Do not use early expiry of credentials. If a non-zero value is
-//         // specified the credentials will be expired early
-//         ExpiryWindow: 0,
-//     }
-type EC2RoleProvider struct {
-	credentials.Expiry
-
-	// Required EC2Metadata client to use when connecting to the EC2 metadata service.
-	Client *ec2metadata.EC2Metadata
-
-	// ExpiryWindow will allow the credentials to trigger refreshing prior to
-	// the credentials actually expiring. This is beneficial so race conditions
-	// with expiring credentials do not cause requests to fail unexpectedly
-	// due to ExpiredTokenException exceptions.
-	//
-	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
-	// 10 seconds before the credentials are actually expired.
-	//
-	// If ExpiryWindow is 0 or less it will be ignored.
-	ExpiryWindow time.Duration
-}
-
-// NewCredentials returns a pointer to a new Credentials object wrapping
-// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
-// The ConfigProvider is satisfied by the session.Session type.
-func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
-	p := &EC2RoleProvider{
-		Client: ec2metadata.New(c),
-	}
-
-	for _, option := range options {
-		option(p)
-	}
-
-	return credentials.NewCredentials(p)
-}
-
-// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
-// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to the EC2
-// metadata service.
-func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
-	p := &EC2RoleProvider{
-		Client: client,
-	}
-
-	for _, option := range options {
-		option(p)
-	}
-
-	return credentials.NewCredentials(p)
-}
-
-// Retrieve retrieves credentials from the EC2 service.
-// An error will be returned if the request fails, or if unable to extract
-// the desired credentials.
-func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
-	credsList, err := requestCredList(m.Client)
-	if err != nil {
-		return credentials.Value{ProviderName: ProviderName}, err
-	}
-
-	if len(credsList) == 0 {
-		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
-	}
-	credsName := credsList[0]
-
-	roleCreds, err := requestCred(m.Client, credsName)
-	if err != nil {
-		return credentials.Value{ProviderName: ProviderName}, err
-	}
-
-	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
-
-	return credentials.Value{
-		AccessKeyID:     roleCreds.AccessKeyID,
-		SecretAccessKey: roleCreds.SecretAccessKey,
-		SessionToken:    roleCreds.Token,
-		ProviderName:    ProviderName,
-	}, nil
-}
-
-// An ec2RoleCredRespBody provides the shape for unmarshalling credential
-// request responses.
-type ec2RoleCredRespBody struct {
-	// Success State
-	Expiration      time.Time
-	AccessKeyID     string
-	SecretAccessKey string
-	Token           string
-
-	// Error state
-	Code    string
-	Message string
-}
-
-const iamSecurityCredsPath = "/iam/security-credentials"
-
-// requestCredList requests a list of credentials from the EC2 service. If
-// there is an error making or receiving the request, an error will be returned.
-func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
-	resp, err := client.GetMetadata(iamSecurityCredsPath)
-	if err != nil {
-		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
-	}
-
-	credsList := []string{}
-	s := bufio.NewScanner(strings.NewReader(resp))
-	for s.Scan() {
-		credsList = append(credsList, s.Text())
-	}
-
-	if err := s.Err(); err != nil {
-		return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
-	}
-
-	return credsList, nil
-}
-
-// requestCred requests the credentials for a specific credentials name from the EC2 service.
-//
-// If the credentials cannot be found, or there is an error reading the response,
-// an error will be returned.
-func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
-	resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
-	if err != nil {
-		return ec2RoleCredRespBody{},
-			awserr.New("EC2RoleRequestError",
-				fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
-				err)
-	}
-
-	respCreds := ec2RoleCredRespBody{}
-	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
-		return ec2RoleCredRespBody{},
-			awserr.New("SerializationError",
-				fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
-				err)
-	}
-
-	if respCreds.Code != "Success" {
-		// If an error code was returned something failed requesting the role.
-		return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
-	}
-
-	return respCreds, nil
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
deleted file mode 100644
index 96655bc46..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package credentials
-
-import (
-	"os"
-
-	"github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// EnvProviderName provides the name of the Env provider.
const EnvProviderName = "EnvProvider"
-
-var (
-	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
-	// found in the process's environment.
-	//
-	// @readonly
-	ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
-
-	// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
-	// can't be found in the process's environment.
-	//
-	// @readonly
-	ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
-)
-
-// An EnvProvider retrieves credentials from the environment variables of the
-// running process. Environment credentials never expire.
-//
-// Environment variables used:
-//
-// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
-// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
-type EnvProvider struct {
-	retrieved bool
-}
-
-// NewEnvCredentials returns a pointer to a new Credentials object
-// wrapping the environment variable provider.
-func NewEnvCredentials() *Credentials {
-	return NewCredentials(&EnvProvider{})
-}
-
-// Retrieve retrieves the keys from the environment.
-func (e *EnvProvider) Retrieve() (Value, error) {
-	e.retrieved = false
-
-	id := os.Getenv("AWS_ACCESS_KEY_ID")
-	if id == "" {
-		id = os.Getenv("AWS_ACCESS_KEY")
-	}
-
-	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
-	if secret == "" {
-		secret = os.Getenv("AWS_SECRET_KEY")
-	}
-
-	if id == "" {
-		return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
-	}
-
-	if secret == "" {
-		return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
-	}
-
-	e.retrieved = true
-	return Value{
-		AccessKeyID:     id,
-		SecretAccessKey: secret,
-		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
-		ProviderName:    EnvProviderName,
-	}, nil
-}
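A sketch of the environment provider in use (the key values are placeholders):

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
    	os.Setenv("AWS_ACCESS_KEY_ID", "AKID")
    	os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET")

    	v, err := credentials.NewEnvCredentials().Get()
    	if err != nil {
    		fmt.Println("env lookup failed:", err)
    		return
    	}
    	fmt.Println("loaded keys via", v.ProviderName) // "EnvProvider"
    }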
-// IsExpired returns if the credentials have been retrieved.
-func (e *EnvProvider) IsExpired() bool {
-	return !e.retrieved
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini
deleted file mode 100644
index 7fc91d9d2..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini
+++ /dev/null
@@ -1,12 +0,0 @@
-[default]
-aws_access_key_id = accessKey
-aws_secret_access_key = secret
-aws_session_token = token
-
-[no_token]
-aws_access_key_id = accessKey
-aws_secret_access_key = secret
-
-[with_colon]
-aws_access_key_id: accessKey
-aws_secret_access_key: secret
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
deleted file mode 100644
index 7fb7cbf0d..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package credentials
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-
-	"github.com/go-ini/ini"
-
-	"github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// SharedCredsProviderName provides the name of the SharedCreds provider.
-const SharedCredsProviderName = "SharedCredentialsProvider"
-
-var (
-	// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
-	//
-	// @readonly
-	ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
-)
-
-// A SharedCredentialsProvider retrieves credentials from the current user's home
-// directory, and keeps track if those credentials are expired.
-//
-// Profile ini file example: $HOME/.aws/credentials
-type SharedCredentialsProvider struct {
-	// Path to the shared credentials file.
-	//
-	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
-	// env value is empty will default to current user's home directory.
-	// Linux/OSX: "$HOME/.aws/credentials"
-	// Windows:   "%USERPROFILE%\.aws\credentials"
-	Filename string
-
-	// AWS Profile to extract credentials from the shared credentials file. If empty
-	// will default to environment variable "AWS_PROFILE" or "default" if
-	// environment variable is also not set.
-	Profile string
-
-	// retrieved states if the credentials have been successfully retrieved.
-	retrieved bool
-}
-
-// NewSharedCredentials returns a pointer to a new Credentials object
-// wrapping the Profile file provider.
-func NewSharedCredentials(filename, profile string) *Credentials {
-	return NewCredentials(&SharedCredentialsProvider{
-		Filename: filename,
-		Profile:  profile,
-	})
-}
-
-// Retrieve reads and extracts the shared credentials from the current
-// user's home directory.
-func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
-	p.retrieved = false
-
-	filename, err := p.filename()
-	if err != nil {
-		return Value{ProviderName: SharedCredsProviderName}, err
-	}
-
-	creds, err := loadProfile(filename, p.profile())
-	if err != nil {
-		return Value{ProviderName: SharedCredsProviderName}, err
-	}
-
-	p.retrieved = true
-	return creds, nil
-}
-
-// IsExpired returns if the shared credentials have expired.
-func (p *SharedCredentialsProvider) IsExpired() bool {
-	return !p.retrieved
-}
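A sketch of reading one profile from a file laid out like example.ini above (the path here is hypothetical):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
    	creds := credentials.NewSharedCredentials("/tmp/example.ini", "no_token")

    	v, err := creds.Get()
    	if err != nil {
    		fmt.Println("profile lookup failed:", err)
    		return
    	}
    	fmt.Println(v.AccessKeyID) // "accessKey" in the example file
    }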
-// loadProfile loads from the file pointed to by the shared credentials filename for profile.
-// The credentials retrieved from the profile will be returned, or an error. An error will be
-// returned if it fails to read from the file, or if the data is invalid.
-func loadProfile(filename, profile string) (Value, error) {
-	config, err := ini.Load(filename)
-	if err != nil {
-		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
-	}
-	iniProfile, err := config.GetSection(profile)
-	if err != nil {
-		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
-	}
-
-	id, err := iniProfile.GetKey("aws_access_key_id")
-	if err != nil {
-		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
-			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
-			err)
-	}
-
-	secret, err := iniProfile.GetKey("aws_secret_access_key")
-	if err != nil {
-		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
-			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
-			nil)
-	}
-
-	// Default to empty string if not found
-	token := iniProfile.Key("aws_session_token")
-
-	return Value{
-		AccessKeyID:     id.String(),
-		SecretAccessKey: secret.String(),
-		SessionToken:    token.String(),
-		ProviderName:    SharedCredsProviderName,
-	}, nil
-}
-
-// filename returns the filename to use to read AWS shared credentials.
-//
-// Will return an error if the user's home directory path cannot be found.
-func (p *SharedCredentialsProvider) filename() (string, error) {
-	if p.Filename == "" {
-		if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
-			return p.Filename, nil
-		}
-
-		homeDir := os.Getenv("HOME") // *nix
-		if homeDir == "" {           // Windows
-			homeDir = os.Getenv("USERPROFILE")
-		}
-		if homeDir == "" {
-			return "", ErrSharedCredentialsHomeNotFound
-		}
-
-		p.Filename = filepath.Join(homeDir, ".aws", "credentials")
-	}
-
-	return p.Filename, nil
-}
-
-// profile returns the AWS shared credentials profile. If empty will read
-// environment variable "AWS_PROFILE". If that is not set profile will
-// return "default".
-func (p *SharedCredentialsProvider) profile() string {
-	if p.Profile == "" {
-		p.Profile = os.Getenv("AWS_PROFILE")
-	}
-	if p.Profile == "" {
-		p.Profile = "default"
-	}
-
-	return p.Profile
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
deleted file mode 100644
index 6f075604e..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package credentials
-
-import (
-	"github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// StaticProviderName provides the name of the Static provider.
-const StaticProviderName = "StaticProvider"
-
-var (
-	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
-	//
-	// @readonly
-	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
-)
-
-// A StaticProvider is a set of credentials which are set programmatically,
-// and will never expire.
-type StaticProvider struct {
-	Value
-}
-
-// NewStaticCredentials returns a pointer to a new Credentials object
-// wrapping a static credentials value provider.
-func NewStaticCredentials(id, secret, token string) *Credentials {
-	return NewCredentials(&StaticProvider{Value: Value{
-		AccessKeyID:     id,
-		SecretAccessKey: secret,
-		SessionToken:    token,
-	}})
-}
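One edge case worth noting: since AnonymousCredentials earlier in this package is just NewStaticCredentials("", "", ""), the Retrieve defined below rejects empty keys. A sketch:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
    	// Empty static credentials surface as ErrStaticCredentialsEmpty.
    	_, err := credentials.NewStaticCredentials("", "", "").Get()
    	fmt.Println(err) // EmptyStaticCreds: static credentials are empty
    }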
-// Retrieve returns the credentials, or an error if the credentials are invalid.
-func (s *StaticProvider) Retrieve() (Value, error) {
-	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
-		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
-	}
-
-	s.Value.ProviderName = StaticProviderName
-	return s.Value, nil
-}
-
-// IsExpired returns if the credentials are expired.
-//
-// For StaticProvider, the credentials never expire.
-func (s *StaticProvider) IsExpired() bool {
-	return false
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
deleted file mode 100644
index 12be1a5d7..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Package defaults is a collection of helpers to retrieve the SDK's default
-// configuration and handlers.
-//
-// Generally this package shouldn't be used directly, but session.Session
-// instead. This package is useful when you need to reset the defaults
-// of a session or service client to the SDK defaults before setting
-// additional parameters.
-package defaults
-
-import (
-	"net/http"
-	"os"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/corehandlers"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
-	"github.com/aws/aws-sdk-go/aws/ec2metadata"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/private/endpoints"
-)
-
-// A Defaults provides a collection of default values for SDK clients.
-type Defaults struct {
-	Config   *aws.Config
-	Handlers request.Handlers
-}
-
-// Get returns the SDK's default values with Config and handlers pre-configured.
-func Get() Defaults {
-	cfg := Config()
-	handlers := Handlers()
-	cfg.Credentials = CredChain(cfg, handlers)
-
-	return Defaults{
-		Config:   cfg,
-		Handlers: handlers,
-	}
-}
-
-// Config returns the default configuration without credentials.
-// To retrieve a config with credentials also included use
-// `defaults.Get().Config` instead.
-//
-// Generally you shouldn't need to use this method directly, but
-// it is available if you need to reset the configuration of an
-// existing service client or session.
-func Config() *aws.Config {
-	return aws.NewConfig().
-		WithCredentials(credentials.AnonymousCredentials).
-		WithRegion(os.Getenv("AWS_REGION")).
-		WithHTTPClient(http.DefaultClient).
-		WithMaxRetries(aws.UseServiceDefaultRetries).
-		WithLogger(aws.NewDefaultLogger()).
-		WithLogLevel(aws.LogOff).
-		WithSleepDelay(time.Sleep)
-}
-
-// Handlers returns the default request handlers.
-//
-// Generally you shouldn't need to use this method directly, but
-// it is available if you need to reset the request handlers of an
-// existing service client or session.
-func Handlers() request.Handlers {
-	var handlers request.Handlers
-
-	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
-	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
-	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
-	handlers.Build.AfterEachFn = request.HandlerListStopOnError
-	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
-	handlers.Send.PushBackNamed(corehandlers.SendHandler)
-	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
-	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
-
-	return handlers
-}
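A sketch of seeding a custom client from these defaults, then overriding selected settings (the region and retry count are arbitrary):

    package main

    import "github.com/aws/aws-sdk-go/aws/defaults"

    func main() {
    	d := defaults.Get()
    	d.Config.WithRegion("us-east-1").WithMaxRetries(3)

    	// d.Config and d.Handlers can now be used to construct a client.
    	_ = d
    }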
-// CredChain returns the default credential chain.
-//
-// Generally you shouldn't need to use this method directly, but
-// it is available if you need to reset the credentials of an
-// existing service client or session's Config.
-func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
-	endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true)
-
-	return credentials.NewCredentials(&credentials.ChainProvider{
-		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
-		Providers: []credentials.Provider{
-			&credentials.EnvProvider{},
-			&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
-			&ec2rolecreds.EC2RoleProvider{
-				Client:       ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion),
-				ExpiryWindow: 5 * time.Minute,
-			},
-		}})
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
deleted file mode 100644
index 669c813a0..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package ec2metadata
-
-import (
-	"encoding/json"
-	"fmt"
-	"path"
-	"strings"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/request"
-)
-
-// GetMetadata uses the path provided to request information from the EC2
-// instance metadata service. The content will be returned as a string, or
-// an error if the request failed.
-func (c *EC2Metadata) GetMetadata(p string) (string, error) {
-	op := &request.Operation{
-		Name:       "GetMetadata",
-		HTTPMethod: "GET",
-		HTTPPath:   path.Join("/", "meta-data", p),
-	}
-
-	output := &metadataOutput{}
-	req := c.NewRequest(op, nil, output)
-
-	return output.Content, req.Send()
-}
-
-// GetDynamicData uses the path provided to request information from the EC2
-// instance metadata service for dynamic data. The content will be returned
-// as a string, or an error if the request failed.
-func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
-	op := &request.Operation{
-		Name:       "GetDynamicData",
-		HTTPMethod: "GET",
-		HTTPPath:   path.Join("/", "dynamic", p),
-	}
-
-	output := &metadataOutput{}
-	req := c.NewRequest(op, nil, output)
-
-	return output.Content, req.Send()
-}
-
-// GetInstanceIdentityDocument retrieves an identity document describing an
-// instance. An error is returned if the request fails or the response
-// cannot be parsed.
-func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
-	resp, err := c.GetDynamicData("instance-identity/document")
-	if err != nil {
-		return EC2InstanceIdentityDocument{},
-			awserr.New("EC2MetadataRequestError",
-				"failed to get EC2 instance identity document", err)
-	}
-
-	doc := EC2InstanceIdentityDocument{}
-	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
-		return EC2InstanceIdentityDocument{},
-			awserr.New("SerializationError",
-				"failed to decode EC2 instance identity document", err)
-	}
-
-	return doc, nil
-}
-
-// IAMInfo retrieves IAM info from the metadata API.
-func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
-	resp, err := c.GetMetadata("iam/info")
-	if err != nil {
-		return EC2IAMInfo{},
-			awserr.New("EC2MetadataRequestError",
-				"failed to get EC2 IAM info", err)
-	}
-
-	info := EC2IAMInfo{}
-	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
-		return EC2IAMInfo{},
-			awserr.New("SerializationError",
-				"failed to decode EC2 IAM info", err)
-	}
-
-	if info.Code != "Success" {
-		errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
-		return EC2IAMInfo{},
-			awserr.New("EC2MetadataError", errMsg, nil)
-	}
-
-	return info, nil
-}
-
-// Region returns the region the instance is running in.
-func (c *EC2Metadata) Region() (string, error) {
-	resp, err := c.GetMetadata("placement/availability-zone")
-	if err != nil {
-		return "", err
-	}
-
-	// returns region without the suffix. Eg: us-west-2a becomes us-west-2
-	return resp[:len(resp)-1], nil
-}
-
-// Available returns if the application has access to the EC2 Metadata service.
-// Can be used to determine if the application is running within an EC2 Instance and
-// the metadata service is available.
-func (c *EC2Metadata) Available() bool {
-	if _, err := c.GetMetadata("instance-id"); err != nil {
-		return false
-	}
-
-	return true
-}
-
-// An EC2IAMInfo provides the shape for unmarshalling
-// IAM info from the metadata API.
-type EC2IAMInfo struct {
-	Code               string
-	LastUpdated        time.Time
-	InstanceProfileArn string
-	InstanceProfileID  string
-}
-
-// An EC2InstanceIdentityDocument provides the shape for unmarshalling
-// an instance identity document.
-type EC2InstanceIdentityDocument struct {
-	DevpayProductCodes []string  `json:"devpayProductCodes"`
-	AvailabilityZone   string    `json:"availabilityZone"`
-	PrivateIP          string    `json:"privateIp"`
-	Version            string    `json:"version"`
-	Region             string    `json:"region"`
-	InstanceID         string    `json:"instanceId"`
-	BillingProducts    []string  `json:"billingProducts"`
-	InstanceType       string    `json:"instanceType"`
-	AccountID          string    `json:"accountId"`
-	PendingTime        time.Time `json:"pendingTime"`
-	ImageID            string    `json:"imageId"`
-	KernelID           string    `json:"kernelId"`
-	RamdiskID          string    `json:"ramdiskId"`
-	Architecture       string    `json:"architecture"`
-}
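A sketch of querying the metadata API through this client; it assumes the SDK's session package (not part of this diff) to satisfy the ConfigProvider that the New constructor, defined in service.go below, expects:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/ec2metadata"
    	"github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
    	svc := ec2metadata.New(session.New())

    	if !svc.Available() {
    		fmt.Println("not on EC2, or the metadata service is unreachable")
    		return
    	}

    	id, _ := svc.GetMetadata("instance-id")
    	region, _ := svc.Region()
    	fmt.Println(id, region)
    }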
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
deleted file mode 100644
index 5b4379dbd..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Package ec2metadata provides the client for making API calls to the
-// EC2 Metadata service.
-package ec2metadata
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"net/http"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/client"
-	"github.com/aws/aws-sdk-go/aws/client/metadata"
-	"github.com/aws/aws-sdk-go/aws/request"
-)
-
-// ServiceName is the name of the service.
-const ServiceName = "ec2metadata"
-
-// An EC2Metadata is an EC2 Metadata service Client.
-type EC2Metadata struct {
-	*client.Client
-}
-
-// New creates a new instance of the EC2Metadata client with a session.
-// This client is safe to use across multiple goroutines.
-//
-// Example:
-//     // Create an EC2Metadata client from just a session.
-//     svc := ec2metadata.New(mySession)
-//
-//     // Create an EC2Metadata client with additional configuration
-//     svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
-func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
-	c := p.ClientConfig(ServiceName, cfgs...)
-	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
-}
-
-// NewClient returns a new EC2Metadata client. Should be used to create
-// a client when not using a session. Generally using just New with a session
-// is preferred.
-//
-// If an unmodified HTTP client from the stdlib default, or no client at all,
-// is provided, the EC2Metadata HTTP client's timeout will be shortened.
-// To disable this set Config.EC2MetadataDisableTimeoutOverride to true. Enabled by default.
-func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
-	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
-		// If the http client is unmodified and this feature is not disabled
-		// set custom timeouts for EC2Metadata requests.
-		cfg.HTTPClient = &http.Client{
-			// use a shorter timeout than default because the metadata
-			// service is local if it is running, and to fail faster
-			// if not running on an ec2 instance.
-			Timeout: 5 * time.Second,
-		}
-	}
-
-	svc := &EC2Metadata{
-		Client: client.New(
-			cfg,
-			metadata.ClientInfo{
-				ServiceName: ServiceName,
-				Endpoint:    endpoint,
-				APIVersion:  "latest",
-			},
-			handlers,
-		),
-	}
-
-	svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
-	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
-	svc.Handlers.Validate.Clear()
-	svc.Handlers.Validate.PushBack(validateEndpointHandler)
-
-	// Add additional options to the service config
-	for _, option := range opts {
-		option(svc.Client)
-	}
-
-	return svc
-}
-
-func httpClientZero(c *http.Client) bool {
-	return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
-}
-
-type metadataOutput struct {
-	Content string
-}
-
-func unmarshalHandler(r *request.Request) {
-	defer r.HTTPResponse.Body.Close()
-	b := &bytes.Buffer{}
-	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
-		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
-		return
-	}
-
-	if data, ok := r.Data.(*metadataOutput); ok {
-		data.Content = b.String()
-	}
-}
-
-func unmarshalError(r *request.Request) {
-	defer r.HTTPResponse.Body.Close()
-	b := &bytes.Buffer{}
-	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
-		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
-		return
-	}
-
-	// Response body format is not consistent between metadata endpoints.
-	// Grab the error message as a string and include that as the source error
-	r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
-}
-
-func validateEndpointHandler(r *request.Request) {
-	if r.ClientInfo.Endpoint == "" {
-		r.Error = aws.ErrMissingEndpoint
-	}
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/src/github.com/aws/aws-sdk-go/aws/errors.go
deleted file mode 100644
index 576636168..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/errors.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package aws
-
-import "github.com/aws/aws-sdk-go/aws/awserr"
-
-var (
-	// ErrMissingRegion is an error that is returned if region configuration is
-	// not found.
-	//
-	// @readonly
-	ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
-
-	// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
-	// resolved for a service.
-	//
-	// @readonly
-	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
-)
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/src/github.com/aws/aws-sdk-go/aws/logger.go
deleted file mode 100644
index db87188e2..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/logger.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package aws
-
-import (
-	"log"
-	"os"
-)
-
-// A LogLevelType defines the level logging should be performed at. Used to instruct
-// the SDK which statements should be logged.
-type LogLevelType uint
-
-// LogLevel returns the pointer to a LogLevel. Should be used to workaround
-// not being able to take the address of a non-composite literal.
-func LogLevel(l LogLevelType) *LogLevelType {
-	return &l
-}
-
-// Value returns the LogLevel value or the default value LogOff if the LogLevel
-// is nil. Safe to use on nil value LogLevelTypes.
-func (l *LogLevelType) Value() LogLevelType {
-	if l != nil {
-		return *l
-	}
-	return LogOff
-}
-
-// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
-// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
-// LogLevel is nil, will default to LogOff comparison.
-func (l *LogLevelType) Matches(v LogLevelType) bool {
-	c := l.Value()
-	return c&v == v
-}
-
-// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
-// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
-// to LogOff comparison.
-func (l *LogLevelType) AtLeast(v LogLevelType) bool {
-	c := l.Value()
-	return c >= v
-}
-
-const (
-	// LogOff states that no logging should be performed by the SDK. This is the
-	// default state of the SDK, and should be used to disable all logging.
-	LogOff LogLevelType = iota * 0x1000
-
-	// LogDebug states that debug output should be logged by the SDK. This should
-	// be used to inspect requests made and responses received.
-	LogDebug
-)
-
-// Debug Logging Sub Levels
-const (
-	// LogDebugWithSigning states that the SDK should log request signing and
-	// presigning events. This should be used to log the signing details of
-	// requests for debugging. Will also enable LogDebug.
-	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
-
-	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
-	// HTTP bodies in addition to the headers and path. This should be used to
-	// see the body content of requests and responses made while using the SDK.
-	// Will also enable LogDebug.
-	LogDebugWithHTTPBody
-
-	// LogDebugWithRequestRetries states the SDK should log when service requests will
-	// be retried. This should be used when you want to log when service
-	// requests are being retried. Will also enable LogDebug.
-	LogDebugWithRequestRetries
-
-	// LogDebugWithRequestErrors states the SDK should log when service requests fail
-	// to build, send, validate, or unmarshal.
-	LogDebugWithRequestErrors
-)
-
-// A Logger is a minimalistic interface for the SDK to log messages to. Should
-// be used to provide custom logging writers for the SDK to use.
-type Logger interface {
-	Log(...interface{})
-}
-
-// A LoggerFunc is a convenience type to convert a function taking a variadic
-// list of arguments and wrap it so the Logger interface can be used.
-//
-// Example:
-//     s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
-//         fmt.Fprintln(os.Stdout, args...)
-//     })})
-type LoggerFunc func(...interface{})
-
-// Log calls the wrapped function with the arguments provided.
-func (f LoggerFunc) Log(args ...interface{}) {
-	f(args...)
-}
-
-// NewDefaultLogger returns a Logger which will write log messages to stdout, and
-// use the same formatting runes as the stdlib log.Logger.
-func NewDefaultLogger() Logger {
-	return &defaultLogger{
-		logger: log.New(os.Stdout, "", log.LstdFlags),
-	}
-}
-
-// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
-type defaultLogger struct {
-	logger *log.Logger
-}
-
-// Log logs the parameters to the stdlib logger. See log.Println.
-func (l defaultLogger) Log(args ...interface{}) {
-	l.logger.Println(args...)
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go
deleted file mode 100644
index 5279c19c0..000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package request
-
-import (
-	"fmt"
-	"strings"
-)
-
-// A Handlers provides a collection of request handlers for various
-// stages of handling requests.
-type Handlers struct {
-	Validate         HandlerList
-	Build            HandlerList
-	Sign             HandlerList
-	Send             HandlerList
-	ValidateResponse HandlerList
-	Unmarshal        HandlerList
-	UnmarshalMeta    HandlerList
-	UnmarshalError   HandlerList
-	Retry            HandlerList
-	AfterRetry       HandlerList
-}
-
-// Copy returns a copy of this handler's lists.
-func (h *Handlers) Copy() Handlers {
-	return Handlers{
-		Validate:         h.Validate.copy(),
-		Build:            h.Build.copy(),
-		Sign:             h.Sign.copy(),
-		Send:             h.Send.copy(),
-		ValidateResponse: h.ValidateResponse.copy(),
-		Unmarshal:        h.Unmarshal.copy(),
-		UnmarshalError:   h.UnmarshalError.copy(),
-		UnmarshalMeta:    h.UnmarshalMeta.copy(),
-		Retry:            h.Retry.copy(),
-		AfterRetry:       h.AfterRetry.copy(),
-	}
-}
-
-// Clear removes callback functions for all handlers.
-func (h *Handlers) Clear() {
-	h.Validate.Clear()
-	h.Build.Clear()
-	h.Send.Clear()
-	h.Sign.Clear()
-	h.Unmarshal.Clear()
-	h.UnmarshalMeta.Clear()
-	h.UnmarshalError.Clear()
-	h.ValidateResponse.Clear()
-	h.Retry.Clear()
-	h.AfterRetry.Clear()
-}
-
-// A HandlerListRunItem represents an entry in the HandlerList which
-// is being run.
-type HandlerListRunItem struct {
-	Index   int
-	Handler NamedHandler
-	Request *Request
-}
-
-// A HandlerList manages zero or more handlers in a list.
-type HandlerList struct {
-	list []NamedHandler
-
-	// Called after each request handler in the list is called. If set
-	// and the func returns true the HandlerList will continue to iterate
-	// over the request handlers. If false is returned the HandlerList
-	// will stop iterating.
-	//
-	// Should be used if extra logic is to be performed between each handler
-	// in the list. This can be used to terminate a list's iteration
-	// based on a condition such as an error, like HandlerListStopOnError,
-	// or for logging, like HandlerListLogItem.
-	AfterEachFn func(item HandlerListRunItem) bool
-}
-
-// A NamedHandler is a struct that contains a name and function callback.
-type NamedHandler struct {
-	Name string
-	Fn   func(*Request)
-}
-
-// copy creates a copy of the handler list.
-func (l *HandlerList) copy() HandlerList {
-	n := HandlerList{
-		AfterEachFn: l.AfterEachFn,
-	}
-	n.list = append([]NamedHandler{}, l.list...)
-	return n
-}
-
-// Clear clears the handler list.
-func (l *HandlerList) Clear() {
-	l.list = []NamedHandler{}
-}
-
-// Len returns the number of handlers in the list.
-func (l *HandlerList) Len() int {
-	return len(l.list)
-}
-
-// PushBack pushes handler f to the back of the handler list.
-func (l *HandlerList) PushBack(f func(*Request)) {
-	l.list = append(l.list, NamedHandler{"__anonymous", f})
-}
-
-// PushFront pushes handler f to the front of the handler list.
-func (l *HandlerList) PushFront(f func(*Request)) {
-	l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
-}
-
-// PushBackNamed pushes named handler f to the back of the handler list.
-func (l *HandlerList) PushBackNamed(n NamedHandler) {
-	l.list = append(l.list, n)
-}
-
-// PushFrontNamed pushes named handler f to the front of the handler list.
-func (l *HandlerList) PushFrontNamed(n NamedHandler) {
-	l.list = append([]NamedHandler{n}, l.list...)
-}
-
-// Remove removes a NamedHandler n.
-func (l *HandlerList) Remove(n NamedHandler) {
-	newlist := []NamedHandler{}
-	for _, m := range l.list {
-		if m.Name != n.Name {
-			newlist = append(newlist, m)
-		}
-	}
-	l.list = newlist
-}
-
-// Run executes all handlers in the list with a given request object.
-func (l *HandlerList) Run(r *Request) {
-	for i, h := range l.list {
-		h.Fn(r)
-		item := HandlerListRunItem{
-			Index: i, Handler: h, Request: r,
-		}
-		if l.AfterEachFn != nil && !l.AfterEachFn(item) {
-			return
-		}
-	}
-}
-
-// HandlerListLogItem logs the request handler and the state of the
-// request's Error value. Always returns true to continue iterating
-// request handlers in a HandlerList.
-func HandlerListLogItem(item HandlerListRunItem) bool {
-	if item.Request.Config.Logger == nil {
-		return true
-	}
-	item.Request.Config.Logger.Log("DEBUG: RequestHandler",
-		item.Index, item.Handler.Name, item.Request.Error)
-
-	return true
-}
-
-// HandlerListStopOnError returns false to stop the HandlerList iterating
-// over request handlers if Request.Error is not nil. True otherwise
-// to continue iterating.
-func HandlerListStopOnError(item HandlerListRunItem) bool {
-	return item.Request.Error == nil
-}
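A sketch of wiring the two stock AfterEachFn callbacks above into a Handlers value:

    package main

    import "github.com/aws/aws-sdk-go/aws/request"

    func main() {
    	var handlers request.Handlers

    	// Stop validation at the first failing handler, and log every
    	// send-phase handler as it runs (when a Config.Logger is set).
    	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
    	handlers.Send.AfterEachFn = request.HandlerListLogItem

    	_ = handlers
    }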
-func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { - ua := fmt.Sprintf("%s/%s", name, version) - if len(extra) > 0 { - ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) - } - return func(r *Request) { - AddToUserAgent(r, ua) - } -} - -// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. -// The input string will be concatenated with the current request's user agent string. -func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { - return func(r *Request) { - AddToUserAgent(r, s) - } -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request.go deleted file mode 100644 index a4087f20e..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.5 - -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := &http.Request{ - URL: &url.URL{}, - Header: http.Header{}, - Close: r.Close, - Body: body, - Host: r.Host, - Method: r.Method, - Proto: r.Proto, - ContentLength: r.ContentLength, - // Cancel will be deprecated in 1.7 and will be replaced with Context - Cancel: r.Cancel, - } - - *req.URL = *r.URL - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go deleted file mode 100644 index 75da021ef..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build !go1.5 - -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := &http.Request{ - URL: &url.URL{}, - Header: http.Header{}, - Close: r.Close, - Body: body, - Host: r.Host, - Method: r.Method, - Proto: r.Proto, - ContentLength: r.ContentLength, - } - - *req.URL = *r.URL - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go deleted file mode 100644 index da6396d2d..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ /dev/null @@ -1,49 +0,0 @@ -package request - -import ( - "io" - "sync" -) - -// offsetReader is a thread-safe io.ReadCloser to prevent racing -// with retrying requests -type offsetReader struct { - buf io.ReadSeeker - lock sync.RWMutex - closed bool -} - -func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { - reader := &offsetReader{} - buf.Seek(offset, 0) - - reader.buf = buf - return reader -} - -// Close is a thread-safe close. Uses the write lock. -func (o *offsetReader) Close() error { - o.lock.Lock() - defer o.lock.Unlock() - o.closed = true - return nil -} - -// Read is a thread-safe read using a read lock. -func (o *offsetReader) Read(p []byte) (int, error) { - o.lock.RLock() - defer o.lock.RUnlock() - - if o.closed { - return 0, io.EOF - } - - return o.buf.Read(p) -} - -// CloseAndCopy will return a new offsetReader with a copy of the old buffer -// and close the old buffer. 
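A short usage sketch for MakeAddToUserAgentHandler above; the name/version/extra values are illustrative placeholders, and the import path is the canonical upstream one:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/aws/aws-sdk-go/aws/request"
    )

    func main() {
        httpReq, _ := http.NewRequest("POST", "https://example.amazonaws.com/", nil)
        r := &request.Request{HTTPRequest: httpReq}

        // Appends "name/version (extra0; extra1; ...)" to the User-Agent header.
        addUA := request.MakeAddToUserAgentHandler("docker", "1.12.0", "linux")
        addUA(r)

        fmt.Println(httpReq.Header.Get("User-Agent")) // docker/1.12.0 (linux)
    }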
-func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { - o.Close() - return newOffsetReader(o.buf, offset) -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go deleted file mode 100644 index ce8087024..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go +++ /dev/null @@ -1,329 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client/metadata" -) - -// A Request is the service request to be made. -type Request struct { - Config aws.Config - ClientInfo metadata.ClientInfo - Handlers Handlers - - Retryer - Time time.Time - ExpireTime time.Duration - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - - built bool -} - -// An Operation is the service API operation to be made. -type Operation struct { - Name string - HTTPMethod string - HTTPPath string - *Paginator -} - -// Paginator keeps track of pagination configuration for an API operation. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - -// New returns a new Request pointer for the service API -// operation and parameters. -// -// Params is any value of input parameters to be the request payload. -// Data is pointer value to an object which the request's response -// payload will be deserialized to. -func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, - retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { - - method := operation.HTTPMethod - if method == "" { - method = "POST" - } - p := operation.HTTPPath - if p == "" { - p = "/" - } - - httpReq, _ := http.NewRequest(method, "", nil) - - var err error - httpReq.URL, err = url.Parse(clientInfo.Endpoint + p) - if err != nil { - httpReq.URL = &url.URL{} - err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) - } - - r := &Request{ - Config: cfg, - ClientInfo: clientInfo, - Handlers: handlers.Copy(), - - Retryer: retryer, - Time: time.Now(), - ExpireTime: 0, - Operation: operation, - HTTPRequest: httpReq, - Body: nil, - Params: params, - Error: err, - Data: data, - } - r.SetBufferBody([]byte{}) - - return r -} - -// WillRetry returns if the request's can be retried. -func (r *Request) WillRetry() bool { - return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() -} - -// ParamsFilled returns if the request's parameters have been populated -// and the parameters are valid. False is returned if no parameters are -// provided or invalid. -func (r *Request) ParamsFilled() bool { - return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() -} - -// DataFilled returns true if the request's data for response deserialization -// target has been set and is a valid. False is returned if data is not -// set, or is invalid. 
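The offsetReader above, together with Request.BodyStart, exists so that a retried attempt can rewind the body and re-send exactly the same bytes. A standalone sketch of that rewind step (plain Go, not SDK code):

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"
    )

    func main() {
        body := bytes.NewReader([]byte("payload"))

        attempt := func() string {
            body.Seek(0, 0) // rewind to the recorded start offset, as CloseAndCopy does
            b, _ := ioutil.ReadAll(body)
            return string(b)
        }

        fmt.Println(attempt()) // first send
        fmt.Println(attempt()) // a retry sees identical bytes
    }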
-func (r *Request) DataFilled() bool { - return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() -} - -// SetBufferBody will set the request's body bytes that will be sent to -// the service API. -func (r *Request) SetBufferBody(buf []byte) { - r.SetReaderBody(bytes.NewReader(buf)) -} - -// SetStringBody sets the body of the request to be backed by a string. -func (r *Request) SetStringBody(s string) { - r.SetReaderBody(strings.NewReader(s)) -} - -// SetReaderBody will set the request's body reader. -func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.HTTPRequest.Body = newOffsetReader(reader, 0) - r.Body = reader -} - -// Presign returns the request's signed URL. Error will be returned -// if the signing fails. -func (r *Request) Presign(expireTime time.Duration) (string, error) { - r.ExpireTime = expireTime - r.NotHoist = false - r.Sign() - if r.Error != nil { - return "", r.Error - } - return r.HTTPRequest.URL.String(), nil -} - -// PresignRequest behaves just like presign, but hoists all headers and signs them. -// Also returns the signed hash back to the user -func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { - r.ExpireTime = expireTime - r.NotHoist = true - r.Sign() - if r.Error != nil { - return "", nil, r.Error - } - return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil -} - -func debugLogReqError(r *Request, stage string, retrying bool, err error) { - if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { - return - } - - retryStr := "not retrying" - if retrying { - retryStr = "will retry" - } - - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", - stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) -} - -// Build will build the request's object so it can be signed and sent -// to the service. Build will also validate all the request's parameters. -// Anny additional build Handlers set on this request will be run -// in the order they were set. -// -// The request will only be built once. Multiple calls to build will have -// no effect. -// -// If any Validate or Build errors occur the build will stop and the error -// which occurred will be returned. -func (r *Request) Build() error { - if !r.built { - r.Handlers.Validate.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Request", false, r.Error) - return r.Error - } - r.Handlers.Build.Run(r) - if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) - return r.Error - } - r.built = true - } - - return r.Error -} - -// Sign will sign the request retuning error if errors are encountered. -// -// Send will build the request prior to signing. All Sign Handlers will -// be executed in the order they were set. -func (r *Request) Sign() error { - r.Build() - if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) - return r.Error - } - - r.Handlers.Sign.Run(r) - return r.Error -} - -// Send will send the request returning error if errors are encountered. -// -// Send will sign the request prior to sending. All Send Handlers will -// be executed in the order they were set. -// -// Canceling a request is non-deterministic. If a request has been canceled, -// then the transport will choose, randomly, one of the state channels during -// reads or getting the connection. 
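Presign above underpins the SDK's presigned-URL flow. A hedged sketch against the upstream S3 client (bucket, key, and region are placeholders; credentials are assumed to come from the default chain):

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

        req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
            Bucket: aws.String("my-bucket"), // placeholder bucket and key
            Key:    aws.String("my-key"),
        })

        // Build, sign, and return a URL valid for 15 minutes.
        url, err := req.Presign(15 * time.Minute)
        if err != nil {
            fmt.Println("presign failed:", err)
            return
        }
        fmt.Println(url)
    }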
-// -// readLoop() and getConn(req *Request, cm connectMethod) -// https://github.com/golang/go/blob/master/src/net/http/transport.go -func (r *Request) Send() error { - for { - if aws.BoolValue(r.Retryable) { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - var body io.ReadCloser - if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok { - body = reader.CloseAndCopy(r.BodyStart) - } else { - if r.Config.Logger != nil { - r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions") - } - r.Body.Seek(r.BodyStart, 0) - body = ioutil.NopCloser(r.Body) - } - - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body) - if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - // Closing response body. Since we are setting a new request to send off, this - // response will get squashed and leaked. - r.HTTPResponse.Body.Close() - } - } - - r.Sign() - if r.Error != nil { - return r.Error - } - - r.Retryable = nil - - r.Handlers.Send.Run(r) - if r.Error != nil { - if strings.Contains(r.Error.Error(), "net/http: request canceled") { - return r.Error - } - - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", false, r.Error) - return r.Error - } - debugLogReqError(r, "Send Request", true, err) - continue - } - - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.UnmarshalError.Run(r) - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Response", false, r.Error) - return r.Error - } - debugLogReqError(r, "Validate Response", true, err) - continue - } - - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", false, r.Error) - return r.Error - } - debugLogReqError(r, "Unmarshal Response", true, err) - continue - } - - break - } - - return nil -} - -// AddToUserAgent adds the string to the end of the request's current user agent. -func AddToUserAgent(r *Request, s string) { - curUA := r.HTTPRequest.Header.Get("User-Agent") - if len(curUA) > 0 { - s = curUA + " " + s - } - r.HTTPRequest.Header.Set("User-Agent", s) -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go deleted file mode 100644 index 2939ec473..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ /dev/null @@ -1,104 +0,0 @@ -package request - -import ( - "reflect" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -//type Paginater interface { -// HasNextPage() bool -// NextPage() *Request -// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error -//} - -// HasNextPage returns true if this request has more pages of data available. -func (r *Request) HasNextPage() bool { - return len(r.nextPageTokens()) > 0 -} - -// nextPageTokens returns the tokens to use when asking for the next page of -// data. 
-func (r *Request) nextPageTokens() []interface{} { - if r.Operation.Paginator == nil { - return nil - } - - if r.Operation.TruncationToken != "" { - tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) - if len(tr) == 0 { - return nil - } - - switch v := tr[0].(type) { - case *bool: - if !aws.BoolValue(v) { - return nil - } - case bool: - if v == false { - return nil - } - } - } - - tokens := []interface{}{} - tokenAdded := false - for _, outToken := range r.Operation.OutputTokens { - v, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(v) > 0 { - tokens = append(tokens, v[0]) - tokenAdded = true - } else { - tokens = append(tokens, nil) - } - } - if !tokenAdded { - return nil - } - - return tokens -} - -// NextPage returns a new Request that can be executed to return the next -// page of result data. Call .Send() on this request to execute it. -func (r *Request) NextPage() *Request { - tokens := r.nextPageTokens() - if len(tokens) == 0 { - return nil - } - - data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() - nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) - for i, intok := range nr.Operation.InputTokens { - awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) - } - return nr -} - -// EachPage iterates over each page of a paginated request object. The fn -// parameter should be a function with the following sample signature: -// -// func(page *T, lastPage bool) bool { -// return true // return false to stop iterating -// } -// -// Where "T" is the structure type matching the output structure of the given -// operation. For example, a request object generated by -// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput -// as the structure "T". The lastPage value represents whether the page is -// the last page of data or not. The return value of this function should -// return true to keep iterating or false to stop. -func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { - for page := r; page != nil; page = page.NextPage() { - if err := page.Send(); err != nil { - return err - } - if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { - return page.Error - } - } - - return nil -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go deleted file mode 100644 index 8cc8b015a..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ /dev/null @@ -1,101 +0,0 @@ -package request - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the service.DefaultRetryer -// structure, which contains basic retry logic using exponential backoff. -type Retryer interface { - RetryRules(*Request) time.Duration - ShouldRetry(*Request) bool - MaxRetries() int -} - -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. -func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { - cfg.Retryer = retryer - return cfg -} - -// retryableCodes is a collection of service response codes which are retry-able -// without any further action. 
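EachPage above drives the Paginator metadata attached to each Operation. A sketch with the upstream DynamoDB client (region and credentials assumed from the environment):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

        req, _ := svc.ListTablesRequest(&dynamodb.ListTablesInput{})
        err := req.EachPage(func(data interface{}, lastPage bool) bool {
            for _, name := range data.(*dynamodb.ListTablesOutput).TableNames {
                fmt.Println(*name)
            }
            return true // return false to stop paging early
        })
        if err != nil {
            fmt.Println("list failed:", err)
        }
    }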
-var retryableCodes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, -} - -var throttleCodes = map[string]struct{}{ - "ProvisionedThroughputExceededException": {}, - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once - "TooManyRequestsException": {}, // Lambda functions -} - -// credsExpiredCodes is a collection of error codes which signify the credentials -// need to be refreshed. Expired tokens require refreshing of credentials, and -// resigning before the request can be retried. -var credsExpiredCodes = map[string]struct{}{ - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "RequestExpired": {}, // EC2 Only -} - -func isCodeThrottle(code string) bool { - _, ok := throttleCodes[code] - return ok -} - -func isCodeRetryable(code string) bool { - if _, ok := retryableCodes[code]; ok { - return true - } - - return isCodeExpiredCreds(code) -} - -func isCodeExpiredCreds(code string) bool { - _, ok := credsExpiredCodes[code] - return ok -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -func (r *Request) IsErrorRetryable() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeRetryable(err.Code()) - } - } - return false -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set -func (r *Request) IsErrorThrottle() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeThrottle(err.Code()) - } - } - return false -} - -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. -func (r *Request) IsErrorExpired() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeExpiredCreds(err.Code()) - } - } - return false -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/validation.go deleted file mode 100644 index 2520286b7..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/validation.go +++ /dev/null @@ -1,234 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // InvalidParameterErrCode is the error code for invalid parameters errors - InvalidParameterErrCode = "InvalidParameter" - // ParamRequiredErrCode is the error code for required parameter errors - ParamRequiredErrCode = "ParamRequiredError" - // ParamMinValueErrCode is the error code for fields with too low of a - // number value. - ParamMinValueErrCode = "ParamMinValueError" - // ParamMinLenErrCode is the error code for fields without enough elements. - ParamMinLenErrCode = "ParamMinLenError" -) - -// Validator provides a way for types to perform validation logic on their -// input values that external code can use to determine if a type's values -// are valid. -type Validator interface { - Validate() error -} - -// An ErrInvalidParams provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type ErrInvalidParams struct { - // Context is the base context of the invalid parameter group. - Context string - errs []ErrInvalidParam -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. 
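A sketch of installing a custom Retryer via WithRetryer above; fixedRetryer is a hypothetical implementation with a constant backoff:

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
    )

    // fixedRetryer retries retryable errors up to 3 times with a flat delay.
    type fixedRetryer struct{}

    func (fixedRetryer) RetryRules(*request.Request) time.Duration { return 200 * time.Millisecond }
    func (fixedRetryer) ShouldRetry(r *request.Request) bool       { return r.IsErrorRetryable() }
    func (fixedRetryer) MaxRetries() int                           { return 3 }

    func main() {
        cfg := request.WithRetryer(aws.NewConfig(), fixedRetryer{})
        fmt.Println("retryer installed:", cfg.Retryer != nil)
    }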
The context of the invalid parameter will be updated to reflect -// this collection. -func (e *ErrInvalidParams) Add(err ErrInvalidParam) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another ErrInvalidParams -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. -func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e ErrInvalidParams) Len() int { - return len(e.errs) -} - -// Code returns the code of the error -func (e ErrInvalidParams) Code() string { - return InvalidParameterErrCode -} - -// Message returns the message of the error -func (e ErrInvalidParams) Message() string { - return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) -} - -// Error returns the string formatted form of the invalid parameters. -func (e ErrInvalidParams) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Message()) - } - - return w.String() -} - -// OrigErr returns the invalid parameters as a awserr.BatchedErrors value -func (e ErrInvalidParams) OrigErr() error { - return awserr.NewBatchError( - InvalidParameterErrCode, e.Message(), e.OrigErrs()) -} - -// OrigErrs returns a slice of the invalid parameters -func (e ErrInvalidParams) OrigErrs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An ErrInvalidParam represents an invalid parameter error type. -type ErrInvalidParam interface { - awserr.Error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type errInvalidParam struct { - context string - nestedContext string - field string - code string - msg string -} - -// Code returns the error code for the type of invalid parameter. -func (e *errInvalidParam) Code() string { - return e.code -} - -// Message returns the reason the parameter was invalid, and its context. -func (e *errInvalidParam) Message() string { - return fmt.Sprintf("%s, %s.", e.msg, e.Field()) -} - -// Error returns the string version of the invalid parameter error. -func (e *errInvalidParam) Error() string { - return fmt.Sprintf("%s: %s", e.code, e.Message()) -} - -// OrigErr returns nil, Implemented for awserr.Error interface. -func (e *errInvalidParam) OrigErr() error { - return nil -} - -// Field Returns the field and context the error occurred. -func (e *errInvalidParam) Field() string { - field := e.context - if len(field) > 0 { - field += "." - } - if len(e.nestedContext) > 0 { - field += fmt.Sprintf("%s.", e.nestedContext) - } - field += e.field - - return field -} - -// SetContext updates the base context of the error. -func (e *errInvalidParam) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. 
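A sketch of how ErrInvalidParams aggregates per-field errors, using the NewErrParamRequired and NewErrParamMinLen constructors defined just below; the CreateTableInput context string is illustrative:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
    )

    func main() {
        errs := request.ErrInvalidParams{Context: "CreateTableInput"}
        errs.Add(request.NewErrParamRequired("TableName"))
        errs.Add(request.NewErrParamMinLen("TableName", 3))

        if errs.Len() > 0 {
            fmt.Print(errs.Error())
            // InvalidParameter: 2 validation error(s) found.
            // - missing required field, CreateTableInput.TableName.
            // - minimum field size of 3, CreateTableInput.TableName.
        }
    }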
-func (e *errInvalidParam) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - } else { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - } - -} - -// An ErrParamRequired represents an required parameter error. -type ErrParamRequired struct { - errInvalidParam -} - -// NewErrParamRequired creates a new required parameter error. -func NewErrParamRequired(field string) *ErrParamRequired { - return &ErrParamRequired{ - errInvalidParam{ - code: ParamRequiredErrCode, - field: field, - msg: fmt.Sprintf("missing required field"), - }, - } -} - -// An ErrParamMinValue represents a minimum value parameter error. -type ErrParamMinValue struct { - errInvalidParam - min float64 -} - -// NewErrParamMinValue creates a new minimum value parameter error. -func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { - return &ErrParamMinValue{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field value of %v", min), - }, - min: min, - } -} - -// MinValue returns the field's require minimum value. -// -// float64 is returned for both int and float min values. -func (e *ErrParamMinValue) MinValue() float64 { - return e.min -} - -// An ErrParamMinLen represents a minimum length parameter error. -type ErrParamMinLen struct { - errInvalidParam - min int -} - -// NewErrParamMinLen creates a new minimum length parameter error. -func NewErrParamMinLen(field string, min int) *ErrParamMinLen { - return &ErrParamMinLen{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field size of %v", min), - }, - min: min, - } -} - -// MinLen returns the field's required minimum length. -func (e *ErrParamMinLen) MinLen() int { - return e.min -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go deleted file mode 100644 index 6bc8f1be9..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go +++ /dev/null @@ -1,120 +0,0 @@ -// Package session provides a way to create service clients with shared configuration -// and handlers. -// -// Generally this package should be used instead of the `defaults` package. -// -// A session should be used to share configurations and request handlers between multiple -// service clients. When service clients need specific configuration aws.Config can be -// used to provide additional configuration directly to the service client. -package session - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/endpoints" -) - -// A Session provides a central location to create service clients from and -// store configurations and request handlers for those services. -// -// Sessions are safe to create service clients concurrently, but it is not safe -// to mutate the session concurrently. -type Session struct { - Config *aws.Config - Handlers request.Handlers -} - -// New creates a new instance of the handlers merging in the provided Configs -// on top of the SDK's default configurations. Once the session is created it -// can be mutated to modify Configs or Handlers. The session is safe to be read -// concurrently, but it should not be written to concurrently. 
-// -// Example: -// // Create a session with the default config and request handlers. -// sess := session.New() -// -// // Create a session with a custom region -// sess := session.New(&aws.Config{Region: aws.String("us-east-1")}) -// -// // Create a session, and add additional handlers for all service -// // clients created with the session to inherit. Adds logging handler. -// sess := session.New() -// sess.Handlers.Send.PushFront(func(r *request.Request) { -// // Log every request made and its payload -// logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params) -// }) -// -// // Create a S3 client instance from a session -// sess := session.New() -// svc := s3.New(sess) -func New(cfgs ...*aws.Config) *Session { - cfg := defaults.Config() - handlers := defaults.Handlers() - - // Apply the passed in configs so the configuration can be applied to the - // default credential chain - cfg.MergeIn(cfgs...) - cfg.Credentials = defaults.CredChain(cfg, handlers) - - // Reapply any passed in configs to override credentials if set - cfg.MergeIn(cfgs...) - - s := &Session{ - Config: cfg, - Handlers: handlers, - } - - initHandlers(s) - - return s -} - -func initHandlers(s *Session) { - // Add the Validate parameter handler if it is not disabled. - s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) - if !aws.BoolValue(s.Config.DisableParamValidation) { - s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) - } -} - -// Copy creates and returns a copy of the current session, coping the config -// and handlers. If any additional configs are provided they will be merged -// on top of the session's copied config. -// -// Example: -// // Create a copy of the current session, configured for the us-west-2 region. -// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) -func (s *Session) Copy(cfgs ...*aws.Config) *Session { - newSession := &Session{ - Config: s.Config.Copy(cfgs...), - Handlers: s.Handlers.Copy(), - } - - initHandlers(newSession) - - return newSession -} - -// ClientConfig satisfies the client.ConfigProvider interface and is used to -// configure the service client instances. Passing the Session to the service -// client's constructor (New) will use this method to configure the client. -// -// Example: -// sess := session.New() -// s3.New(sess) -func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { - s = s.Copy(cfgs...) - endpoint, signingRegion := endpoints.NormalizeEndpoint( - aws.StringValue(s.Config.Endpoint), serviceName, - aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL)) - - return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: endpoint, - SigningRegion: signingRegion, - } -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/types.go b/vendor/src/github.com/aws/aws-sdk-go/aws/types.go deleted file mode 100644 index fa014b49e..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/types.go +++ /dev/null @@ -1,106 +0,0 @@ -package aws - -import ( - "io" - "sync" -) - -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser -func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { - return ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if they are available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// Read reads from the reader up to size of p. 
The number of bytes read, and -// error if it occurred will be returned. -// -// If the reader is not an io.Reader zero bytes read, and nil error will be returned. -// -// Performs the same functionality as io.Reader Read -func (r ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read to offset, interpreted according to -// whence: 0 means relative to the origin of the file, 1 means relative to the -// current offset, and 2 means relative to the end. Seek returns the new offset -// and an error, if any. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// Close closes the ReaderSeekerCloser. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} - -// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface -// Can be used with the s3manager.Downloader to download content to a buffer -// in memory. Safe to use concurrently. -type WriteAtBuffer struct { - buf []byte - m sync.Mutex - - // GrowthCoeff defines the growth rate of the internal buffer. By - // default, the growth rate is 1, where expanding the internal - // buffer will allocate only enough capacity to fit the new expected - // length. - GrowthCoeff float64 -} - -// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer -// provided by buf. -func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { - return &WriteAtBuffer{buf: buf} -} - -// WriteAt writes a slice of bytes to a buffer starting at the position provided -// The number of bytes written will be returned, or error. Can overwrite previous -// written slices if the write ats overlap. -func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { - pLen := len(p) - expLen := pos + int64(pLen) - b.m.Lock() - defer b.m.Unlock() - if int64(len(b.buf)) < expLen { - if int64(cap(b.buf)) < expLen { - if b.GrowthCoeff < 1 { - b.GrowthCoeff = 1 - } - newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) - copy(newBuf, b.buf) - b.buf = newBuf - } - b.buf = b.buf[:expLen] - } - copy(b.buf[pos:], p) - return pLen, nil -} - -// Bytes returns a slice of bytes written to the buffer. -func (b *WriteAtBuffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.buf[:len(b.buf):len(b.buf)] -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/version.go b/vendor/src/github.com/aws/aws-sdk-go/aws/version.go deleted file mode 100644 index 84c62becd..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/version.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package aws provides core functionality for making requests to AWS services. 
-package aws - -// SDKName is the name of this AWS SDK -const SDKName = "aws-sdk-go" - -// SDKVersion is the version of this SDK -const SDKVersion = "1.1.30" diff --git a/vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/LICENSE b/vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go deleted file mode 100644 index 2b279e659..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go +++ /dev/null @@ -1,65 +0,0 @@ -// Package endpoints validates regional endpoints for services. -package endpoints - -//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go -//go:generate gofmt -s -w endpoints_map.go - -import ( - "fmt" - "regexp" - "strings" -) - -// NormalizeEndpoint takes and endpoint and service API information to return a -// normalized endpoint and signing region. If the endpoint is not an empty string -// the service name and region will be used to look up the service's API endpoint. -// If the endpoint is provided the scheme will be added if it is not present. -func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) { - if endpoint == "" { - return EndpointForRegion(serviceName, region, disableSSL) - } - - return AddScheme(endpoint, disableSSL), "" -} - -// EndpointForRegion returns an endpoint and its signing region for a service and region. -// if the service and region pair are not found endpoint and signingRegion will be empty. 
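The WriteAtBuffer above accepts overlapping and out-of-order writes, growing its backing slice on demand; a minimal usage sketch:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        buf := aws.NewWriteAtBuffer(nil)
        buf.WriteAt([]byte("world"), 6) // sparse write grows the buffer to length 11
        buf.WriteAt([]byte("hello "), 0)
        fmt.Printf("%s\n", buf.Bytes()) // hello world
    }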
-func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) { - derivedKeys := []string{ - region + "/" + svcName, - region + "/*", - "*/" + svcName, - "*/*", - } - - for _, key := range derivedKeys { - if val, ok := endpointsMap.Endpoints[key]; ok { - ep := val.Endpoint - ep = strings.Replace(ep, "{region}", region, -1) - ep = strings.Replace(ep, "{service}", svcName, -1) - - endpoint = ep - signingRegion = val.SigningRegion - break - } - } - - return AddScheme(endpoint, disableSSL), signingRegion -} - -// Regular expression to determine if the endpoint string is prefixed with a scheme. -var schemeRE = regexp.MustCompile("^([^:]+)://") - -// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no -// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. -func AddScheme(endpoint string, disableSSL bool) string { - if endpoint != "" && !schemeRE.MatchString(endpoint) { - scheme := "https" - if disableSSL { - scheme = "http" - } - endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) - } - - return endpoint -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json deleted file mode 100644 index 5f4991c2b..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "version": 2, - "endpoints": { - "*/*": { - "endpoint": "{service}.{region}.amazonaws.com" - }, - "cn-north-1/*": { - "endpoint": "{service}.{region}.amazonaws.com.cn", - "signatureVersion": "v4" - }, - "cn-north-1/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "us-gov-west-1/iam": { - "endpoint": "iam.us-gov.amazonaws.com" - }, - "us-gov-west-1/sts": { - "endpoint": "sts.us-gov-west-1.amazonaws.com" - }, - "us-gov-west-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "us-gov-west-1/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "*/cloudfront": { - "endpoint": "cloudfront.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/cloudsearchdomain": { - "endpoint": "", - "signingRegion": "us-east-1" - }, - "*/data.iot": { - "endpoint": "", - "signingRegion": "us-east-1" - }, - "*/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "*/iam": { - "endpoint": "iam.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/importexport": { - "endpoint": "importexport.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/route53": { - "endpoint": "route53.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/sts": { - "endpoint": "sts.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/waf": { - "endpoint": "waf.amazonaws.com", - "signingRegion": "us-east-1" - }, - "us-east-1/sdb": { - "endpoint": "sdb.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "us-east-1/s3": { - "endpoint": "s3.amazonaws.com" - }, - "eu-central-1/s3": { - "endpoint": "{service}.{region}.amazonaws.com" - } - } -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go deleted file mode 100644 index e995315ab..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go +++ /dev/null @@ -1,88 +0,0 @@ -package endpoints - -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
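A standalone sketch of the derived-key lookup order used by EndpointForRegion above: the most specific region/service pair wins, falling back to */* (plain Go, not the private endpoints package):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        endpoints := map[string]string{
            "*/*":          "{service}.{region}.amazonaws.com",
            "us-east-1/s3": "s3.amazonaws.com",
        }

        svc, region := "s3", "us-east-1"
        for _, key := range []string{region + "/" + svc, region + "/*", "*/" + svc, "*/*"} {
            if ep, ok := endpoints[key]; ok {
                ep = strings.Replace(ep, "{service}", svc, -1)
                ep = strings.Replace(ep, "{region}", region, -1)
                fmt.Println("https://" + ep) // https://s3.amazonaws.com
                break
            }
        }
    }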
- -type endpointStruct struct { - Version int - Endpoints map[string]endpointEntry -} - -type endpointEntry struct { - Endpoint string - SigningRegion string -} - -var endpointsMap = endpointStruct{ - Version: 2, - Endpoints: map[string]endpointEntry{ - "*/*": { - Endpoint: "{service}.{region}.amazonaws.com", - }, - "*/cloudfront": { - Endpoint: "cloudfront.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/cloudsearchdomain": { - Endpoint: "", - SigningRegion: "us-east-1", - }, - "*/data.iot": { - Endpoint: "", - SigningRegion: "us-east-1", - }, - "*/ec2metadata": { - Endpoint: "http://169.254.169.254/latest", - }, - "*/iam": { - Endpoint: "iam.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/importexport": { - Endpoint: "importexport.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/route53": { - Endpoint: "route53.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "*/sts": { - Endpoint: "sts.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/waf": { - Endpoint: "waf.amazonaws.com", - SigningRegion: "us-east-1", - }, - "cn-north-1/*": { - Endpoint: "{service}.{region}.amazonaws.com.cn", - }, - "cn-north-1/ec2metadata": { - Endpoint: "http://169.254.169.254/latest", - }, - "eu-central-1/s3": { - Endpoint: "{service}.{region}.amazonaws.com", - }, - "us-east-1/s3": { - Endpoint: "s3.amazonaws.com", - }, - "us-east-1/sdb": { - Endpoint: "sdb.amazonaws.com", - SigningRegion: "us-east-1", - }, - "us-gov-west-1/ec2metadata": { - Endpoint: "http://169.254.169.254/latest", - }, - "us-gov-west-1/iam": { - Endpoint: "iam.us-gov.amazonaws.com", - }, - "us-gov-west-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "us-gov-west-1/sts": { - Endpoint: "sts.us-gov-west-1.amazonaws.com", - }, - }, -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go deleted file mode 100644 index 53831dff9..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go +++ /dev/null @@ -1,75 +0,0 @@ -package protocol - -import ( - "crypto/rand" - "fmt" - "reflect" -) - -// RandReader is the random reader the protocol package will use to read -// random bytes from. This is exported for testing, and should not be used. -var RandReader = rand.Reader - -const idempotencyTokenFillTag = `idempotencyToken` - -// CanSetIdempotencyToken returns true if the struct field should be -// automatically populated with a Idempotency token. -// -// Only *string and string type fields that are tagged with idempotencyToken -// which are not already set can be auto filled. -func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { - switch u := v.Interface().(type) { - // To auto fill an Idempotency token the field must be a string, - // tagged for auto fill, and have a zero value. - case *string: - return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - case string: - return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - } - - return false -} - -// GetIdempotencyToken returns a randomly generated idempotency token. -func GetIdempotencyToken() string { - b := make([]byte, 16) - RandReader.Read(b) - - return UUIDVersion4(b) -} - -// SetIdempotencyToken will set the value provided with a Idempotency Token. -// Given that the value can be set. Will panic if value is not setable. 
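GetIdempotencyToken above delegates to UUIDVersion4 (defined just below), which fixes the version and variant bits of otherwise random bytes; a self-contained sketch of the same transformation:

    package main

    import (
        "crypto/rand"
        "fmt"
    )

    func main() {
        b := make([]byte, 16)
        if _, err := rand.Read(b); err != nil {
            panic(err)
        }
        b[6] = (b[6] | 0x40) & 0x4F // version 4
        b[8] = (b[8] | 0x80) & 0xBF // RFC 4122 variant
        fmt.Printf("%X-%X-%X-%X-%X\n", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
    }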
-func SetIdempotencyToken(v reflect.Value) { - if v.Kind() == reflect.Ptr { - if v.IsNil() && v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = reflect.Indirect(v) - - if !v.CanSet() { - panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) - } - - b := make([]byte, 16) - _, err := rand.Read(b) - if err != nil { - // TODO handle error - return - } - - v.Set(reflect.ValueOf(UUIDVersion4(b))) -} - -// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided -func UUIDVersion4(u []byte) string { - // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 - // 13th character is "4" - u[6] = (u[6] | 0x40) & 0x4F - // 17th character is "8", "9", "a", or "b" - u[8] = (u[8] | 0x80) & 0xBF - - return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go deleted file mode 100644 index 7ad674278..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ /dev/null @@ -1,254 +0,0 @@ -// Package jsonutil provides JSON serialisation of AWS requests and responses. -package jsonutil - -import ( - "bytes" - "encoding/base64" - "fmt" - "reflect" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -var timeType = reflect.ValueOf(time.Time{}).Type() -var byteSliceType = reflect.ValueOf([]byte{}).Type() - -// BuildJSON builds a JSON string for a given object v. -func BuildJSON(v interface{}) ([]byte, error) { - var buf bytes.Buffer - - err := buildAny(reflect.ValueOf(v), &buf, "") - return buf.Bytes(), err -} - -func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - value = reflect.Indirect(value) - if !value.IsValid() { - return nil - } - - vtype := value.Type() - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if value.Type() != timeType { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return buildStruct(value, buf, tag) - case "list": - return buildList(value, buf, tag) - case "map": - return buildMap(value, buf, tag) - default: - return buildScalar(value, buf, tag) - } -} - -func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - buf.WriteByte('{') - - t := value.Type() - first := true - for i := 0; i < t.NumField(); i++ { - member := value.Field(i) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("json") == "-" { - continue - } - if field.Tag.Get("location") != "" { - continue // ignore non-body elements - } - - if protocol.CanSetIdempotencyToken(member, field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(&token) - } - - if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() 
{ - continue // ignore unset fields - } - - if first { - first = false - } else { - buf.WriteByte(',') - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - writeString(name, buf) - buf.WriteString(`:`) - - err := buildAny(member, buf, field.Tag) - if err != nil { - return err - } - - } - - buf.WriteString("}") - - return nil -} - -func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("[") - - for i := 0; i < value.Len(); i++ { - buildAny(value.Index(i), buf, "") - - if i < value.Len()-1 { - buf.WriteString(",") - } - } - - buf.WriteString("]") - - return nil -} - -type sortedValues []reflect.Value - -func (sv sortedValues) Len() int { return len(sv) } -func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } - -func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("{") - - var sv sortedValues = value.MapKeys() - sort.Sort(sv) - - for i, k := range sv { - if i > 0 { - buf.WriteByte(',') - } - - writeString(k.String(), buf) - buf.WriteString(`:`) - - buildAny(value.MapIndex(k), buf, "") - } - - buf.WriteString("}") - - return nil -} - -func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - switch value.Kind() { - case reflect.String: - writeString(value.String(), buf) - case reflect.Bool: - buf.WriteString(strconv.FormatBool(value.Bool())) - case reflect.Int64: - buf.WriteString(strconv.FormatInt(value.Int(), 10)) - case reflect.Float64: - buf.WriteString(strconv.FormatFloat(value.Float(), 'f', -1, 64)) - default: - switch value.Type() { - case timeType: - converted := value.Interface().(time.Time) - buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10)) - case byteSliceType: - if !value.IsNil() { - converted := value.Interface().([]byte) - buf.WriteByte('"') - if len(converted) < 1024 { - // for small buffers, using Encode directly is much faster. - dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) - base64.StdEncoding.Encode(dst, converted) - buf.Write(dst) - } else { - // for large buffers, avoid unnecessary extra temporary - // buffer space. - enc := base64.NewEncoder(base64.StdEncoding, buf) - enc.Write(converted) - enc.Close() - } - buf.WriteByte('"') - } - default: - return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) - } - } - return nil -} - -func writeString(s string, buf *bytes.Buffer) { - buf.WriteByte('"') - for _, r := range s { - if r == '"' { - buf.WriteString(`\"`) - } else if r == '\\' { - buf.WriteString(`\\`) - } else if r == '\b' { - buf.WriteString(`\b`) - } else if r == '\f' { - buf.WriteString(`\f`) - } else if r == '\r' { - buf.WriteString(`\r`) - } else if r == '\t' { - buf.WriteString(`\t`) - } else if r == '\n' { - buf.WriteString(`\n`) - } else if r < 32 { - fmt.Fprintf(buf, "\\u%0.4x", r) - } else { - buf.WriteRune(r) - } - } - buf.WriteByte('"') -} - -// Returns the reflection element of a value, if it is a pointer. 
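A sketch of BuildJSON above honoring the locationName tag, assuming the upstream private/protocol/json/jsonutil import path; Input is a hypothetical API shape:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
    )

    // Input mimics a generated API shape; locationName renames the JSON key.
    type Input struct {
        _     struct{} `type:"structure"`
        Table *string  `locationName:"TableName" type:"string"`
    }

    func main() {
        name := "users"
        out, err := jsonutil.BuildJSON(&Input{Table: &name})
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(string(out)) // {"TableName":"users"}
    }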
-func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go deleted file mode 100644 index fea535613..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ /dev/null @@ -1,213 +0,0 @@ -package jsonutil - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "reflect" - "time" -) - -// UnmarshalJSON reads a stream and unmarshals the results in object v. -func UnmarshalJSON(v interface{}, stream io.Reader) error { - var out interface{} - - b, err := ioutil.ReadAll(stream) - if err != nil { - return err - } - - if len(b) == 0 { - return nil - } - - if err := json.Unmarshal(b, &out); err != nil { - return err - } - - return unmarshalAny(reflect.ValueOf(v), out, "") -} - -func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { - vtype := value.Type() - if vtype.Kind() == reflect.Ptr { - vtype = vtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if _, ok := value.Interface().(*time.Time); !ok { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return unmarshalStruct(value, data, tag) - case "list": - return unmarshalList(value, data, tag) - case "map": - return unmarshalMap(value, data, tag) - default: - return unmarshalScalar(value, data, tag) - } -} - -func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a structure (%#v)", data) - } - - t := value.Type() - if value.Kind() == reflect.Ptr { - if value.IsNil() { // create the structure if it's nil - s := reflect.New(value.Type().Elem()) - value.Set(s) - value = s - } - - value = value.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return unmarshalAny(value.FieldByName(payload), data, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if field.PkgPath != "" { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - member := value.FieldByIndex(field.Index) - err := unmarshalAny(member, mapData[name], field.Tag) - if err != nil { - return err - } - } - return nil -} - -func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - listData, ok := data.([]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a list (%#v)", data) - } - - if value.IsNil() { - l := len(listData) - value.Set(reflect.MakeSlice(value.Type(), l, l)) - } - - for i, c := range listData { - err := unmarshalAny(value.Index(i), c, "") - if err != nil { - return err - } - } - - return nil -} - -func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) 
error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a map (%#v)", data) - } - - if value.IsNil() { - value.Set(reflect.MakeMap(value.Type())) - } - - for k, v := range mapData { - kvalue := reflect.ValueOf(k) - vvalue := reflect.New(value.Type().Elem()).Elem() - - unmarshalAny(vvalue, v, "") - value.SetMapIndex(kvalue, vvalue) - } - - return nil -} - -func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { - errf := func() error { - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - - switch d := data.(type) { - case nil: - return nil // nothing to do here - case string: - switch value.Interface().(type) { - case *string: - value.Set(reflect.ValueOf(&d)) - case []byte: - b, err := base64.StdEncoding.DecodeString(d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(b)) - default: - return errf() - } - case float64: - switch value.Interface().(type) { - case *int64: - di := int64(d) - value.Set(reflect.ValueOf(&di)) - case *float64: - value.Set(reflect.ValueOf(&d)) - case *time.Time: - t := time.Unix(int64(d), 0).UTC() - value.Set(reflect.ValueOf(&t)) - default: - return errf() - } - case bool: - switch value.Interface().(type) { - case *bool: - value.Set(reflect.ValueOf(&d)) - default: - return errf() - } - default: - return fmt.Errorf("unsupported JSON value (%v)", data) - } - return nil -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go deleted file mode 100644 index 7aff0e0fa..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go +++ /dev/null @@ -1,111 +0,0 @@ -// Package jsonrpc provides JSON RPC utilities for serialisation of AWS -// requests and responses. -package jsonrpc - -//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go -//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go - -import ( - "encoding/json" - "io/ioutil" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -var emptyJSON = []byte("{}") - -// BuildHandler is a named request handler for building jsonrpc protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build} - -// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta} - -// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError} - -// Build builds a JSON payload for a JSON RPC request. 
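Generated JSON RPC clients wire these named handlers into the Handlers lists from handlers.go; a minimal sketch using the upstream import paths:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
    )

    func main() {
        var h request.Handlers
        h.Build.PushBackNamed(jsonrpc.BuildHandler)
        h.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
        h.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
        h.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
        fmt.Println("build handlers:", h.Build.Len())
    }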
-func Build(req *request.Request) { - var buf []byte - var err error - if req.ParamsFilled() { - buf, err = jsonutil.BuildJSON(req.Params) - if err != nil { - req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err) - return - } - } else { - buf = emptyJSON - } - - if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { - req.SetBufferBody(buf) - } - - if req.ClientInfo.TargetPrefix != "" { - target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name - req.HTTPRequest.Header.Add("X-Amz-Target", target) - } - if req.ClientInfo.JSONVersion != "" { - jsonVersion := req.ClientInfo.JSONVersion - req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion) - } -} - -// Unmarshal unmarshals a response for a JSON RPC service. -func Unmarshal(req *request.Request) { - defer req.HTTPResponse.Body.Close() - if req.DataFilled() { - err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) - if err != nil { - req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err) - } - } - return -} - -// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. -func UnmarshalMeta(req *request.Request) { - rest.UnmarshalMeta(req) -} - -// UnmarshalError unmarshals an error response for a JSON RPC service. -func UnmarshalError(req *request.Request) { - defer req.HTTPResponse.Body.Close() - bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) - if err != nil { - req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err) - return - } - if len(bodyBytes) == 0 { - req.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", req.HTTPResponse.Status, nil), - req.HTTPResponse.StatusCode, - "", - ) - return - } - var jsonErr jsonErrorResponse - if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { - req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err) - return - } - - codes := strings.SplitN(jsonErr.Code, "#", 2) - req.Error = awserr.NewRequestFailure( - awserr.New(codes[len(codes)-1], jsonErr.Message, nil), - req.HTTPResponse.StatusCode, - req.RequestID, - ) -} - -type jsonErrorResponse struct { - Code string `json:"__type"` - Message string `json:"message"` -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go deleted file mode 100644 index 5f412516d..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ /dev/null @@ -1,256 +0,0 @@ -// Package rest provides RESTful serialization of AWS requests and responses. -package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "net/http" - "net/url" - "path" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// RFC822 returns an RFC822 formatted timestamp for AWS protocols -const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" - -// Whether the byte value can be sent without escaping in AWS URLs -var noEscape [256]bool - -var errValueNotSet = fmt.Errorf("value not set") - -func init() { - for i := 0; i < len(noEscape); i++ { - // AWS expects every character except these to be escaped - noEscape[i] = (i >= 'A' && i <= 'Z') || - (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || - i == '-' || - i == '.' 
|| - i == '_' || - i == '~' - } -} - -// BuildHandler is a named request handler for building rest protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} - -// Build builds the REST component of a service request. -func Build(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v) - buildBody(r, v) - } -} - -func buildLocationElements(r *request.Request, v reflect.Value) { - query := r.HTTPRequest.URL.Query() - - for i := 0; i < v.NumField(); i++ { - m := v.Field(i) - if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - field := v.Type().Field(i) - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - if m.Kind() == reflect.Ptr { - m = m.Elem() - } - if !m.IsValid() { - continue - } - - var err error - switch field.Tag.Get("location") { - case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) - case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name) - case "uri": - err = buildURI(r.HTTPRequest.URL, m, name) - case "querystring": - err = buildQueryString(query, m, name) - } - r.Error = err - } - if r.Error != nil { - return - } - } - - r.HTTPRequest.URL.RawQuery = query.Encode() - updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) -} - -func buildBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := reflect.Indirect(v.FieldByName(payloadName)) - if payload.IsValid() && payload.Interface() != nil { - switch reader := payload.Interface().(type) { - case io.ReadSeeker: - r.SetReaderBody(reader) - case []byte: - r.SetBufferBody(reader) - case string: - r.SetStringBody(reader) - default: - r.Error = awserr.New("SerializationError", - "failed to encode REST request", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } -} - -func buildHeader(header *http.Header, v reflect.Value, name string) error { - str, err := convertType(v) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - - header.Add(name, str) - - return nil -} - -func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { - for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key)) - if err == errValueNotSet { - continue - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - - } - - header.Add(prefix+key.String(), str) - } - return nil -} - -func buildURI(u *url.URL, v reflect.Value, name string) error { - value, err := convertType(v) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - - uri := u.Path - uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) - uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) - u.Path = uri - - return nil -} - -func buildQueryString(query url.Values, v reflect.Value, name string) error { - switch value := v.Interface().(type) { - case []*string: - for _, item := range value { - query.Add(name, *item) - } - case map[string]*string: - for key, item := range value { - 
query.Add(key, *item) - } - case map[string][]*string: - for key, items := range value { - for _, item := range items { - query.Add(key, *item) - } - } - default: - str, err := convertType(v) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - query.Set(name, str) - } - - return nil -} - -func updatePath(url *url.URL, urlPath string) { - scheme, query := url.Scheme, url.RawQuery - - hasSlash := strings.HasSuffix(urlPath, "/") - - // clean up path - urlPath = path.Clean(urlPath) - if hasSlash && !strings.HasSuffix(urlPath, "/") { - urlPath += "/" - } - - // get formatted URL minus scheme so we can build this into Opaque - url.Scheme, url.Path, url.RawQuery = "", "", "" - s := url.String() - url.Scheme = scheme - url.RawQuery = query - - // build opaque URI - url.Opaque = s + urlPath -} - -// EscapePath escapes part of a URL path in Amazon style -func EscapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -func convertType(v reflect.Value) (string, error) { - v = reflect.Indirect(v) - if !v.IsValid() { - return "", errValueNotSet - } - - var str string - switch value := v.Interface().(type) { - case string: - str = value - case []byte: - str = base64.StdEncoding.EncodeToString(value) - case bool: - str = strconv.FormatBool(value) - case int64: - str = strconv.FormatInt(value, 10) - case float64: - str = strconv.FormatFloat(value, 'f', -1, 64) - case time.Time: - str = value.UTC().Format(RFC822) - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return "", err - } - return str, nil -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go deleted file mode 100644 index 4366de2e1..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go +++ /dev/null @@ -1,45 +0,0 @@ -package rest - -import "reflect" - -// PayloadMember returns the payload field member of i if there is one, or nil. -func PayloadMember(i interface{}) interface{} { - if i == nil { - return nil - } - - v := reflect.ValueOf(i).Elem() - if !v.IsValid() { - return nil - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - field, _ := v.Type().FieldByName(payloadName) - if field.Tag.Get("type") != "structure" { - return nil - } - - payload := v.FieldByName(payloadName) - if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { - return payload.Interface() - } - } - } - return nil -} - -// PayloadType returns the type of a payload field member of i if there is one, or "". 
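EscapePath is the one exported piece of the builder above, and the encodeSep flag is what distinguishes greedy {key+} URI segments from plain {key} ones. A small sketch against the vendored import path, with a made-up input string:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/private/protocol/rest"
    )

    func main() {
        // Everything outside [A-Za-z0-9-._~] is percent-encoded; "/" survives
        // only when encodeSep is false (greedy path segments).
        fmt.Println(rest.EscapePath("log group/app=1", false)) // log%20group/app%3D1
        fmt.Println(rest.EscapePath("log group/app=1", true))  // log%20group%2Fapp%3D1
    }
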
-func PayloadType(i interface{}) string { - v := reflect.Indirect(reflect.ValueOf(i)) - if !v.IsValid() { - return "" - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - if member, ok := v.Type().FieldByName(payloadName); ok { - return member.Tag.Get("type") - } - } - } - return "" -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go deleted file mode 100644 index 2cba1d9aa..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ /dev/null @@ -1,198 +0,0 @@ -package rest - -import ( - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals the REST component of a response in a REST service. -func Unmarshal(r *request.Request) { - if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalBody(r, v) - } -} - -// UnmarshalMeta unmarshals the REST metadata of a response in a REST service -func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") - if r.RequestID == "" { - // Alternative version of request id in the header - r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") - } - if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalLocationElements(r, v) - } -} - -func unmarshalBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := v.FieldByName(payloadName) - if payload.IsValid() { - switch payload.Interface().(type) { - case []byte: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - payload.Set(reflect.ValueOf(b)) - } - case *string: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - str := string(b) - payload.Set(reflect.ValueOf(&str)) - } - default: - switch payload.Type().String() { - case "io.ReadSeeker": - payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) - case "aws.ReadSeekCloser", "io.ReadCloser": - payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) - default: - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - defer r.HTTPResponse.Body.Close() - r.Error = awserr.New("SerializationError", - "failed to decode REST response", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } - } -} - -func unmarshalLocationElements(r *request.Request, v reflect.Value) { - for i := 0; i < v.NumField(); i++ { - m, field := v.Field(i), v.Type().Field(i) 
- if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - - switch field.Tag.Get("location") { - case "statusCode": - unmarshalStatusCode(m, r.HTTPResponse.StatusCode) - case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break - } - case "headers": - prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break - } - } - } - if r.Error != nil { - return - } - } -} - -func unmarshalStatusCode(v reflect.Value, statusCode int) { - if !v.IsValid() { - return - } - - switch v.Interface().(type) { - case *int64: - s := int64(statusCode) - v.Set(reflect.ValueOf(&s)) - } -} - -func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { - switch r.Interface().(type) { - case map[string]*string: // we only support string map value types - out := map[string]*string{} - for k, v := range headers { - k = http.CanonicalHeaderKey(k) - if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { - out[k[len(prefix):]] = &v[0] - } - } - r.Set(reflect.ValueOf(out)) - } - return nil -} - -func unmarshalHeader(v reflect.Value, header string) error { - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil - } - - switch v.Interface().(type) { - case *string: - v.Set(reflect.ValueOf(&header)) - case []byte: - b, err := base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *bool: - b, err := strconv.ParseBool(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *int64: - i, err := strconv.ParseInt(header, 10, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&i)) - case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&f)) - case *time.Time: - t, err := time.Parse(RFC822, header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&t)) - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return err - } - return nil -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go deleted file mode 100644 index da1a68111..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go +++ /dev/null @@ -1,21 +0,0 @@ -package protocol - -import ( - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body -var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} - -// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. 
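unmarshalHeaderMap above is what turns prefixed response headers (for example X-Amz-Meta-*) back into a map keyed by the suffix of the header name. The helper is unexported, so this is a standalone restatement of the same logic rather than a call into the package; the header names are examples:

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    // headerMap mirrors the removed unmarshalHeaderMap: collect headers sharing
    // a prefix into a map keyed by the remainder of the canonical name.
    func headerMap(h http.Header, prefix string) map[string]*string {
        out := map[string]*string{}
        for k, v := range h {
            k = http.CanonicalHeaderKey(k)
            if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
                out[k[len(prefix):]] = &v[0]
            }
        }
        return out
    }

    func main() {
        h := http.Header{}
        h.Set("X-Amz-Meta-Owner", "builder")
        h.Set("Content-Type", "text/plain")
        for k, v := range headerMap(h, "X-Amz-Meta-") {
            fmt.Println(k, "=", *v) // Owner = builder
        }
    }
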
-func UnmarshalDiscardBody(r *request.Request) { - if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { - return - } - - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go b/vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go deleted file mode 100644 index 244c86da0..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go +++ /dev/null @@ -1,82 +0,0 @@ -package v4 - -import ( - "net/http" - "strings" -) - -// validator houses a set of rule needed for validation of a -// string value -type rules []rule - -// rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that rule -type rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// mapRule generic rule for maps -type mapRule map[string]struct{} - -// IsValid for the map rule satisfies whether it exists in the map -func (m mapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// whitelist is a generic rule for whitelisting -type whitelist struct { - rule -} - -// IsValid for whitelist checks if the value is within the whitelist -func (w whitelist) IsValid(value string) bool { - return w.rule.IsValid(value) -} - -// blacklist is a generic rule for blacklisting -type blacklist struct { - rule -} - -// IsValid for whitelist checks if the value is within the whitelist -func (b blacklist) IsValid(value string) bool { - return !b.rule.IsValid(value) -} - -type patterns []string - -// IsValid for patterns checks each pattern and returns if a match has -// been found -func (p patterns) IsValid(value string) bool { - for _, pattern := range p { - if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { - return true - } - } - return false -} - -// inclusiveRules rules allow for rules to depend on one another -type inclusiveRules []rule - -// IsValid will return true if all rules are true -func (r inclusiveRules) IsValid(value string) bool { - for _, rule := range r { - if !rule.IsValid(value) { - return false - } - } - return true -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go b/vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go deleted file mode 100644 index 476580056..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go +++ /dev/null @@ -1,465 +0,0 @@ -// Package v4 implements signing for AWS V4 signer -package v4 - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -const ( - authHeaderPrefix = "AWS4-HMAC-SHA256" - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" -) - -var ignoredHeaders = rules{ - blacklist{ - mapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - }, - }, -} - -// requiredSignedHeaders is a whitelist for build canonical headers. 
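The rule combinators above compose by wrapping: blacklist negates its inner rule, patterns prefix-matches canonical header names, and inclusiveRules requires every member to pass. They are all unexported, so the sketch below restates just enough of them to show how a rule shaped like allowedQueryHoisting evaluates; the header names are examples and only "Range" stands in for the whole required-signed-headers set:

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    type rule interface{ IsValid(string) bool }

    type mapRule map[string]struct{}

    func (m mapRule) IsValid(v string) bool { _, ok := m[v]; return ok }

    type blacklist struct{ rule }

    func (b blacklist) IsValid(v string) bool { return !b.rule.IsValid(v) }

    type patterns []string

    func (p patterns) IsValid(v string) bool {
        for _, pat := range p {
            if strings.HasPrefix(http.CanonicalHeaderKey(v), pat) {
                return true
            }
        }
        return false
    }

    type inclusiveRules []rule

    func (r inclusiveRules) IsValid(v string) bool {
        for _, ru := range r {
            if !ru.IsValid(v) {
                return false
            }
        }
        return true
    }

    func main() {
        // Hoist X-Amz-* headers into the query string unless they must be signed.
        hoist := inclusiveRules{blacklist{mapRule{"Range": {}}}, patterns{"X-Amz-"}}
        fmt.Println(hoist.IsValid("X-Amz-Date")) // true: hoistable
        fmt.Println(hoist.IsValid("Range"))      // false: must stay a signed header
    }
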
-var requiredSignedHeaders = rules{ - whitelist{ - mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - }, - }, - patterns{"X-Amz-Meta-"}, -} - -// allowedHoisting is a whitelist for build query headers. The boolean value -// represents whether or not it is a pattern. -var allowedQueryHoisting = inclusiveRules{ - blacklist{requiredSignedHeaders}, - patterns{"X-Amz-"}, -} - -type signer struct { - Request *http.Request - Time time.Time - ExpireTime time.Duration - ServiceName string - Region string - CredValues credentials.Value - Credentials *credentials.Credentials - Query url.Values - Body io.ReadSeeker - Debug aws.LogLevelType - Logger aws.Logger - - isPresign bool - formattedTime string - formattedShortTime string - - signedHeaders string - canonicalHeaders string - canonicalString string - credentialString string - stringToSign string - signature string - authorization string - notHoist bool - signedHeaderVals http.Header -} - -// Sign requests with signature version 4. -// -// Will sign the requests with the service config's Credentials object -// Signing is skipped if the credentials is the credentials.AnonymousCredentials -// object. -func Sign(req *request.Request) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. 
- if req.Config.Credentials == credentials.AnonymousCredentials { - return - } - - region := req.ClientInfo.SigningRegion - if region == "" { - region = aws.StringValue(req.Config.Region) - } - - name := req.ClientInfo.SigningName - if name == "" { - name = req.ClientInfo.ServiceName - } - - s := signer{ - Request: req.HTTPRequest, - Time: req.Time, - ExpireTime: req.ExpireTime, - Query: req.HTTPRequest.URL.Query(), - Body: req.Body, - ServiceName: name, - Region: region, - Credentials: req.Config.Credentials, - Debug: req.Config.LogLevel.Value(), - Logger: req.Config.Logger, - notHoist: req.NotHoist, - } - - req.Error = s.sign() - req.Time = s.Time - req.SignedHeaderVals = s.signedHeaderVals -} - -func (v4 *signer) sign() error { - if v4.ExpireTime != 0 { - v4.isPresign = true - } - - if v4.isRequestSigned() { - if !v4.Credentials.IsExpired() && time.Now().Before(v4.Time.Add(10*time.Minute)) { - // If the request is already signed, and the credentials have not - // expired, and the request is not too old ignore the signing request. - return nil - } - v4.Time = time.Now() - - // The credentials have expired for this request. The current signing - // is invalid, and needs to be request because the request will fail. - if v4.isPresign { - v4.removePresign() - // Update the request's query string to ensure the values stays in - // sync in the case retrieving the new credentials fails. - v4.Request.URL.RawQuery = v4.Query.Encode() - } - } - - var err error - v4.CredValues, err = v4.Credentials.Get() - if err != nil { - return err - } - - if v4.isPresign { - v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) - if v4.CredValues.SessionToken != "" { - v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) - } else { - v4.Query.Del("X-Amz-Security-Token") - } - } else if v4.CredValues.SessionToken != "" { - v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) - } - - v4.build() - - if v4.Debug.Matches(aws.LogDebugWithSigning) { - v4.logSigningInfo() - } - - return nil -} - -const logSignInfoMsg = `DEBUG: Request Signiture: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func (v4 *signer) logSigningInfo() { - signedURLMsg := "" - if v4.isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) - } - msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) - v4.Logger.Log(msg) -} - -func (v4 *signer) build() { - - v4.buildTime() // no depends - v4.buildCredentialString() // no depends - - unsignedHeaders := v4.Request.Header - if v4.isPresign { - if !v4.notHoist { - urlValues := url.Values{} - urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends - for k := range urlValues { - v4.Query[k] = urlValues[k] - } - } - } - - v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) - v4.buildCanonicalString() // depends on canon headers / signed headers - v4.buildStringToSign() // depends on canon string - v4.buildSignature() // depends on string to sign - - if v4.isPresign { - v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature - } else { - parts := []string{ - authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString, - "SignedHeaders=" + v4.signedHeaders, - "Signature=" + v4.signature, - } - 
v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) - } -} - -func (v4 *signer) buildTime() { - v4.formattedTime = v4.Time.UTC().Format(timeFormat) - v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) - - if v4.isPresign { - duration := int64(v4.ExpireTime / time.Second) - v4.Query.Set("X-Amz-Date", v4.formattedTime) - v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) - } -} - -func (v4 *signer) buildCredentialString() { - v4.credentialString = strings.Join([]string{ - v4.formattedShortTime, - v4.Region, - v4.ServiceName, - "aws4_request", - }, "/") - - if v4.isPresign { - v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) - } -} - -func buildQuery(r rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} -func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) { - var headers []string - headers = append(headers, "host") - for k, v := range header { - canonicalKey := http.CanonicalHeaderKey(k) - if !r.IsValid(canonicalKey) { - continue // ignored header - } - if v4.signedHeaderVals == nil { - v4.signedHeaderVals = make(http.Header) - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := v4.signedHeaderVals[lowerCaseKey]; ok { - // include additional values - v4.signedHeaderVals[lowerCaseKey] = append(v4.signedHeaderVals[lowerCaseKey], v...) - continue - } - - headers = append(headers, lowerCaseKey) - v4.signedHeaderVals[lowerCaseKey] = v - } - sort.Strings(headers) - - v4.signedHeaders = strings.Join(headers, ";") - - if v4.isPresign { - v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) - } - - headerValues := make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - headerValues[i] = "host:" + v4.Request.URL.Host - } else { - headerValues[i] = k + ":" + - strings.Join(v4.signedHeaderVals[k], ",") - } - } - - v4.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") -} - -func (v4 *signer) buildCanonicalString() { - v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) - uri := v4.Request.URL.Opaque - if uri != "" { - uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") - } else { - uri = v4.Request.URL.Path - } - if uri == "" { - uri = "/" - } - - if v4.ServiceName != "s3" { - uri = rest.EscapePath(uri, false) - } - - v4.canonicalString = strings.Join([]string{ - v4.Request.Method, - uri, - v4.Request.URL.RawQuery, - v4.canonicalHeaders + "\n", - v4.signedHeaders, - v4.bodyDigest(), - }, "\n") -} - -func (v4 *signer) buildStringToSign() { - v4.stringToSign = strings.Join([]string{ - authHeaderPrefix, - v4.formattedTime, - v4.credentialString, - hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), - }, "\n") -} - -func (v4 *signer) buildSignature() { - secret := v4.CredValues.SecretAccessKey - date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) - region := makeHmac(date, []byte(v4.Region)) - service := makeHmac(region, []byte(v4.ServiceName)) - credentials := makeHmac(service, []byte("aws4_request")) - signature := makeHmac(credentials, []byte(v4.stringToSign)) - v4.signature = hex.EncodeToString(signature) -} - -func (v4 *signer) bodyDigest() string { - hash := v4.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - if v4.isPresign && 
v4.ServiceName == "s3" { - hash = "UNSIGNED-PAYLOAD" - } else if v4.Body == nil { - hash = hex.EncodeToString(makeSha256([]byte{})) - } else { - hash = hex.EncodeToString(makeSha256Reader(v4.Body)) - } - v4.Request.Header.Add("X-Amz-Content-Sha256", hash) - } - return hash -} - -// isRequestSigned returns if the request is currently signed or presigned -func (v4 *signer) isRequestSigned() bool { - if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { - return true - } - if v4.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. -func (v4 *signer) removePresign() { - v4.Query.Del("X-Amz-Algorithm") - v4.Query.Del("X-Amz-Signature") - v4.Query.Del("X-Amz-Security-Token") - v4.Query.Del("X-Amz-Date") - v4.Query.Del("X-Amz-Expires") - v4.Query.Del("X-Amz-Credential") - v4.Query.Del("X-Amz-SignedHeaders") -} - -func makeHmac(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) []byte { - hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) - - io.Copy(hash, reader) - return hash.Sum(nil) -} - -func stripExcessSpaces(headerVals []string) []string { - vals := make([]string, len(headerVals)) - for i, str := range headerVals { - stripped := "" - found := false - str = strings.TrimSpace(str) - for _, c := range str { - if !found && c == ' ' { - stripped += string(c) - found = true - } else if c != ' ' { - stripped += string(c) - found = false - } - } - vals[i] = stripped - } - return vals -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go b/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go deleted file mode 100644 index d5be83323..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go +++ /dev/null @@ -1,3141 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. - -// Package cloudwatchlogs provides a client for Amazon CloudWatch Logs. -package cloudwatchlogs - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -const opCancelExportTask = "CancelExportTask" - -// CancelExportTaskRequest generates a request for the CancelExportTask operation. -func (c *CloudWatchLogs) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { - op := &request.Operation{ - Name: opCancelExportTask, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CancelExportTaskInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &CancelExportTaskOutput{} - req.Data = output - return -} - -// Cancels an export task if it is in PENDING or RUNNING state. -func (c *CloudWatchLogs) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { - req, out := c.CancelExportTaskRequest(input) - err := req.Send() - return out, err -} - -const opCreateExportTask = "CreateExportTask" - -// CreateExportTaskRequest generates a request for the CreateExportTask operation. 
-func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) (req *request.Request, output *CreateExportTaskOutput) { - op := &request.Operation{ - Name: opCreateExportTask, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateExportTaskInput{} - } - - req = c.newRequest(op, input, output) - output = &CreateExportTaskOutput{} - req.Data = output - return -} - -// Creates an ExportTask which allows you to efficiently export data from a -// Log Group to your Amazon S3 bucket. -// -// This is an asynchronous call. If all the required information is provided, -// this API will initiate an export task and respond with the task Id. Once -// started, DescribeExportTasks can be used to get the status of an export task. -// You can only have one active (RUNNING or PENDING) export task at a time, -// per account. -// -// You can export logs from multiple log groups or multiple time ranges to -// the same Amazon S3 bucket. To separate out log data for each export task, -// you can specify a prefix that will be used as the Amazon S3 key prefix for -// all exported objects. -func (c *CloudWatchLogs) CreateExportTask(input *CreateExportTaskInput) (*CreateExportTaskOutput, error) { - req, out := c.CreateExportTaskRequest(input) - err := req.Send() - return out, err -} - -const opCreateLogGroup = "CreateLogGroup" - -// CreateLogGroupRequest generates a request for the CreateLogGroup operation. -func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req *request.Request, output *CreateLogGroupOutput) { - op := &request.Operation{ - Name: opCreateLogGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateLogGroupInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &CreateLogGroupOutput{} - req.Data = output - return -} - -// Creates a new log group with the specified name. The name of the log group -// must be unique within a region for an AWS account. You can create up to 500 -// log groups per account. -// -// You must use the following guidelines when naming a log group: Log group -// names can be between 1 and 512 characters long. Allowed characters are a-z, -// A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period). -func (c *CloudWatchLogs) CreateLogGroup(input *CreateLogGroupInput) (*CreateLogGroupOutput, error) { - req, out := c.CreateLogGroupRequest(input) - err := req.Send() - return out, err -} - -const opCreateLogStream = "CreateLogStream" - -// CreateLogStreamRequest generates a request for the CreateLogStream operation. -func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (req *request.Request, output *CreateLogStreamOutput) { - op := &request.Operation{ - Name: opCreateLogStream, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateLogStreamInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &CreateLogStreamOutput{} - req.Data = output - return -} - -// Creates a new log stream in the specified log group. The name of the log -// stream must be unique within the log group. There is no limit on the number -// of log streams that can exist in a log group. 
-// -// You must use the following guidelines when naming a log stream: Log stream -// names can be between 1 and 512 characters long. The ':' colon character is -// not allowed. -func (c *CloudWatchLogs) CreateLogStream(input *CreateLogStreamInput) (*CreateLogStreamOutput, error) { - req, out := c.CreateLogStreamRequest(input) - err := req.Send() - return out, err -} - -const opDeleteDestination = "DeleteDestination" - -// DeleteDestinationRequest generates a request for the DeleteDestination operation. -func (c *CloudWatchLogs) DeleteDestinationRequest(input *DeleteDestinationInput) (req *request.Request, output *DeleteDestinationOutput) { - op := &request.Operation{ - Name: opDeleteDestination, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteDestinationInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteDestinationOutput{} - req.Data = output - return -} - -// Deletes the destination with the specified name and eventually disables all -// the subscription filters that publish to it. This will not delete the physical -// resource encapsulated by the destination. -func (c *CloudWatchLogs) DeleteDestination(input *DeleteDestinationInput) (*DeleteDestinationOutput, error) { - req, out := c.DeleteDestinationRequest(input) - err := req.Send() - return out, err -} - -const opDeleteLogGroup = "DeleteLogGroup" - -// DeleteLogGroupRequest generates a request for the DeleteLogGroup operation. -func (c *CloudWatchLogs) DeleteLogGroupRequest(input *DeleteLogGroupInput) (req *request.Request, output *DeleteLogGroupOutput) { - op := &request.Operation{ - Name: opDeleteLogGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteLogGroupInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteLogGroupOutput{} - req.Data = output - return -} - -// Deletes the log group with the specified name and permanently deletes all -// the archived log events associated with it. -func (c *CloudWatchLogs) DeleteLogGroup(input *DeleteLogGroupInput) (*DeleteLogGroupOutput, error) { - req, out := c.DeleteLogGroupRequest(input) - err := req.Send() - return out, err -} - -const opDeleteLogStream = "DeleteLogStream" - -// DeleteLogStreamRequest generates a request for the DeleteLogStream operation. -func (c *CloudWatchLogs) DeleteLogStreamRequest(input *DeleteLogStreamInput) (req *request.Request, output *DeleteLogStreamOutput) { - op := &request.Operation{ - Name: opDeleteLogStream, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteLogStreamInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteLogStreamOutput{} - req.Data = output - return -} - -// Deletes a log stream and permanently deletes all the archived log events -// associated with it. -func (c *CloudWatchLogs) DeleteLogStream(input *DeleteLogStreamInput) (*DeleteLogStreamOutput, error) { - req, out := c.DeleteLogStreamRequest(input) - err := req.Send() - return out, err -} - -const opDeleteMetricFilter = "DeleteMetricFilter" - -// DeleteMetricFilterRequest generates a request for the DeleteMetricFilter operation. 
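The naming rules quoted above are easy to trip over from a log-driver's side. A hedged sketch of creating a group and a stream with the SDK vintage vendored here (session.New was the constructor of that era; region and names are examples only):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
        svc := cloudwatchlogs.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

        if _, err := svc.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
            LogGroupName: aws.String("docker-logs"),
        }); err != nil {
            fmt.Println("create group:", err)
        }
        if _, err := svc.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{
            LogGroupName:  aws.String("docker-logs"),
            LogStreamName: aws.String("container-1"), // ':' is not allowed here
        }); err != nil {
            fmt.Println("create stream:", err)
        }
    }
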
-func (c *CloudWatchLogs) DeleteMetricFilterRequest(input *DeleteMetricFilterInput) (req *request.Request, output *DeleteMetricFilterOutput) { - op := &request.Operation{ - Name: opDeleteMetricFilter, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteMetricFilterInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteMetricFilterOutput{} - req.Data = output - return -} - -// Deletes a metric filter associated with the specified log group. -func (c *CloudWatchLogs) DeleteMetricFilter(input *DeleteMetricFilterInput) (*DeleteMetricFilterOutput, error) { - req, out := c.DeleteMetricFilterRequest(input) - err := req.Send() - return out, err -} - -const opDeleteRetentionPolicy = "DeleteRetentionPolicy" - -// DeleteRetentionPolicyRequest generates a request for the DeleteRetentionPolicy operation. -func (c *CloudWatchLogs) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *request.Request, output *DeleteRetentionPolicyOutput) { - op := &request.Operation{ - Name: opDeleteRetentionPolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteRetentionPolicyInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteRetentionPolicyOutput{} - req.Data = output - return -} - -// Deletes the retention policy of the specified log group. Log events would -// not expire if they belong to log groups without a retention policy. -func (c *CloudWatchLogs) DeleteRetentionPolicy(input *DeleteRetentionPolicyInput) (*DeleteRetentionPolicyOutput, error) { - req, out := c.DeleteRetentionPolicyRequest(input) - err := req.Send() - return out, err -} - -const opDeleteSubscriptionFilter = "DeleteSubscriptionFilter" - -// DeleteSubscriptionFilterRequest generates a request for the DeleteSubscriptionFilter operation. -func (c *CloudWatchLogs) DeleteSubscriptionFilterRequest(input *DeleteSubscriptionFilterInput) (req *request.Request, output *DeleteSubscriptionFilterOutput) { - op := &request.Operation{ - Name: opDeleteSubscriptionFilter, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteSubscriptionFilterInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteSubscriptionFilterOutput{} - req.Data = output - return -} - -// Deletes a subscription filter associated with the specified log group. -func (c *CloudWatchLogs) DeleteSubscriptionFilter(input *DeleteSubscriptionFilterInput) (*DeleteSubscriptionFilterOutput, error) { - req, out := c.DeleteSubscriptionFilterRequest(input) - err := req.Send() - return out, err -} - -const opDescribeDestinations = "DescribeDestinations" - -// DescribeDestinationsRequest generates a request for the DescribeDestinations operation. 
-func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinationsInput) (req *request.Request, output *DescribeDestinationsOutput) { - op := &request.Operation{ - Name: opDescribeDestinations, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeDestinationsInput{} - } - - req = c.newRequest(op, input, output) - output = &DescribeDestinationsOutput{} - req.Data = output - return -} - -// Returns all the destinations that are associated with the AWS account making -// the request. The list returned in the response is ASCII-sorted by destination -// name. -// -// By default, this operation returns up to 50 destinations. If there are -// more destinations to list, the response would contain a nextToken value in -// the response body. You can also limit the number of destinations returned -// in the response by specifying the limit parameter in the request. -func (c *CloudWatchLogs) DescribeDestinations(input *DescribeDestinationsInput) (*DescribeDestinationsOutput, error) { - req, out := c.DescribeDestinationsRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) DescribeDestinationsPages(input *DescribeDestinationsInput, fn func(p *DescribeDestinationsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeDestinationsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeDestinationsOutput), lastPage) - }) -} - -const opDescribeExportTasks = "DescribeExportTasks" - -// DescribeExportTasksRequest generates a request for the DescribeExportTasks operation. -func (c *CloudWatchLogs) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { - op := &request.Operation{ - Name: opDescribeExportTasks, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeExportTasksInput{} - } - - req = c.newRequest(op, input, output) - output = &DescribeExportTasksOutput{} - req.Data = output - return -} - -// Returns all the export tasks that are associated with the AWS account making -// the request. The export tasks can be filtered based on TaskId or TaskStatus. -// -// By default, this operation returns up to 50 export tasks that satisfy the -// specified filters. If there are more export tasks to list, the response would -// contain a nextToken value in the response body. You can also limit the number -// of export tasks returned in the response by specifying the limit parameter -// in the request. -func (c *CloudWatchLogs) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { - req, out := c.DescribeExportTasksRequest(input) - err := req.Send() - return out, err -} - -const opDescribeLogGroups = "DescribeLogGroups" - -// DescribeLogGroupsRequest generates a request for the DescribeLogGroups operation. 
-func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) (req *request.Request, output *DescribeLogGroupsOutput) { - op := &request.Operation{ - Name: opDescribeLogGroups, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeLogGroupsInput{} - } - - req = c.newRequest(op, input, output) - output = &DescribeLogGroupsOutput{} - req.Data = output - return -} - -// Returns all the log groups that are associated with the AWS account making -// the request. The list returned in the response is ASCII-sorted by log group -// name. -// -// By default, this operation returns up to 50 log groups. If there are more -// log groups to list, the response would contain a nextToken value in the response -// body. You can also limit the number of log groups returned in the response -// by specifying the limit parameter in the request. -func (c *CloudWatchLogs) DescribeLogGroups(input *DescribeLogGroupsInput) (*DescribeLogGroupsOutput, error) { - req, out := c.DescribeLogGroupsRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) DescribeLogGroupsPages(input *DescribeLogGroupsInput, fn func(p *DescribeLogGroupsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeLogGroupsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeLogGroupsOutput), lastPage) - }) -} - -const opDescribeLogStreams = "DescribeLogStreams" - -// DescribeLogStreamsRequest generates a request for the DescribeLogStreams operation. -func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInput) (req *request.Request, output *DescribeLogStreamsOutput) { - op := &request.Operation{ - Name: opDescribeLogStreams, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeLogStreamsInput{} - } - - req = c.newRequest(op, input, output) - output = &DescribeLogStreamsOutput{} - req.Data = output - return -} - -// Returns all the log streams that are associated with the specified log group. -// The list returned in the response is ASCII-sorted by log stream name. -// -// By default, this operation returns up to 50 log streams. If there are more -// log streams to list, the response would contain a nextToken value in the -// response body. You can also limit the number of log streams returned in the -// response by specifying the limit parameter in the request. This operation -// has a limit of five transactions per second, after which transactions are -// throttled. 
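Each Describe* operation above declares a nextToken/limit paginator, and the generated *Pages helpers simply run EachPage over it, handing the callback one decoded page at a time. For instance, a sketch that lists log group names page by page (region and limit are arbitrary):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
        svc := cloudwatchlogs.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

        // The callback returns false to stop early; returning true pages on
        // until the service stops handing back a nextToken.
        err := svc.DescribeLogGroupsPages(&cloudwatchlogs.DescribeLogGroupsInput{
            Limit: aws.Int64(50),
        }, func(page *cloudwatchlogs.DescribeLogGroupsOutput, lastPage bool) bool {
            for _, g := range page.LogGroups {
                fmt.Println(aws.StringValue(g.LogGroupName))
            }
            return true
        })
        if err != nil {
            fmt.Println("describe:", err)
        }
    }
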
-func (c *CloudWatchLogs) DescribeLogStreams(input *DescribeLogStreamsInput) (*DescribeLogStreamsOutput, error) { - req, out := c.DescribeLogStreamsRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) DescribeLogStreamsPages(input *DescribeLogStreamsInput, fn func(p *DescribeLogStreamsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeLogStreamsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeLogStreamsOutput), lastPage) - }) -} - -const opDescribeMetricFilters = "DescribeMetricFilters" - -// DescribeMetricFiltersRequest generates a request for the DescribeMetricFilters operation. -func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFiltersInput) (req *request.Request, output *DescribeMetricFiltersOutput) { - op := &request.Operation{ - Name: opDescribeMetricFilters, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeMetricFiltersInput{} - } - - req = c.newRequest(op, input, output) - output = &DescribeMetricFiltersOutput{} - req.Data = output - return -} - -// Returns all the metrics filters associated with the specified log group. -// The list returned in the response is ASCII-sorted by filter name. -// -// By default, this operation returns up to 50 metric filters. If there are -// more metric filters to list, the response would contain a nextToken value -// in the response body. You can also limit the number of metric filters returned -// in the response by specifying the limit parameter in the request. -func (c *CloudWatchLogs) DescribeMetricFilters(input *DescribeMetricFiltersInput) (*DescribeMetricFiltersOutput, error) { - req, out := c.DescribeMetricFiltersRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) DescribeMetricFiltersPages(input *DescribeMetricFiltersInput, fn func(p *DescribeMetricFiltersOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeMetricFiltersRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeMetricFiltersOutput), lastPage) - }) -} - -const opDescribeSubscriptionFilters = "DescribeSubscriptionFilters" - -// DescribeSubscriptionFiltersRequest generates a request for the DescribeSubscriptionFilters operation. -func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubscriptionFiltersInput) (req *request.Request, output *DescribeSubscriptionFiltersOutput) { - op := &request.Operation{ - Name: opDescribeSubscriptionFilters, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeSubscriptionFiltersInput{} - } - - req = c.newRequest(op, input, output) - output = &DescribeSubscriptionFiltersOutput{} - req.Data = output - return -} - -// Returns all the subscription filters associated with the specified log group. -// The list returned in the response is ASCII-sorted by filter name. -// -// By default, this operation returns up to 50 subscription filters. 
If there -// are more subscription filters to list, the response would contain a nextToken -// value in the response body. You can also limit the number of subscription -// filters returned in the response by specifying the limit parameter in the -// request. -func (c *CloudWatchLogs) DescribeSubscriptionFilters(input *DescribeSubscriptionFiltersInput) (*DescribeSubscriptionFiltersOutput, error) { - req, out := c.DescribeSubscriptionFiltersRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) DescribeSubscriptionFiltersPages(input *DescribeSubscriptionFiltersInput, fn func(p *DescribeSubscriptionFiltersOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeSubscriptionFiltersRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeSubscriptionFiltersOutput), lastPage) - }) -} - -const opFilterLogEvents = "FilterLogEvents" - -// FilterLogEventsRequest generates a request for the FilterLogEvents operation. -func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (req *request.Request, output *FilterLogEventsOutput) { - op := &request.Operation{ - Name: opFilterLogEvents, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &FilterLogEventsInput{} - } - - req = c.newRequest(op, input, output) - output = &FilterLogEventsOutput{} - req.Data = output - return -} - -// Retrieves log events, optionally filtered by a filter pattern from the specified -// log group. You can provide an optional time range to filter the results on -// the event timestamp. You can limit the streams searched to an explicit list -// of logStreamNames. -// -// By default, this operation returns as much matching log events as can fit -// in a response size of 1MB, up to 10,000 log events, or all the events found -// within a time-bounded scan window. If the response includes a nextToken, -// then there is more data to search, and the search can be resumed with a new -// request providing the nextToken. The response will contain a list of searchedLogStreams -// that contains information about which streams were searched in the request -// and whether they have been searched completely or require further pagination. -// The limit parameter in the request. can be used to specify the maximum number -// of events to return in a page. -func (c *CloudWatchLogs) FilterLogEvents(input *FilterLogEventsInput) (*FilterLogEventsOutput, error) { - req, out := c.FilterLogEventsRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) FilterLogEventsPages(input *FilterLogEventsInput, fn func(p *FilterLogEventsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.FilterLogEventsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*FilterLogEventsOutput), lastPage) - }) -} - -const opGetLogEvents = "GetLogEvents" - -// GetLogEventsRequest generates a request for the GetLogEvents operation. 
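FilterLogEvents above is the group-wide search: a filter pattern, an optional time range, and resumable pagination across many streams. A sketch using its Pages helper; the group name and pattern are examples:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
        svc := cloudwatchlogs.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

        err := svc.FilterLogEventsPages(&cloudwatchlogs.FilterLogEventsInput{
            LogGroupName:  aws.String("docker-logs"),
            FilterPattern: aws.String("ERROR"),
        }, func(page *cloudwatchlogs.FilterLogEventsOutput, lastPage bool) bool {
            for _, e := range page.Events {
                fmt.Println(aws.StringValue(e.LogStreamName), aws.StringValue(e.Message))
            }
            return true // keep paging; EachPage stops after the final page
        })
        if err != nil {
            fmt.Println("filter:", err)
        }
    }
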
-func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *request.Request, output *GetLogEventsOutput) { - op := &request.Operation{ - Name: opGetLogEvents, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextForwardToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetLogEventsInput{} - } - - req = c.newRequest(op, input, output) - output = &GetLogEventsOutput{} - req.Data = output - return -} - -// Retrieves log events from the specified log stream. You can provide an optional -// time range to filter the results on the event timestamp. -// -// By default, this operation returns as much log events as can fit in a response -// size of 1MB, up to 10,000 log events. The response will always include a -// nextForwardToken and a nextBackwardToken in the response body. You can use -// any of these tokens in subsequent GetLogEvents requests to paginate through -// events in either forward or backward direction. You can also limit the number -// of log events returned in the response by specifying the limit parameter -// in the request. -func (c *CloudWatchLogs) GetLogEvents(input *GetLogEventsInput) (*GetLogEventsOutput, error) { - req, out := c.GetLogEventsRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) GetLogEventsPages(input *GetLogEventsInput, fn func(p *GetLogEventsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.GetLogEventsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*GetLogEventsOutput), lastPage) - }) -} - -const opPutDestination = "PutDestination" - -// PutDestinationRequest generates a request for the PutDestination operation. -func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req *request.Request, output *PutDestinationOutput) { - op := &request.Operation{ - Name: opPutDestination, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutDestinationInput{} - } - - req = c.newRequest(op, input, output) - output = &PutDestinationOutput{} - req.Data = output - return -} - -// Creates or updates a Destination. A destination encapsulates a physical resource -// (such as a Kinesis stream) and allows you to subscribe to a real-time stream -// of log events of a different account, ingested through PutLogEvents requests. -// Currently, the only supported physical resource is a Amazon Kinesis stream -// belonging to the same account as the destination. -// -// A destination controls what is written to its Amazon Kinesis stream through -// an access policy. By default, PutDestination does not set any access policy -// with the destination, which means a cross-account user will not be able to -// call PutSubscriptionFilter against this destination. To enable that, the -// destination owner must call PutDestinationPolicy after PutDestination. -func (c *CloudWatchLogs) PutDestination(input *PutDestinationInput) (*PutDestinationOutput, error) { - req, out := c.PutDestinationRequest(input) - err := req.Send() - return out, err -} - -const opPutDestinationPolicy = "PutDestinationPolicy" - -// PutDestinationPolicyRequest generates a request for the PutDestinationPolicy operation. 
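GetLogEvents pages differently from the filtered search: the caller picks a direction with StartFromHead, and the end of the stream is signalled by the forward token repeating itself rather than going nil. A sketch of the forward loop (group and stream names are examples):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
        svc := cloudwatchlogs.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

        input := &cloudwatchlogs.GetLogEventsInput{
            LogGroupName:  aws.String("docker-logs"),
            LogStreamName: aws.String("container-1"),
            StartFromHead: aws.Bool(true),
        }
        for {
            out, err := svc.GetLogEvents(input)
            if err != nil {
                fmt.Println("get events:", err)
                return
            }
            for _, e := range out.Events {
                fmt.Println(aws.StringValue(e.Message))
            }
            // An unchanged nextForwardToken means the end of the stream.
            if out.NextForwardToken == nil ||
                aws.StringValue(out.NextForwardToken) == aws.StringValue(input.NextToken) {
                break
            }
            input.NextToken = out.NextForwardToken
        }
    }
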
-const opPutDestinationPolicy = "PutDestinationPolicy"
-
-// PutDestinationPolicyRequest generates a request for the PutDestinationPolicy operation.
-func (c *CloudWatchLogs) PutDestinationPolicyRequest(input *PutDestinationPolicyInput) (req *request.Request, output *PutDestinationPolicyOutput) {
-    op := &request.Operation{
-        Name:       opPutDestinationPolicy,
-        HTTPMethod: "POST",
-        HTTPPath:   "/",
-    }
-
-    if input == nil {
-        input = &PutDestinationPolicyInput{}
-    }
-
-    req = c.newRequest(op, input, output)
-    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
-    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-    output = &PutDestinationPolicyOutput{}
-    req.Data = output
-    return
-}
-
-// Creates or updates an access policy associated with an existing Destination.
-// An access policy is an IAM policy document (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html)
-// that is used to authorize claims to register a subscription filter against
-// a given destination.
-func (c *CloudWatchLogs) PutDestinationPolicy(input *PutDestinationPolicyInput) (*PutDestinationPolicyOutput, error) {
-    req, out := c.PutDestinationPolicyRequest(input)
-    err := req.Send()
-    return out, err
-}
-
-const opPutLogEvents = "PutLogEvents"
-
-// PutLogEventsRequest generates a request for the PutLogEvents operation.
-func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *request.Request, output *PutLogEventsOutput) {
-    op := &request.Operation{
-        Name:       opPutLogEvents,
-        HTTPMethod: "POST",
-        HTTPPath:   "/",
-    }
-
-    if input == nil {
-        input = &PutLogEventsInput{}
-    }
-
-    req = c.newRequest(op, input, output)
-    output = &PutLogEventsOutput{}
-    req.Data = output
-    return
-}
-
-// Uploads a batch of log events to the specified log stream.
-//
-// Every PutLogEvents request must include the sequenceToken obtained from
-// the response of the previous request. An upload in a newly created log stream
-// does not require a sequenceToken.
-//
-// The batch of events must satisfy the following constraints:
-//
-//   The maximum batch size is 1,048,576 bytes, and this size is calculated
-//   as the sum of all event messages in UTF-8, plus 26 bytes for each log event.
-//
-//   None of the log events in the batch can be more than 2 hours in the future.
-//
-//   None of the log events in the batch can be older than 14 days or the
-//   retention period of the log group.
-//
-//   The log events in the batch must be in chronological order by their
-//   timestamp.
-//
-//   The maximum number of log events in a batch is 10,000.
-//
-//   A batch of log events in a single PutLogEvents request cannot span more
-//   than 24 hours. Otherwise, the PutLogEvents operation will fail.
-func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOutput, error) {
-    req, out := c.PutLogEventsRequest(input)
-    err := req.Send()
-    return out, err
-}
-
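The sequenceToken chaining described above means callers typically thread the token from one response into the next request. A minimal sketch under those assumptions; the group and stream names are placeholders, and NextSequenceToken is the field the service returns on PutLogEventsOutput (defined further down in this generated file):

package main

import (
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// putBatch uploads one batch of messages and returns the sequence token the
// next PutLogEvents call must present. Pass token == nil for the first upload
// into a newly created stream.
func putBatch(svc *cloudwatchlogs.CloudWatchLogs, token *string, lines []string) (*string, error) {
    // One shared timestamp keeps the batch trivially in chronological order.
    now := aws.Int64(time.Now().UnixNano() / int64(time.Millisecond))
    events := make([]*cloudwatchlogs.InputLogEvent, 0, len(lines))
    for _, line := range lines {
        events = append(events, &cloudwatchlogs.InputLogEvent{
            Message:   aws.String(line),
            Timestamp: now,
        })
    }

    resp, err := svc.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
        LogGroupName:  aws.String("my-log-group"),  // placeholder
        LogStreamName: aws.String("my-log-stream"), // placeholder
        LogEvents:     events,
        SequenceToken: token,
    })
    if err != nil {
        return token, err
    }
    return resp.NextSequenceToken, nil
}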
-const opPutMetricFilter = "PutMetricFilter"
-
-// PutMetricFilterRequest generates a request for the PutMetricFilter operation.
-func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (req *request.Request, output *PutMetricFilterOutput) {
-    op := &request.Operation{
-        Name:       opPutMetricFilter,
-        HTTPMethod: "POST",
-        HTTPPath:   "/",
-    }
-
-    if input == nil {
-        input = &PutMetricFilterInput{}
-    }
-
-    req = c.newRequest(op, input, output)
-    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
-    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-    output = &PutMetricFilterOutput{}
-    req.Data = output
-    return
-}
-
-// Creates or updates a metric filter and associates it with the specified log
-// group. Metric filters allow you to configure rules to extract metric data
-// from log events ingested through PutLogEvents requests.
-//
-// The maximum number of metric filters that can be associated with a log
-// group is 100.
-func (c *CloudWatchLogs) PutMetricFilter(input *PutMetricFilterInput) (*PutMetricFilterOutput, error) {
-    req, out := c.PutMetricFilterRequest(input)
-    err := req.Send()
-    return out, err
-}
-
-const opPutRetentionPolicy = "PutRetentionPolicy"
-
-// PutRetentionPolicyRequest generates a request for the PutRetentionPolicy operation.
-func (c *CloudWatchLogs) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *request.Request, output *PutRetentionPolicyOutput) {
-    op := &request.Operation{
-        Name:       opPutRetentionPolicy,
-        HTTPMethod: "POST",
-        HTTPPath:   "/",
-    }
-
-    if input == nil {
-        input = &PutRetentionPolicyInput{}
-    }
-
-    req = c.newRequest(op, input, output)
-    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
-    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-    output = &PutRetentionPolicyOutput{}
-    req.Data = output
-    return
-}
-
-// Sets the retention of the specified log group. A retention policy allows
-// you to configure the number of days you want to retain log events in the
-// specified log group.
-func (c *CloudWatchLogs) PutRetentionPolicy(input *PutRetentionPolicyInput) (*PutRetentionPolicyOutput, error) {
-    req, out := c.PutRetentionPolicyRequest(input)
-    err := req.Send()
-    return out, err
-}
-
-const opPutSubscriptionFilter = "PutSubscriptionFilter"
-
-// PutSubscriptionFilterRequest generates a request for the PutSubscriptionFilter operation.
-func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilterInput) (req *request.Request, output *PutSubscriptionFilterOutput) {
-    op := &request.Operation{
-        Name:       opPutSubscriptionFilter,
-        HTTPMethod: "POST",
-        HTTPPath:   "/",
-    }
-
-    if input == nil {
-        input = &PutSubscriptionFilterInput{}
-    }
-
-    req = c.newRequest(op, input, output)
-    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
-    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-    output = &PutSubscriptionFilterOutput{}
-    req.Data = output
-    return
-}
-
-// Creates or updates a subscription filter and associates it with the specified
-// log group. Subscription filters allow you to subscribe to a real-time stream
-// of log events ingested through PutLogEvents requests and have them delivered
-// to a specific destination. Currently, the supported destinations are:
-//
-//   An Amazon Kinesis stream belonging to the same account as the subscription
-//   filter, for same-account delivery.
-//
-//   A logical destination (used via an ARN of Destination) belonging to a
-//   different account, for cross-account delivery.
-//
-//   An Amazon Kinesis Firehose stream belonging to the same account as the
-//   subscription filter, for same-account delivery.
-//
-//   An AWS Lambda function belonging to the same account as the subscription
-//   filter, for same-account delivery.
-//
-// Currently there can only be one subscription filter associated with a log
-// group.
-func (c *CloudWatchLogs) PutSubscriptionFilter(input *PutSubscriptionFilterInput) (*PutSubscriptionFilterOutput, error) {
-    req, out := c.PutSubscriptionFilterRequest(input)
-    err := req.Send()
-    return out, err
-}
-
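For the same-account Kinesis case listed above, a call looks roughly like this. The PutSubscriptionFilterInput field names (LogGroupName, FilterName, FilterPattern, DestinationArn, RoleArn) come from the same generated file; the ARNs and names are placeholders:

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// subscribeToKinesis wires a log group to a same-account Kinesis stream.
// An empty filter pattern matches every ingested event.
func subscribeToKinesis(svc *cloudwatchlogs.CloudWatchLogs) error {
    _, err := svc.PutSubscriptionFilter(&cloudwatchlogs.PutSubscriptionFilterInput{
        LogGroupName:   aws.String("my-log-group"), // placeholder
        FilterName:     aws.String("to-kinesis"),
        FilterPattern:  aws.String(""),
        DestinationArn: aws.String("arn:aws:kinesis:us-east-1:111111111111:stream/my-stream"),
        RoleArn:        aws.String("arn:aws:iam::111111111111:role/CWLtoKinesisRole"),
    })
    return err
}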
-const opTestMetricFilter = "TestMetricFilter"
-
-// TestMetricFilterRequest generates a request for the TestMetricFilter operation.
-func (c *CloudWatchLogs) TestMetricFilterRequest(input *TestMetricFilterInput) (req *request.Request, output *TestMetricFilterOutput) {
-    op := &request.Operation{
-        Name:       opTestMetricFilter,
-        HTTPMethod: "POST",
-        HTTPPath:   "/",
-    }
-
-    if input == nil {
-        input = &TestMetricFilterInput{}
-    }
-
-    req = c.newRequest(op, input, output)
-    output = &TestMetricFilterOutput{}
-    req.Data = output
-    return
-}
-
-// Tests the filter pattern of a metric filter against a sample of log event
-// messages. You can use this operation to validate the correctness of a metric
-// filter pattern.
-func (c *CloudWatchLogs) TestMetricFilter(input *TestMetricFilterInput) (*TestMetricFilterOutput, error) {
-    req, out := c.TestMetricFilterRequest(input)
-    err := req.Send()
-    return out, err
-}
-
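Dry-running a pattern against canned messages before attaching it with PutMetricFilter looks roughly like this. The TestMetricFilterInput field names (FilterPattern, LogEventMessages) come from the same generated file; the sample message is made up:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// checkPattern tests a filter pattern against one sample message without
// touching any log group.
func checkPattern(svc *cloudwatchlogs.CloudWatchLogs) error {
    resp, err := svc.TestMetricFilter(&cloudwatchlogs.TestMetricFilterInput{
        FilterPattern: aws.String("[ip, user, ts, request, status=5*, size]"),
        LogEventMessages: []*string{
            aws.String(`127.0.0.1 frank [10/Oct/2000:13:55:36] "GET /index.html" 500 3456`),
        },
    })
    if err != nil {
        return err
    }
    fmt.Printf("matched %d of 1 sample messages\n", len(resp.Matches))
    return nil
}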
-type CancelExportTaskInput struct {
-    _ struct{} `type:"structure"`
-
-    // Id of the export task to cancel.
-    TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s CancelExportTaskInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CancelExportTaskInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CancelExportTaskInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "CancelExportTaskInput"}
-    if s.TaskId == nil {
-        invalidParams.Add(request.NewErrParamRequired("TaskId"))
-    }
-    if s.TaskId != nil && len(*s.TaskId) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("TaskId", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type CancelExportTaskOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s CancelExportTaskOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CancelExportTaskOutput) GoString() string {
-    return s.String()
-}
-
-type CreateExportTaskInput struct {
-    _ struct{} `type:"structure"`
-
-    // Name of the Amazon S3 bucket to which the log data will be exported.
-    //
-    // Note: Only buckets in the same AWS region are supported.
-    Destination *string `locationName:"destination" min:"1" type:"string" required:"true"`
-
-    // Prefix that will be used as the start of the Amazon S3 key for every object
-    // exported. If not specified, this defaults to 'exportedlogs'.
-    DestinationPrefix *string `locationName:"destinationPrefix" type:"string"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC. It indicates the start time of the range for the request. Events
-    // with a timestamp prior to this time will not be exported.
-    From *int64 `locationName:"from" type:"long" required:"true"`
-
-    // The name of the log group to export.
-    LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
-
-    // Will only export log streams that match the provided logStreamNamePrefix.
-    // If you don't specify a value, no prefix filter is applied.
-    LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"`
-
-    // The name of the export task.
-    TaskName *string `locationName:"taskName" min:"1" type:"string"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC. It indicates the end time of the range for the request. Events
-    // with a timestamp later than this time will not be exported.
-    To *int64 `locationName:"to" type:"long" required:"true"`
-}
-
-// String returns the string representation
-func (s CreateExportTaskInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CreateExportTaskInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateExportTaskInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "CreateExportTaskInput"}
-    if s.Destination == nil {
-        invalidParams.Add(request.NewErrParamRequired("Destination"))
-    }
-    if s.Destination != nil && len(*s.Destination) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("Destination", 1))
-    }
-    if s.From == nil {
-        invalidParams.Add(request.NewErrParamRequired("From"))
-    }
-    if s.LogGroupName == nil {
-        invalidParams.Add(request.NewErrParamRequired("LogGroupName"))
-    }
-    if s.LogGroupName != nil && len(*s.LogGroupName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1))
-    }
-    if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1))
-    }
-    if s.TaskName != nil && len(*s.TaskName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("TaskName", 1))
-    }
-    if s.To == nil {
-        invalidParams.Add(request.NewErrParamRequired("To"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type CreateExportTaskOutput struct {
-    _ struct{} `type:"structure"`
-
-    // Id of the export task that was created.
-    TaskId *string `locationName:"taskId" min:"1" type:"string"`
-}
-
-// String returns the string representation
-func (s CreateExportTaskOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CreateExportTaskOutput) GoString() string {
-    return s.String()
-}
-
-type CreateLogGroupInput struct {
-    _ struct{} `type:"structure"`
-
-    // The name of the log group to create.
-    LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s CreateLogGroupInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CreateLogGroupInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateLogGroupInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "CreateLogGroupInput"}
-    if s.LogGroupName == nil {
-        invalidParams.Add(request.NewErrParamRequired("LogGroupName"))
-    }
-    if s.LogGroupName != nil && len(*s.LogGroupName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type CreateLogGroupOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s CreateLogGroupOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CreateLogGroupOutput) GoString() string {
-    return s.String()
-}
-
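Creating a group and then a stream inside it is a two-step call against the CreateLogGroup and CreateLogStream methods removed earlier in this generated file. A minimal sketch; the names are placeholders, and real code would tolerate a ResourceAlreadyExistsException from either call:

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// ensureStream creates a log group and a stream inside it.
func ensureStream(svc *cloudwatchlogs.CloudWatchLogs) error {
    if _, err := svc.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
        LogGroupName: aws.String("my-log-group"), // placeholder
    }); err != nil {
        return err
    }
    _, err := svc.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{
        LogGroupName:  aws.String("my-log-group"),
        LogStreamName: aws.String("my-log-stream"), // placeholder
    })
    return err
}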
-type CreateLogStreamInput struct {
-    _ struct{} `type:"structure"`
-
-    // The name of the log group under which the log stream is to be created.
-    LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
-
-    // The name of the log stream to create.
-    LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s CreateLogStreamInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CreateLogStreamInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateLogStreamInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "CreateLogStreamInput"}
-    if s.LogGroupName == nil {
-        invalidParams.Add(request.NewErrParamRequired("LogGroupName"))
-    }
-    if s.LogGroupName != nil && len(*s.LogGroupName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1))
-    }
-    if s.LogStreamName == nil {
-        invalidParams.Add(request.NewErrParamRequired("LogStreamName"))
-    }
-    if s.LogStreamName != nil && len(*s.LogStreamName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type CreateLogStreamOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s CreateLogStreamOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CreateLogStreamOutput) GoString() string {
-    return s.String()
-}
-
-type DeleteDestinationInput struct {
-    _ struct{} `type:"structure"`
-
-    // The name of the destination to delete.
-    DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s DeleteDestinationInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteDestinationInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteDestinationInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DeleteDestinationInput"}
-    if s.DestinationName == nil {
-        invalidParams.Add(request.NewErrParamRequired("DestinationName"))
-    }
-    if s.DestinationName != nil && len(*s.DestinationName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteDestinationOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s DeleteDestinationOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteDestinationOutput) GoString() string {
-    return s.String()
-}
-
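These generated Validate methods run entirely client-side and collect every violated constraint at once before any request is built or sent. A small sketch using the type above:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
    // Deliberately omit the required DestinationName.
    input := &cloudwatchlogs.DeleteDestinationInput{}
    if err := input.Validate(); err != nil {
        // err is a request.ErrInvalidParams listing each failed
        // constraint; nothing is sent over the wire.
        fmt.Println(err)
    }
}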
-type DeleteLogGroupInput struct {
-    _ struct{} `type:"structure"`
-
-    // The name of the log group to delete.
-    LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s DeleteLogGroupInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteLogGroupInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteLogGroupInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DeleteLogGroupInput"}
-    if s.LogGroupName == nil {
-        invalidParams.Add(request.NewErrParamRequired("LogGroupName"))
-    }
-    if s.LogGroupName != nil && len(*s.LogGroupName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteLogGroupOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s DeleteLogGroupOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteLogGroupOutput) GoString() string {
-    return s.String()
-}
-
-type DeleteLogStreamInput struct {
-    _ struct{} `type:"structure"`
-
-    // The name of the log group to which the log stream to delete belongs.
-    LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
-
-    // The name of the log stream to delete.
-    LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s DeleteLogStreamInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteLogStreamInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteLogStreamInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DeleteLogStreamInput"}
-    if s.LogGroupName == nil {
-        invalidParams.Add(request.NewErrParamRequired("LogGroupName"))
-    }
-    if s.LogGroupName != nil && len(*s.LogGroupName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1))
-    }
-    if s.LogStreamName == nil {
-        invalidParams.Add(request.NewErrParamRequired("LogStreamName"))
-    }
-    if s.LogStreamName != nil && len(*s.LogStreamName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteLogStreamOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s DeleteLogStreamOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteLogStreamOutput) GoString() string {
-    return s.String()
-}
-
-type DeleteMetricFilterInput struct {
-    _ struct{} `type:"structure"`
-
-    // The name of the metric filter to delete.
-    FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"`
-
-    // The name of the log group that is associated with the metric filter to delete.
-    LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s DeleteMetricFilterInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteMetricFilterInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteMetricFilterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteMetricFilterInput"} - if s.FilterName == nil { - invalidParams.Add(request.NewErrParamRequired("FilterName")) - } - if s.FilterName != nil && len(*s.FilterName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DeleteMetricFilterOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteMetricFilterOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMetricFilterOutput) GoString() string { - return s.String() -} - -type DeleteRetentionPolicyInput struct { - _ struct{} `type:"structure"` - - // The name of the log group that is associated with the retention policy to - // delete. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteRetentionPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteRetentionPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteRetentionPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteRetentionPolicyInput"} - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DeleteRetentionPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteRetentionPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteRetentionPolicyOutput) GoString() string { - return s.String() -} - -type DeleteSubscriptionFilterInput struct { - _ struct{} `type:"structure"` - - // The name of the subscription filter to delete. - FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` - - // The name of the log group that is associated with the subscription filter - // to delete. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteSubscriptionFilterInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteSubscriptionFilterInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteSubscriptionFilterInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DeleteSubscriptionFilterInput"}
-    if s.FilterName == nil {
-        invalidParams.Add(request.NewErrParamRequired("FilterName"))
-    }
-    if s.FilterName != nil && len(*s.FilterName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("FilterName", 1))
-    }
-    if s.LogGroupName == nil {
-        invalidParams.Add(request.NewErrParamRequired("LogGroupName"))
-    }
-    if s.LogGroupName != nil && len(*s.LogGroupName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteSubscriptionFilterOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s DeleteSubscriptionFilterOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteSubscriptionFilterOutput) GoString() string {
-    return s.String()
-}
-
-type DescribeDestinationsInput struct {
-    _ struct{} `type:"structure"`
-
-    // Will only return destinations that match the provided destinationNamePrefix.
-    // If you don't specify a value, no prefix filter is applied.
-    DestinationNamePrefix *string `min:"1" type:"string"`
-
-    // The maximum number of results to return.
-    Limit *int64 `locationName:"limit" min:"1" type:"integer"`
-
-    // A string token used for pagination that points to the next page of results.
-    // It must be a value obtained from the response of the previous request. The
-    // token expires after 24 hours.
-    NextToken *string `locationName:"nextToken" min:"1" type:"string"`
-}
-
-// String returns the string representation
-func (s DescribeDestinationsInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DescribeDestinationsInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeDestinationsInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DescribeDestinationsInput"}
-    if s.DestinationNamePrefix != nil && len(*s.DestinationNamePrefix) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("DestinationNamePrefix", 1))
-    }
-    if s.Limit != nil && *s.Limit < 1 {
-        invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
-    }
-    if s.NextToken != nil && len(*s.NextToken) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DescribeDestinationsOutput struct {
-    _ struct{} `type:"structure"`
-
-    Destinations []*Destination `locationName:"destinations" type:"list"`
-
-    // A string token used for pagination that points to the next page of results.
-    // It must be a value obtained from the response of the previous request. The
-    // token expires after 24 hours.
-    NextToken *string `locationName:"nextToken" min:"1" type:"string"`
-}
-
-// String returns the string representation
-func (s DescribeDestinationsOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DescribeDestinationsOutput) GoString() string {
-    return s.String()
-}
-
-type DescribeExportTasksInput struct {
-    _ struct{} `type:"structure"`
-
-    // The maximum number of items returned in the response. If you don't specify
-    // a value, the request would return up to 50 items.
-    Limit *int64 `locationName:"limit" min:"1" type:"integer"`
-
-    // A string token used for pagination that points to the next page of results.
-    // It must be a value obtained from the response of the previous DescribeExportTasks
-    // request.
-    NextToken *string `locationName:"nextToken" min:"1" type:"string"`
-
-    // All export tasks that match the specified status code will be returned.
-    // This can return zero or more export tasks.
-    StatusCode *string `locationName:"statusCode" type:"string" enum:"ExportTaskStatusCode"`
-
-    // The export task that matches the specified task Id will be returned. This
-    // can result in zero or one export task.
-    TaskId *string `locationName:"taskId" min:"1" type:"string"`
-}
-
-// String returns the string representation
-func (s DescribeExportTasksInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DescribeExportTasksInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeExportTasksInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DescribeExportTasksInput"}
-    if s.Limit != nil && *s.Limit < 1 {
-        invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
-    }
-    if s.NextToken != nil && len(*s.NextToken) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
-    }
-    if s.TaskId != nil && len(*s.TaskId) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("TaskId", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DescribeExportTasksOutput struct {
-    _ struct{} `type:"structure"`
-
-    // A list of export tasks.
-    ExportTasks []*ExportTask `locationName:"exportTasks" type:"list"`
-
-    // A string token used for pagination that points to the next page of results.
-    // It must be a value obtained from the response of the previous request. The
-    // token expires after 24 hours.
-    NextToken *string `locationName:"nextToken" min:"1" type:"string"`
-}
-
-// String returns the string representation
-func (s DescribeExportTasksOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DescribeExportTasksOutput) GoString() string {
-    return s.String()
-}
-
-type DescribeLogGroupsInput struct {
-    _ struct{} `type:"structure"`
-
-    // The maximum number of items returned in the response. If you don't specify
-    // a value, the request would return up to 50 items.
-    Limit *int64 `locationName:"limit" min:"1" type:"integer"`
-
-    // Will only return log groups that match the provided logGroupNamePrefix. If
-    // you don't specify a value, no prefix filter is applied.
-    LogGroupNamePrefix *string `locationName:"logGroupNamePrefix" min:"1" type:"string"`
-
-    // A string token used for pagination that points to the next page of results.
-    // It must be a value obtained from the response of the previous DescribeLogGroups
-    // request.
-    NextToken *string `locationName:"nextToken" min:"1" type:"string"`
-}
-
-// String returns the string representation
-func (s DescribeLogGroupsInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DescribeLogGroupsInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeLogGroupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeLogGroupsInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.LogGroupNamePrefix != nil && len(*s.LogGroupNamePrefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupNamePrefix", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DescribeLogGroupsOutput struct { - _ struct{} `type:"structure"` - - // A list of log groups. - LogGroups []*LogGroup `locationName:"logGroups" type:"list"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeLogGroupsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeLogGroupsOutput) GoString() string { - return s.String() -} - -type DescribeLogStreamsInput struct { - _ struct{} `type:"structure"` - - // If set to true, results are returned in descending order. If you don't specify - // a value or set it to false, results are returned in ascending order. - Descending *bool `locationName:"descending" type:"boolean"` - - // The maximum number of items returned in the response. If you don't specify - // a value, the request would return up to 50 items. - Limit *int64 `locationName:"limit" min:"1" type:"integer"` - - // The log group name for which log streams are to be listed. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // Will only return log streams that match the provided logStreamNamePrefix. - // If you don't specify a value, no prefix filter is applied. - LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous DescribeLogStreams - // request. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` - - // Specifies what to order the returned log streams by. Valid arguments are - // 'LogStreamName' or 'LastEventTime'. If you don't specify a value, results - // are ordered by LogStreamName. If 'LastEventTime' is chosen, the request cannot - // also contain a logStreamNamePrefix. - OrderBy *string `locationName:"orderBy" type:"string" enum:"OrderBy"` -} - -// String returns the string representation -func (s DescribeLogStreamsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeLogStreamsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeLogStreamsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeLogStreamsInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DescribeLogStreamsOutput struct { - _ struct{} `type:"structure"` - - // A list of log streams. - LogStreams []*LogStream `locationName:"logStreams" type:"list"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeLogStreamsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeLogStreamsOutput) GoString() string { - return s.String() -} - -type DescribeMetricFiltersInput struct { - _ struct{} `type:"structure"` - - // Will only return metric filters that match the provided filterNamePrefix. - // If you don't specify a value, no prefix filter is applied. - FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` - - // The maximum number of items returned in the response. If you don't specify - // a value, the request would return up to 50 items. - Limit *int64 `locationName:"limit" min:"1" type:"integer"` - - // The log group name for which metric filters are to be listed. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous DescribeMetricFilters - // request. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeMetricFiltersInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeMetricFiltersInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeMetricFiltersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeMetricFiltersInput"} - if s.FilterNamePrefix != nil && len(*s.FilterNamePrefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FilterNamePrefix", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DescribeMetricFiltersOutput struct { - _ struct{} `type:"structure"` - - MetricFilters []*MetricFilter `locationName:"metricFilters" type:"list"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeMetricFiltersOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeMetricFiltersOutput) GoString() string { - return s.String() -} - -type DescribeSubscriptionFiltersInput struct { - _ struct{} `type:"structure"` - - // Will only return subscription filters that match the provided filterNamePrefix. - // If you don't specify a value, no prefix filter is applied. - FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` - - // The maximum number of results to return. - Limit *int64 `locationName:"limit" min:"1" type:"integer"` - - // The log group name for which subscription filters are to be listed. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeSubscriptionFiltersInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeSubscriptionFiltersInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeSubscriptionFiltersInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DescribeSubscriptionFiltersInput"}
-    if s.FilterNamePrefix != nil && len(*s.FilterNamePrefix) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("FilterNamePrefix", 1))
-    }
-    if s.Limit != nil && *s.Limit < 1 {
-        invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
-    }
-    if s.LogGroupName == nil {
-        invalidParams.Add(request.NewErrParamRequired("LogGroupName"))
-    }
-    if s.LogGroupName != nil && len(*s.LogGroupName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1))
-    }
-    if s.NextToken != nil && len(*s.NextToken) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DescribeSubscriptionFiltersOutput struct {
-    _ struct{} `type:"structure"`
-
-    // A string token used for pagination that points to the next page of results.
-    // It must be a value obtained from the response of the previous request. The
-    // token expires after 24 hours.
-    NextToken *string `locationName:"nextToken" min:"1" type:"string"`
-
-    SubscriptionFilters []*SubscriptionFilter `locationName:"subscriptionFilters" type:"list"`
-}
-
-// String returns the string representation
-func (s DescribeSubscriptionFiltersOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DescribeSubscriptionFiltersOutput) GoString() string {
-    return s.String()
-}
-
-// A cross-account destination that is the recipient of subscription log events.
-type Destination struct {
-    _ struct{} `type:"structure"`
-
-    // An IAM policy document that governs which AWS accounts can create subscription
-    // filters against this destination.
-    AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string"`
-
-    // ARN of this destination.
-    Arn *string `locationName:"arn" type:"string"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC specifying when this destination was created.
-    CreationTime *int64 `locationName:"creationTime" type:"long"`
-
-    // Name of the destination.
-    DestinationName *string `locationName:"destinationName" min:"1" type:"string"`
-
-    // A role for impersonation, used for delivering log events to the target.
-    RoleArn *string `locationName:"roleArn" min:"1" type:"string"`
-
-    // ARN of the physical target where the log events will be delivered (e.g.
-    // the ARN of a Kinesis stream).
-    TargetArn *string `locationName:"targetArn" min:"1" type:"string"`
-}
-
-// String returns the string representation
-func (s Destination) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Destination) GoString() string {
-    return s.String()
-}
-
-// Represents an export task.
-type ExportTask struct {
-    _ struct{} `type:"structure"`
-
-    // Name of the Amazon S3 bucket to which the log data was exported.
-    Destination *string `locationName:"destination" min:"1" type:"string"`
-
-    // Prefix that was used as the start of the Amazon S3 key for every object exported.
-    DestinationPrefix *string `locationName:"destinationPrefix" type:"string"`
-
-    // Execution info about the export task.
-    ExecutionInfo *ExportTaskExecutionInfo `locationName:"executionInfo" type:"structure"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC. Events with a timestamp prior to this time are not exported.
-    From *int64 `locationName:"from" type:"long"`
-
-    // The name of the log group from which log data was exported.
-    LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"`
-
-    // Status of the export task.
-    Status *ExportTaskStatus `locationName:"status" type:"structure"`
-
-    // Id of the export task.
-    TaskId *string `locationName:"taskId" min:"1" type:"string"`
-
-    // The name of the export task.
-    TaskName *string `locationName:"taskName" min:"1" type:"string"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC. Events with a timestamp later than this time are not exported.
-    To *int64 `locationName:"to" type:"long"`
-}
-
-// String returns the string representation
-func (s ExportTask) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s ExportTask) GoString() string {
-    return s.String()
-}
-
-// Represents execution information about an export task.
-type ExportTaskExecutionInfo struct {
-    _ struct{} `type:"structure"`
-
-    // A point in time when the export task was completed.
-    CompletionTime *int64 `locationName:"completionTime" type:"long"`
-
-    // A point in time when the export task was created.
-    CreationTime *int64 `locationName:"creationTime" type:"long"`
-}
-
-// String returns the string representation
-func (s ExportTaskExecutionInfo) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s ExportTaskExecutionInfo) GoString() string {
-    return s.String()
-}
-
-// Represents the status of an export task.
-type ExportTaskStatus struct {
-    _ struct{} `type:"structure"`
-
-    // Status code of the export task.
-    Code *string `locationName:"code" type:"string" enum:"ExportTaskStatusCode"`
-
-    // Status message related to the code.
-    Message *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation
-func (s ExportTaskStatus) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s ExportTaskStatus) GoString() string {
-    return s.String()
-}
-
-type FilterLogEventsInput struct {
-    _ struct{} `type:"structure"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC. If provided, events with a timestamp later than this time are
-    // not returned.
-    EndTime *int64 `locationName:"endTime" type:"long"`
-
-    // A valid CloudWatch Logs filter pattern to use for filtering the response.
-    // If not provided, all the events are matched.
-    FilterPattern *string `locationName:"filterPattern" type:"string"`
-
-    // If provided, the API will make a best effort to provide responses that contain
-    // events from multiple log streams within the log group, interleaved in a single
-    // response. If not provided, all the matched log events in the first log stream
-    // will be searched first, then those in the next log stream, etc.
-    Interleaved *bool `locationName:"interleaved" type:"boolean"`
-
-    // The maximum number of events to return in a page of results. Default is 10,000
-    // events.
-    Limit *int64 `locationName:"limit" min:"1" type:"integer"`
-
-    // The name of the log group to query.
-    LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
-
-    // Optional list of log stream names within the specified log group to search.
-    // Defaults to all the log streams in the log group.
- LogStreamNames []*string `locationName:"logStreamNames" min:"1" type:"list"` - - // A pagination token obtained from a FilterLogEvents response to continue paginating - // the FilterLogEvents results. This token is omitted from the response when - // there are no other events to display. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. If provided, events with a timestamp prior to this time are - // not returned. - StartTime *int64 `locationName:"startTime" type:"long"` -} - -// String returns the string representation -func (s FilterLogEventsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FilterLogEventsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *FilterLogEventsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "FilterLogEventsInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.LogStreamNames != nil && len(s.LogStreamNames) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogStreamNames", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type FilterLogEventsOutput struct { - _ struct{} `type:"structure"` - - // A list of FilteredLogEvent objects representing the matched events from the - // request. - Events []*FilteredLogEvent `locationName:"events" type:"list"` - - // A pagination token obtained from a FilterLogEvents response to continue paginating - // the FilterLogEvents results. This token is omitted from the response when - // there are no other events to display. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` - - // A list of SearchedLogStream objects indicating which log streams have been - // searched in this request and whether each has been searched completely or - // still has more to be paginated. - SearchedLogStreams []*SearchedLogStream `locationName:"searchedLogStreams" type:"list"` -} - -// String returns the string representation -func (s FilterLogEventsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FilterLogEventsOutput) GoString() string { - return s.String() -} - -// Represents a matched event from a FilterLogEvents request. -type FilteredLogEvent struct { - _ struct{} `type:"structure"` - - // A unique identifier for this event. - EventId *string `locationName:"eventId" type:"string"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - IngestionTime *int64 `locationName:"ingestionTime" type:"long"` - - // The name of the log stream this event belongs to. - LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` - - // The data contained in the log event. - Message *string `locationName:"message" min:"1" type:"string"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. 
-    Timestamp *int64 `locationName:"timestamp" type:"long"`
-}
-
-// String returns the string representation
-func (s FilteredLogEvent) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s FilteredLogEvent) GoString() string {
-    return s.String()
-}
-
-type GetLogEventsInput struct {
-    _ struct{} `type:"structure"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC.
-    EndTime *int64 `locationName:"endTime" type:"long"`
-
-    // The maximum number of log events returned in the response. If you don't specify
-    // a value, the request would return as many log events as can fit in a response
-    // size of 1MB, up to 10,000 log events.
-    Limit *int64 `locationName:"limit" min:"1" type:"integer"`
-
-    // The name of the log group to query.
-    LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
-
-    // The name of the log stream to query.
-    LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"`
-
-    // A string token used for pagination that points to the next page of results.
-    // It must be a value obtained from the nextForwardToken or nextBackwardToken
-    // fields in the response of the previous GetLogEvents request.
-    NextToken *string `locationName:"nextToken" min:"1" type:"string"`
-
-    // If set to true, the earliest log events are returned first. The default
-    // is false (the latest log events are returned first).
-    StartFromHead *bool `locationName:"startFromHead" type:"boolean"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC.
-    StartTime *int64 `locationName:"startTime" type:"long"`
-}
-
-// String returns the string representation
-func (s GetLogEventsInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetLogEventsInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetLogEventsInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetLogEventsInput"}
-    if s.Limit != nil && *s.Limit < 1 {
-        invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
-    }
-    if s.LogGroupName == nil {
-        invalidParams.Add(request.NewErrParamRequired("LogGroupName"))
-    }
-    if s.LogGroupName != nil && len(*s.LogGroupName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1))
-    }
-    if s.LogStreamName == nil {
-        invalidParams.Add(request.NewErrParamRequired("LogStreamName"))
-    }
-    if s.LogStreamName != nil && len(*s.LogStreamName) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1))
-    }
-    if s.NextToken != nil && len(*s.NextToken) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
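One common way the forward token is consumed: page oldest-to-newest and stop once the token stops changing, which is how the service conventionally signals the end of the stream (a behavior of the service, not something visible in this file). A minimal sketch; group and stream names are placeholders:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// readForward walks a stream from its earliest event to its latest.
func readForward(svc *cloudwatchlogs.CloudWatchLogs) error {
    var token *string
    for {
        resp, err := svc.GetLogEvents(&cloudwatchlogs.GetLogEventsInput{
            LogGroupName:  aws.String("my-log-group"),  // placeholder
            LogStreamName: aws.String("my-log-stream"), // placeholder
            StartFromHead: aws.Bool(true),
            NextToken:     token,
        })
        if err != nil {
            return err
        }
        for _, event := range resp.Events {
            fmt.Println(aws.StringValue(event.Message))
        }
        // An unchanged forward token means there are no more events
        // in this direction.
        if token != nil && aws.StringValue(resp.NextForwardToken) == aws.StringValue(token) {
            return nil
        }
        token = resp.NextForwardToken
    }
}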
-type GetLogEventsOutput struct {
-    _ struct{} `type:"structure"`
-
-    Events []*OutputLogEvent `locationName:"events" type:"list"`
-
-    // A string token used for pagination that points to the next page of results.
-    // It must be a value obtained from the response of the previous request. The
-    // token expires after 24 hours.
-    NextBackwardToken *string `locationName:"nextBackwardToken" min:"1" type:"string"`
-
-    // A string token used for pagination that points to the next page of results.
-    // It must be a value obtained from the response of the previous request. The
-    // token expires after 24 hours.
-    NextForwardToken *string `locationName:"nextForwardToken" min:"1" type:"string"`
-}
-
-// String returns the string representation
-func (s GetLogEventsOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetLogEventsOutput) GoString() string {
-    return s.String()
-}
-
-// A log event is a record of some activity that was recorded by the application
-// or resource being monitored. The log event record that CloudWatch Logs understands
-// contains two properties: the timestamp of when the event occurred, and the
-// raw event message.
-type InputLogEvent struct {
-    _ struct{} `type:"structure"`
-
-    Message *string `locationName:"message" min:"1" type:"string" required:"true"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC.
-    Timestamp *int64 `locationName:"timestamp" type:"long" required:"true"`
-}
-
-// String returns the string representation
-func (s InputLogEvent) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s InputLogEvent) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *InputLogEvent) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "InputLogEvent"}
-    if s.Message == nil {
-        invalidParams.Add(request.NewErrParamRequired("Message"))
-    }
-    if s.Message != nil && len(*s.Message) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("Message", 1))
-    }
-    if s.Timestamp == nil {
-        invalidParams.Add(request.NewErrParamRequired("Timestamp"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type LogGroup struct {
-    _ struct{} `type:"structure"`
-
-    Arn *string `locationName:"arn" type:"string"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC.
-    CreationTime *int64 `locationName:"creationTime" type:"long"`
-
-    LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"`
-
-    // The number of metric filters associated with the log group.
-    MetricFilterCount *int64 `locationName:"metricFilterCount" type:"integer"`
-
-    // Specifies the number of days you want to retain log events in the specified
-    // log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180,
-    // 365, 400, 545, 731, 1827, 3653.
-    RetentionInDays *int64 `locationName:"retentionInDays" type:"integer"`
-
-    StoredBytes *int64 `locationName:"storedBytes" type:"long"`
-}
-
-// String returns the string representation
-func (s LogGroup) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s LogGroup) GoString() string {
-    return s.String()
-}
-
-// A log stream is a sequence of log events from a single emitter of logs.
-type LogStream struct {
-    _ struct{} `type:"structure"`
-
-    Arn *string `locationName:"arn" type:"string"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC.
-    CreationTime *int64 `locationName:"creationTime" type:"long"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC.
-    FirstEventTimestamp *int64 `locationName:"firstEventTimestamp" type:"long"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC.
-    LastEventTimestamp *int64 `locationName:"lastEventTimestamp" type:"long"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC.
-    LastIngestionTime *int64 `locationName:"lastIngestionTime" type:"long"`
-
-    LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"`
-
-    StoredBytes *int64 `locationName:"storedBytes" type:"long"`
-
-    // A string token used for making PutLogEvents requests. A sequenceToken can
-    // only be used once, and PutLogEvents requests must include the sequenceToken
-    // obtained from the response of the previous request.
-    UploadSequenceToken *string `locationName:"uploadSequenceToken" min:"1" type:"string"`
-}
-
-// String returns the string representation
-func (s LogStream) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s LogStream) GoString() string {
-    return s.String()
-}
-
-// Metric filters can be used to express how CloudWatch Logs would extract metric
-// observations from ingested log events and transform them to metric data in
-// a CloudWatch metric.
-type MetricFilter struct {
-    _ struct{} `type:"structure"`
-
-    // A point in time expressed as the number of milliseconds since Jan 1, 1970
-    // 00:00:00 UTC.
-    CreationTime *int64 `locationName:"creationTime" type:"long"`
-
-    // A name for a metric or subscription filter.
-    FilterName *string `locationName:"filterName" min:"1" type:"string"`
-
-    // A symbolic description of how CloudWatch Logs should interpret the data in
-    // each log event. For example, a log event may contain timestamps, IP addresses,
-    // strings, and so on. You use the filter pattern to specify what to look for
-    // in the log event message.
-    FilterPattern *string `locationName:"filterPattern" type:"string"`
-
-    MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list"`
-}
-
-// String returns the string representation
-func (s MetricFilter) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s MetricFilter) GoString() string {
-    return s.String()
-}
-
-type MetricFilterMatchRecord struct {
-    _ struct{} `type:"structure"`
-
-    EventMessage *string `locationName:"eventMessage" min:"1" type:"string"`
-
-    EventNumber *int64 `locationName:"eventNumber" type:"long"`
-
-    ExtractedValues map[string]*string `locationName:"extractedValues" type:"map"`
-}
-
-// String returns the string representation
-func (s MetricFilterMatchRecord) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s MetricFilterMatchRecord) GoString() string {
-    return s.String()
-}
-
-type MetricTransformation struct {
-    _ struct{} `type:"structure"`
-
-    // The name of the CloudWatch metric to which the monitored log information
-    // should be published. For example, you may publish to a metric called ErrorCount.
-    MetricName *string `locationName:"metricName" type:"string" required:"true"`
-
-    // The destination namespace of the new CloudWatch metric.
-    MetricNamespace *string `locationName:"metricNamespace" type:"string" required:"true"`
-
-    // What to publish to the metric. For example, if you're counting the occurrences
-    // of a particular term like "Error", the value will be "1" for each occurrence.
-    // If you're counting the bytes transferred, the published value will be the
-    // value in the log event.
- MetricValue *string `locationName:"metricValue" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s MetricTransformation) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s MetricTransformation) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *MetricTransformation) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "MetricTransformation"}
- if s.MetricName == nil {
- invalidParams.Add(request.NewErrParamRequired("MetricName"))
- }
- if s.MetricNamespace == nil {
- invalidParams.Add(request.NewErrParamRequired("MetricNamespace"))
- }
- if s.MetricValue == nil {
- invalidParams.Add(request.NewErrParamRequired("MetricValue"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-type OutputLogEvent struct {
- _ struct{} `type:"structure"`
-
- // A point in time expressed as the number of milliseconds since Jan 1, 1970
- // 00:00:00 UTC.
- IngestionTime *int64 `locationName:"ingestionTime" type:"long"`
-
- Message *string `locationName:"message" min:"1" type:"string"`
-
- // A point in time expressed as the number of milliseconds since Jan 1, 1970
- // 00:00:00 UTC.
- Timestamp *int64 `locationName:"timestamp" type:"long"`
-}
-
-// String returns the string representation
-func (s OutputLogEvent) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s OutputLogEvent) GoString() string {
- return s.String()
-}
-
-type PutDestinationInput struct {
- _ struct{} `type:"structure"`
-
- // A name for the destination.
- DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"`
-
- // The ARN of an IAM role that grants CloudWatch Logs permissions to do Amazon
- // Kinesis PutRecord requests on the destination stream.
- RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"`
-
- // The ARN of an Amazon Kinesis stream to deliver matching log events to.
- TargetArn *string `locationName:"targetArn" min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s PutDestinationInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PutDestinationInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutDestinationInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PutDestinationInput"}
- if s.DestinationName == nil {
- invalidParams.Add(request.NewErrParamRequired("DestinationName"))
- }
- if s.DestinationName != nil && len(*s.DestinationName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1))
- }
- if s.RoleArn == nil {
- invalidParams.Add(request.NewErrParamRequired("RoleArn"))
- }
- if s.RoleArn != nil && len(*s.RoleArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1))
- }
- if s.TargetArn == nil {
- invalidParams.Add(request.NewErrParamRequired("TargetArn"))
- }
- if s.TargetArn != nil && len(*s.TargetArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TargetArn", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-type PutDestinationOutput struct {
- _ struct{} `type:"structure"`
-
- // A cross-account destination that is the recipient of subscription log events.
- Destination *Destination `locationName:"destination" type:"structure"`
-}
-
-// String returns the string representation
-func (s PutDestinationOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PutDestinationOutput) GoString() string {
- return s.String()
-}
-
-type PutDestinationPolicyInput struct {
- _ struct{} `type:"structure"`
-
- // An IAM policy document that authorizes cross-account users to deliver their
- // log events to the associated destination.
- AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string" required:"true"`
-
- // A name for an existing destination.
- DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s PutDestinationPolicyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PutDestinationPolicyInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutDestinationPolicyInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PutDestinationPolicyInput"}
- if s.AccessPolicy == nil {
- invalidParams.Add(request.NewErrParamRequired("AccessPolicy"))
- }
- if s.AccessPolicy != nil && len(*s.AccessPolicy) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("AccessPolicy", 1))
- }
- if s.DestinationName == nil {
- invalidParams.Add(request.NewErrParamRequired("DestinationName"))
- }
- if s.DestinationName != nil && len(*s.DestinationName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-type PutDestinationPolicyOutput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s PutDestinationPolicyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PutDestinationPolicyOutput) GoString() string {
- return s.String()
-}
-
-type PutLogEventsInput struct {
- _ struct{} `type:"structure"`
-
- // A list of log events belonging to a log stream.
- LogEvents []*InputLogEvent `locationName:"logEvents" min:"1" type:"list" required:"true"`
-
- // The name of the log group to put log events to.
- LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
-
- // The name of the log stream to put log events to.
- LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"`
-
- // A string token that must be obtained from the response of the previous PutLogEvents
- // request.
- SequenceToken *string `locationName:"sequenceToken" min:"1" type:"string"`
-}
-
-// String returns the string representation
-func (s PutLogEventsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PutLogEventsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutLogEventsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutLogEventsInput"} - if s.LogEvents == nil { - invalidParams.Add(request.NewErrParamRequired("LogEvents")) - } - if s.LogEvents != nil && len(s.LogEvents) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogEvents", 1)) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.LogStreamName == nil { - invalidParams.Add(request.NewErrParamRequired("LogStreamName")) - } - if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) - } - if s.SequenceToken != nil && len(*s.SequenceToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SequenceToken", 1)) - } - if s.LogEvents != nil { - for i, v := range s.LogEvents { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LogEvents", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutLogEventsOutput struct { - _ struct{} `type:"structure"` - - // A string token used for making PutLogEvents requests. A sequenceToken can - // only be used once, and PutLogEvents requests must include the sequenceToken - // obtained from the response of the previous request. - NextSequenceToken *string `locationName:"nextSequenceToken" min:"1" type:"string"` - - RejectedLogEventsInfo *RejectedLogEventsInfo `locationName:"rejectedLogEventsInfo" type:"structure"` -} - -// String returns the string representation -func (s PutLogEventsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutLogEventsOutput) GoString() string { - return s.String() -} - -type PutMetricFilterInput struct { - _ struct{} `type:"structure"` - - // A name for the metric filter. - FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` - - // A valid CloudWatch Logs filter pattern for extracting metric data out of - // ingested log events. - FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` - - // The name of the log group to associate the metric filter with. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // A collection of information needed to define how metric data gets emitted. - MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s PutMetricFilterInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutMetricFilterInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutMetricFilterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutMetricFilterInput"} - if s.FilterName == nil { - invalidParams.Add(request.NewErrParamRequired("FilterName")) - } - if s.FilterName != nil && len(*s.FilterName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) - } - if s.FilterPattern == nil { - invalidParams.Add(request.NewErrParamRequired("FilterPattern")) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.MetricTransformations == nil { - invalidParams.Add(request.NewErrParamRequired("MetricTransformations")) - } - if s.MetricTransformations != nil && len(s.MetricTransformations) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MetricTransformations", 1)) - } - if s.MetricTransformations != nil { - for i, v := range s.MetricTransformations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MetricTransformations", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutMetricFilterOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutMetricFilterOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutMetricFilterOutput) GoString() string { - return s.String() -} - -type PutRetentionPolicyInput struct { - _ struct{} `type:"structure"` - - // The name of the log group to associate the retention policy with. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // Specifies the number of days you want to retain log events in the specified - // log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, - // 365, 400, 545, 731, 1827, 3653. - RetentionInDays *int64 `locationName:"retentionInDays" type:"integer" required:"true"` -} - -// String returns the string representation -func (s PutRetentionPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutRetentionPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutRetentionPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutRetentionPolicyInput"} - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.RetentionInDays == nil { - invalidParams.Add(request.NewErrParamRequired("RetentionInDays")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutRetentionPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutRetentionPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutRetentionPolicyOutput) GoString() string { - return s.String() -} - -type PutSubscriptionFilterInput struct { - _ struct{} `type:"structure"` - - // The ARN of the destination to deliver matching log events to. 
Currently,
- // the supported destinations are:
- //
- //   - An Amazon Kinesis stream belonging to the same account as the
- //     subscription filter, for same-account delivery.
- //
- //   - A logical destination (used via an ARN of Destination) belonging to a
- //     different account, for cross-account delivery.
- //
- //   - An Amazon Kinesis Firehose stream belonging to the same account as
- //     the subscription filter, for same-account delivery.
- //
- //   - An AWS Lambda function belonging to the same account as the
- //     subscription filter, for same-account delivery.
- DestinationArn *string `locationName:"destinationArn" min:"1" type:"string" required:"true"`
-
- // A name for the subscription filter.
- FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"`
-
- // A valid CloudWatch Logs filter pattern for subscribing to a filtered stream
- // of log events.
- FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"`
-
- // The name of the log group to associate the subscription filter with.
- LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
-
- // The ARN of an IAM role that grants CloudWatch Logs permissions to deliver
- // ingested log events to the destination stream. You don't need to provide
- // the ARN when you are working with a logical destination (used via an ARN
- // of Destination) for cross-account delivery.
- RoleArn *string `locationName:"roleArn" min:"1" type:"string"`
-}
-
-// String returns the string representation
-func (s PutSubscriptionFilterInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PutSubscriptionFilterInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutSubscriptionFilterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutSubscriptionFilterInput"} - if s.DestinationArn == nil { - invalidParams.Add(request.NewErrParamRequired("DestinationArn")) - } - if s.DestinationArn != nil && len(*s.DestinationArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DestinationArn", 1)) - } - if s.FilterName == nil { - invalidParams.Add(request.NewErrParamRequired("FilterName")) - } - if s.FilterName != nil && len(*s.FilterName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) - } - if s.FilterPattern == nil { - invalidParams.Add(request.NewErrParamRequired("FilterPattern")) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.RoleArn != nil && len(*s.RoleArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutSubscriptionFilterOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutSubscriptionFilterOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutSubscriptionFilterOutput) GoString() string { - return s.String() -} - -type RejectedLogEventsInfo struct { - _ struct{} `type:"structure"` - - ExpiredLogEventEndIndex *int64 `locationName:"expiredLogEventEndIndex" type:"integer"` - - TooNewLogEventStartIndex *int64 `locationName:"tooNewLogEventStartIndex" type:"integer"` - - TooOldLogEventEndIndex *int64 `locationName:"tooOldLogEventEndIndex" type:"integer"` -} - -// String returns the string representation -func (s RejectedLogEventsInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RejectedLogEventsInfo) GoString() string { - return s.String() -} - -// An object indicating the search status of a log stream in a FilterLogEvents -// request. -type SearchedLogStream struct { - _ struct{} `type:"structure"` - - // The name of the log stream. - LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` - - // Indicates whether all the events in this log stream were searched or more - // data exists to search by paginating further. - SearchedCompletely *bool `locationName:"searchedCompletely" type:"boolean"` -} - -// String returns the string representation -func (s SearchedLogStream) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SearchedLogStream) GoString() string { - return s.String() -} - -type SubscriptionFilter struct { - _ struct{} `type:"structure"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - CreationTime *int64 `locationName:"creationTime" type:"long"` - - DestinationArn *string `locationName:"destinationArn" min:"1" type:"string"` - - // A name for a metric or subscription filter. - FilterName *string `locationName:"filterName" min:"1" type:"string"` - - // A symbolic description of how CloudWatch Logs should interpret the data in - // each log event. For example, a log event may contain timestamps, IP addresses, - // strings, and so on. You use the filter pattern to specify what to look for - // in the log event message. 
- FilterPattern *string `locationName:"filterPattern" type:"string"` - - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` - - RoleArn *string `locationName:"roleArn" min:"1" type:"string"` -} - -// String returns the string representation -func (s SubscriptionFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SubscriptionFilter) GoString() string { - return s.String() -} - -type TestMetricFilterInput struct { - _ struct{} `type:"structure"` - - // A symbolic description of how CloudWatch Logs should interpret the data in - // each log event. For example, a log event may contain timestamps, IP addresses, - // strings, and so on. You use the filter pattern to specify what to look for - // in the log event message. - FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` - - // A list of log event messages to test. - LogEventMessages []*string `locationName:"logEventMessages" min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s TestMetricFilterInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TestMetricFilterInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TestMetricFilterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TestMetricFilterInput"} - if s.FilterPattern == nil { - invalidParams.Add(request.NewErrParamRequired("FilterPattern")) - } - if s.LogEventMessages == nil { - invalidParams.Add(request.NewErrParamRequired("LogEventMessages")) - } - if s.LogEventMessages != nil && len(s.LogEventMessages) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogEventMessages", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type TestMetricFilterOutput struct { - _ struct{} `type:"structure"` - - Matches []*MetricFilterMatchRecord `locationName:"matches" type:"list"` -} - -// String returns the string representation -func (s TestMetricFilterOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TestMetricFilterOutput) GoString() string { - return s.String() -} - -const ( - // @enum ExportTaskStatusCode - ExportTaskStatusCodeCancelled = "CANCELLED" - // @enum ExportTaskStatusCode - ExportTaskStatusCodeCompleted = "COMPLETED" - // @enum ExportTaskStatusCode - ExportTaskStatusCodeFailed = "FAILED" - // @enum ExportTaskStatusCode - ExportTaskStatusCodePending = "PENDING" - // @enum ExportTaskStatusCode - ExportTaskStatusCodePendingCancel = "PENDING_CANCEL" - // @enum ExportTaskStatusCode - ExportTaskStatusCodeRunning = "RUNNING" -) - -const ( - // @enum OrderBy - OrderByLogStreamName = "LogStreamName" - // @enum OrderBy - OrderByLastEventTime = "LastEventTime" -) diff --git a/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go b/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go deleted file mode 100644 index e5a47270c..000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go +++ /dev/null @@ -1,116 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
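Taken together, the request and response shapes deleted above encode the PutLogEvents handshake: the first call on a fresh stream omits `SequenceToken`, and every later call must pass the single-use `NextSequenceToken` returned by the previous response. A minimal sketch of driving this vendored client follows; the `session.New` setup, region, and log group/stream names are illustrative assumptions, not taken from this patch:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	// New() accepts any client.ConfigProvider; a session is the usual one.
	svc := cloudwatchlogs.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

	var token *string // nil for the first call on a fresh stream
	for i := 0; i < 2; i++ {
		out, err := svc.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
			LogEvents: []*cloudwatchlogs.InputLogEvent{{
				Message: aws.String(fmt.Sprintf("event %d", i)),
				// Timestamp is milliseconds since Jan 1, 1970 00:00:00 UTC.
				Timestamp: aws.Int64(time.Now().UnixNano() / int64(time.Millisecond)),
			}},
			LogGroupName:  aws.String("my-group"),  // illustrative name
			LogStreamName: aws.String("my-stream"), // illustrative name
			SequenceToken: token,
		})
		if err != nil {
			log.Fatal(err)
		}
		// A sequence token can only be used once; thread the returned
		// token into the next request.
		token = out.NextSequenceToken
	}
}
```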
-
-package cloudwatchlogs
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
- "github.com/aws/aws-sdk-go/private/signer/v4"
-)
-
-// You can use Amazon CloudWatch Logs to monitor, store, and access your log
-// files from Amazon Elastic Compute Cloud (Amazon EC2) instances, Amazon CloudTrail,
-// or other sources. You can then retrieve the associated log data from CloudWatch
-// Logs using the Amazon CloudWatch console, the CloudWatch Logs commands in
-// the AWS CLI, the CloudWatch Logs API, or the CloudWatch Logs SDK.
-//
-// You can use CloudWatch Logs to:
-//
-// Monitor Logs from Amazon EC2 Instances in Real-time: You can use CloudWatch
-// Logs to monitor applications and systems using log data. For example, CloudWatch
-// Logs can track the number of errors that occur in your application logs and
-// send you a notification whenever the rate of errors exceeds a threshold you
-// specify. CloudWatch Logs uses your log data for monitoring, so no code changes
-// are required. For example, you can monitor application logs for specific
-// literal terms (such as "NullReferenceException") or count the number of occurrences
-// of a literal term at a particular position in log data (such as "404" status
-// codes in an Apache access log). When the term you are searching for is found,
-// CloudWatch Logs reports the data to an Amazon CloudWatch metric that you specify.
-//
-// Monitor Amazon CloudTrail Logged Events: You can create alarms in Amazon
-// CloudWatch and receive notifications of particular API activity as captured
-// by CloudTrail and use the notification to perform troubleshooting.
-//
-// Archive Log Data: You can use CloudWatch Logs to store your log data in
-// highly durable storage. You can change the log retention setting so that
-// any log events older than this setting are automatically deleted. The CloudWatch
-// Logs agent makes it easy to quickly send both rotated and non-rotated log
-// data off of a host and into the log service. You can then access the raw
-// log data when you need it.
-//
-// The service client's operations are safe to be used concurrently.
-// It is not safe to mutate any of the client's properties though.
-type CloudWatchLogs struct {
- *client.Client
-}
-
-// Used for custom client initialization logic
-var initClient func(*client.Client)
-
-// Used for custom request initialization logic
-var initRequest func(*request.Request)
-
-// A ServiceName is the name of the service the client will make API calls to.
-const ServiceName = "logs"
-
-// New creates a new instance of the CloudWatchLogs client with a session.
-// If additional configuration is needed for the client instance use the optional
-// aws.Config parameter to add your extra config.
-//
-// Example:
-// // Create a CloudWatchLogs client from just a session.
-// svc := cloudwatchlogs.New(mySession)
-//
-// // Create a CloudWatchLogs client with additional configuration
-// svc := cloudwatchlogs.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
-func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchLogs {
- c := p.ClientConfig(ServiceName, cfgs...)
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
-}
-
-// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudWatchLogs { - svc := &CloudWatchLogs{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2014-03-28", - JSONVersion: "1.1", - TargetPrefix: "Logs_20140328", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a CloudWatchLogs operation and runs any -// custom request initialization. -func (c *CloudWatchLogs) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d7..000000000 --- a/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 
-9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 
-5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 diff --git a/vendor/src/github.com/beorn7/perks/quantile/stream.go b/vendor/src/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index 587b1fc5b..000000000 --- a/vendor/src/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,292 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. 
type Samples []Sample
-
-func (a Samples) Len() int { return len(a) }
-func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
-func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-type invariant func(s *stream, r float64) float64
-
-// NewLowBiased returns an initialized Stream for low-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the lower ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewLowBiased(epsilon float64) *Stream {
- ƒ := func(s *stream, r float64) float64 {
- return 2 * epsilon * r
- }
- return newStream(ƒ)
-}
-
-// NewHighBiased returns an initialized Stream for high-biased quantiles
-// (e.g. 0.5, 0.9, 0.99) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the higher ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewHighBiased(epsilon float64) *Stream {
- ƒ := func(s *stream, r float64) float64 {
- return 2 * epsilon * (s.n - r)
- }
- return newStream(ƒ)
-}
-
-// NewTargeted returns an initialized Stream concerned with a particular set of
-// quantile values that are supplied a priori. Knowing these a priori reduces
-// space and computation time. The targets map maps the desired quantiles to
-// their absolute errors, i.e. the true quantile of a value returned by a query
-// is guaranteed to be within (Quantile±Epsilon).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
-func NewTargeted(targets map[float64]float64) *Stream {
- ƒ := func(s *stream, r float64) float64 {
- var m = math.MaxFloat64
- var f float64
- for quantile, epsilon := range targets {
- if quantile*s.n <= r {
- f = (2 * epsilon * r) / quantile
- } else {
- f = (2 * epsilon * (s.n - r)) / (1 - quantile)
- }
- if f < m {
- m = f
- }
- }
- return m
- }
- return newStream(ƒ)
-}
-
-// Stream computes quantiles for a stream of float64s. It is not thread-safe by
-// design. Take care when using across multiple goroutines.
-type Stream struct {
- *stream
- b Samples
- sorted bool
-}
-
-func newStream(ƒ invariant) *Stream {
- x := &stream{ƒ: ƒ}
- return &Stream{x, make(Samples, 0, 500), true}
-}
-
-// Insert inserts v into the stream.
-func (s *Stream) Insert(v float64) {
- s.insert(Sample{Value: v, Width: 1})
-}
-
-func (s *Stream) insert(sample Sample) {
- s.b = append(s.b, sample)
- s.sorted = false
- if len(s.b) == cap(s.b) {
- s.flush()
- }
-}
-
-// Query returns the computed qth percentile value. If s was created with
-// NewTargeted, and q is not in the set of quantiles provided a priori, Query
-// will return an unspecified result.
-func (s *Stream) Query(q float64) float64 {
- if !s.flushed() {
- // Fast path when there hasn't been enough data for a flush;
- // this also yields better accuracy for small sets of data.
- l := len(s.b)
- if l == 0 {
- return 0
- }
- i := int(float64(l) * q)
- if i > 0 {
- i -= 1
- }
- s.maybeSort()
- return s.b[i].Value
- }
- s.flush()
- return s.stream.query(q)
-}
-
-// Merge merges samples into the underlying stream's samples. This is handy when
-// merging multiple streams from separate threads, database shards, etc.
-//
-// ATTENTION: This method is broken and does not yield correct results. The
-// underlying algorithm is not capable of merging streams correctly.
-func (s *Stream) Merge(samples Samples) {
- sort.Sort(samples)
- s.stream.merge(samples)
-}
-
-// Reset reinitializes and clears the list, reusing the samples buffer memory.
-func (s *Stream) Reset() {
- s.stream.reset()
- s.b = s.b[:0]
-}
-
-// Samples returns stream samples held by s.
-func (s *Stream) Samples() Samples {
- if !s.flushed() {
- return s.b
- }
- s.flush()
- return s.stream.samples()
-}
-
-// Count returns the total number of samples observed in the stream
-// since initialization.
-func (s *Stream) Count() int {
- return len(s.b) + s.stream.count()
-}
-
-func (s *Stream) flush() {
- s.maybeSort()
- s.stream.merge(s.b)
- s.b = s.b[:0]
-}
-
-func (s *Stream) maybeSort() {
- if !s.sorted {
- s.sorted = true
- sort.Sort(s.b)
- }
-}
-
-func (s *Stream) flushed() bool {
- return len(s.stream.l) > 0
-}
-
-type stream struct {
- n float64
- l []Sample
- ƒ invariant
-}
-
-func (s *stream) reset() {
- s.l = s.l[:0]
- s.n = 0
-}
-
-func (s *stream) insert(v float64) {
- s.merge(Samples{{v, 1, 0}})
-}
-
-func (s *stream) merge(samples Samples) {
- // TODO(beorn7): This tries to merge not only individual samples, but
- // whole summaries. The paper doesn't mention merging summaries at
- // all. Unittests show that the merging is inaccurate. Find out how to
- // do merges properly.
- var r float64
- i := 0
- for _, sample := range samples {
- for ; i < len(s.l); i++ {
- c := s.l[i]
- if c.Value > sample.Value {
- // Insert at position i.
- s.l = append(s.l, Sample{})
- copy(s.l[i+1:], s.l[i:])
- s.l[i] = Sample{
- sample.Value,
- sample.Width,
- math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
- // TODO(beorn7): How to calculate delta correctly?
- }
- i++
- goto inserted
- }
- r += c.Width
- }
- s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
- i++
- inserted:
- s.n += sample.Width
- r += sample.Width
- }
- s.compress()
-}
-
-func (s *stream) count() int {
- return int(s.n)
-}
-
-func (s *stream) query(q float64) float64 {
- t := math.Ceil(q * s.n)
- t += math.Ceil(s.ƒ(s, t) / 2)
- p := s.l[0]
- var r float64
- for _, c := range s.l[1:] {
- r += p.Width
- if r+c.Width+c.Delta > t {
- return p.Value
- }
- p = c
- }
- return p.Value
-}
-
-func (s *stream) compress() {
- if len(s.l) < 2 {
- return
- }
- x := s.l[len(s.l)-1]
- xi := len(s.l) - 1
- r := s.n - 1 - x.Width
-
- for i := len(s.l) - 2; i >= 0; i-- {
- c := s.l[i]
- if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
- x.Width += c.Width
- s.l[xi] = x
- // Remove element at i.
- copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/src/github.com/boltdb/bolt/.gitignore b/vendor/src/github.com/boltdb/bolt/.gitignore deleted file mode 100644 index c7bd2b7a5..000000000 --- a/vendor/src/github.com/boltdb/bolt/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.prof -*.test -*.swp -/bin/ diff --git a/vendor/src/github.com/boltdb/bolt/LICENSE b/vendor/src/github.com/boltdb/bolt/LICENSE deleted file mode 100644 index 004e77fe5..000000000 --- a/vendor/src/github.com/boltdb/bolt/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/src/github.com/boltdb/bolt/Makefile b/vendor/src/github.com/boltdb/bolt/Makefile deleted file mode 100644 index e035e63ad..000000000 --- a/vendor/src/github.com/boltdb/bolt/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -default: build - -race: - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -# go get github.com/kisielk/errcheck -errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt - -test: - @go test -v -cover . - @go test -v ./cmd/bolt - -.PHONY: fmt test diff --git a/vendor/src/github.com/boltdb/bolt/README.md b/vendor/src/github.com/boltdb/bolt/README.md deleted file mode 100644 index 3bff3cce4..000000000 --- a/vendor/src/github.com/boltdb/bolt/README.md +++ /dev/null @@ -1,850 +0,0 @@ -Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg) -==== - -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] -[LMDB project][lmdb]. The goal of the project is to provide a simple, -fast, and reliable database for projects that don't require a full database -server such as Postgres or MySQL. - -Since Bolt is meant to be used as such a low-level piece of functionality, -simplicity is key. The API will be small and only focus on getting values -and setting values. That's it. 
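Returning briefly to the `quantile` package whose deletion ends just above: its `NewTargeted` constructor captures the intended use, estimating a fixed set of quantiles within per-quantile absolute error bounds. A minimal usage sketch under that API (the targets and the uniform input are illustrative):

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile, each with its own
	// absolute error bound, as the package documentation describes.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.001,
	})
	for i := 1; i <= 10000; i++ {
		q.Insert(float64(i))
	}
	fmt.Println("p50:", q.Query(0.50)) // ~5000 for this input
	fmt.Println("p99:", q.Query(0.99)) // ~9900 for this input
}
```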
-
-[hyc_symas]: https://twitter.com/hyc_symas
-[lmdb]: http://symas.com/mdb/
-
-## Project Status
-
-Bolt is stable and the API is fixed. Full unit test coverage and randomized
-black box testing are used to ensure database consistency and thread safety.
-Bolt is currently used in high-load production environments serving databases
-as large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
-services every day.
-
-## Table of Contents
-
-- [Getting Started](#getting-started)
- - [Installing](#installing)
- - [Opening a database](#opening-a-database)
- - [Transactions](#transactions)
- - [Read-write transactions](#read-write-transactions)
- - [Read-only transactions](#read-only-transactions)
- - [Batch read-write transactions](#batch-read-write-transactions)
- - [Managing transactions manually](#managing-transactions-manually)
- - [Using buckets](#using-buckets)
- - [Using key/value pairs](#using-keyvalue-pairs)
- - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
- - [Iterating over keys](#iterating-over-keys)
- - [Prefix scans](#prefix-scans)
- - [Range scans](#range-scans)
- - [ForEach()](#foreach)
- - [Nested buckets](#nested-buckets)
- - [Database backups](#database-backups)
- - [Statistics](#statistics)
- - [Read-Only Mode](#read-only-mode)
- - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
-- [Resources](#resources)
-- [Comparison with other databases](#comparison-with-other-databases)
- - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
- - [LevelDB, RocksDB](#leveldb-rocksdb)
- - [LMDB](#lmdb)
-- [Caveats & Limitations](#caveats--limitations)
-- [Reading the Source](#reading-the-source)
-- [Other Projects Using Bolt](#other-projects-using-bolt)
-
-## Getting Started
-
-### Installing
-
-To start using Bolt, install Go and run `go get`:
-
-```sh
-$ go get github.com/boltdb/bolt/...
-```
-
-This will retrieve the library and install the `bolt` command line utility into
-your `$GOBIN` path.
-
-
-### Opening a database
-
-The top-level object in Bolt is a `DB`. It is represented as a single file on
-your disk and provides a consistent snapshot of your data.
-
-To open your database, simply use the `bolt.Open()` function:
-
-```go
-package main
-
-import (
- "log"
-
- "github.com/boltdb/bolt"
-)
-
-func main() {
- // Open the my.db data file in your current directory.
- // It will be created if it doesn't exist.
- db, err := bolt.Open("my.db", 0600, nil)
- if err != nil {
- log.Fatal(err)
- }
- defer db.Close()
-
- ...
-}
-```
-
-Please note that Bolt obtains a file lock on the data file so multiple processes
-cannot open the same database at the same time. Opening an already open Bolt
-database will cause it to hang until the other process closes it. To prevent
-an indefinite wait you can pass a timeout option to the `Open()` function:
-
-```go
-db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
-```
-
-
-### Transactions
-
-Bolt allows only one read-write transaction at a time but allows as many
-read-only transactions as you want at a time. Each transaction has a consistent
-view of the data as it existed when the transaction started.
-
-Individual transactions and all objects created from them (e.g. buckets, keys)
-are not thread safe. To work with data in multiple goroutines you must start
-a transaction for each one or use locking to ensure only one goroutine accesses
-a transaction at a time. Creating a transaction from the `DB` is thread safe.
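A short sketch of the one-transaction-per-goroutine pattern just described, in the style of the snippets below (it assumes an open `db` and, like the later examples, that `MyBucket` already exists):

```go
var wg sync.WaitGroup
for i := 0; i < 4; i++ {
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Each goroutine opens its own read-only transaction;
		// the transaction itself is never shared across goroutines.
		db.View(func(tx *bolt.Tx) error {
			v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
			fmt.Printf("read: %s\n", v)
			return nil
		})
	}()
}
wg.Wait()
```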
-
-Read-only transactions and read-write transactions should not depend on one
-another and generally shouldn't be opened simultaneously in the same goroutine.
-This can cause a deadlock as the read-write transaction needs to periodically
-re-map the data file but it cannot do so while a read-only transaction is open.
-
-
-#### Read-write transactions
-
-To start a read-write transaction, you can use the `DB.Update()` function:
-
-```go
-err := db.Update(func(tx *bolt.Tx) error {
- ...
- return nil
-})
-```
-
-Inside the closure, you have a consistent view of the database. You commit the
-transaction by returning `nil` at the end. You can also rollback the transaction
-at any point by returning an error. All database operations are allowed inside
-a read-write transaction.
-
-Always check the return error as it will report any disk failures that can cause
-your transaction to not complete. If you return an error within your closure
-it will be passed through.
-
-
-#### Read-only transactions
-
-To start a read-only transaction, you can use the `DB.View()` function:
-
-```go
-err := db.View(func(tx *bolt.Tx) error {
- ...
- return nil
-})
-```
-
-You also get a consistent view of the database within this closure, however,
-no mutating operations are allowed within a read-only transaction. You can only
-retrieve buckets, retrieve values, and copy the database within a read-only
-transaction.
-
-
-#### Batch read-write transactions
-
-Each `DB.Update()` waits for disk to commit the writes. This overhead
-can be minimized by combining multiple updates with the `DB.Batch()`
-function:
-
-```go
-err := db.Batch(func(tx *bolt.Tx) error {
- ...
- return nil
-})
-```
-
-Concurrent Batch calls are opportunistically combined into larger
-transactions. Batch is only useful when there are multiple goroutines
-calling it.
-
-The trade-off is that `Batch` can call the given
-function multiple times if parts of the transaction fail. The
-function must be idempotent and side effects must take effect only
-after a successful return from `DB.Batch()`.
-
-For example: don't display messages from inside the function, instead
-set variables in the enclosing scope:
-
-```go
-var id uint64
-err := db.Batch(func(tx *bolt.Tx) error {
- // Find last key in bucket, decode as bigendian uint64, increment
- // by one, encode back to []byte, and add new key.
- ...
- id = newValue
- return nil
-})
-if err != nil {
- return ...
-}
-fmt.Printf("Allocated ID %d\n", id)
-```
-
-
-#### Managing transactions manually
-
-The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
-function. These helper functions will start the transaction, execute a function,
-and then safely close your transaction if an error is returned. This is the
-recommended way to use Bolt transactions.
-
-However, sometimes you may want to manually start and end your transactions.
-You can use the `DB.Begin()` function directly but **please** be sure to close
-the transaction.
-
-```go
-// Start a writable transaction.
-tx, err := db.Begin(true)
-if err != nil {
- return err
-}
-defer tx.Rollback()
-
-// Use the transaction...
-_, err = tx.CreateBucket([]byte("MyBucket"))
-if err != nil {
- return err
-}
-
-// Commit the transaction and check for error.
-if err := tx.Commit(); err != nil {
- return err
-}
-```
-
-The first argument to `DB.Begin()` is a boolean stating if the transaction
-should be writable.
-
-
-### Using buckets
-
-Buckets are collections of key/value pairs within the database.
All keys in a
-bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
-function:
-
-```go
-db.Update(func(tx *bolt.Tx) error {
- b, err := tx.CreateBucket([]byte("MyBucket"))
- if err != nil {
- return fmt.Errorf("create bucket: %s", err)
- }
- return nil
-})
-```
-
-You can also create a bucket only if it doesn't exist by using the
-`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
-function for all your top-level buckets after you open your database so you can
-guarantee that they exist for future transactions.
-
-To delete a bucket, simply call the `Tx.DeleteBucket()` function.
-
-
-### Using key/value pairs
-
-To save a key/value pair to a bucket, use the `Bucket.Put()` function:
-
-```go
-db.Update(func(tx *bolt.Tx) error {
- b := tx.Bucket([]byte("MyBucket"))
- err := b.Put([]byte("answer"), []byte("42"))
- return err
-})
-```
-
-This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
-bucket. To retrieve this value, we can use the `Bucket.Get()` function:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- b := tx.Bucket([]byte("MyBucket"))
- v := b.Get([]byte("answer"))
- fmt.Printf("The answer is: %s\n", v)
- return nil
-})
-```
-
-The `Get()` function does not return an error because its operation is
-guaranteed to work (unless there is some kind of system failure). If the key
-exists then it will return its byte slice value. If it doesn't exist then it
-will return `nil`. It's important to note that you can have a zero-length value
-set to a key, which is different from the key not existing.
-
-Use the `Bucket.Delete()` function to delete a key from the bucket.
-
-Please note that values returned from `Get()` are only valid while the
-transaction is open. If you need to use a value outside of the transaction
-then you must use `copy()` to copy it to another byte slice.
-
-
-### Autoincrementing integer for the bucket
-
-By using the `NextSequence()` function, you can let Bolt determine a sequence
-which can be used as the unique identifier for your key/value pairs. See the
-example below.
-
-```go
-// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
-func (s *Store) CreateUser(u *User) error {
- return s.db.Update(func(tx *bolt.Tx) error {
- // Retrieve the users bucket.
- // This should be created when the DB is first opened.
- b := tx.Bucket([]byte("users"))
-
- // Generate ID for the user.
- // This returns an error only if the Tx is closed or not writeable.
- // That can't happen in an Update() call so I ignore the error check.
- id, _ := b.NextSequence()
- u.ID = int(id)
-
- // Marshal user data into bytes.
- buf, err := json.Marshal(u)
- if err != nil {
- return err
- }
-
- // Persist bytes to users bucket.
- return b.Put(itob(u.ID), buf)
- })
-}
-
-// itob returns an 8-byte big endian representation of v.
-func itob(v int) []byte {
- b := make([]byte, 8)
- binary.BigEndian.PutUint64(b, uint64(v))
- return b
-}
-
-type User struct {
- ID int
- ...
-}
-```
-
-### Iterating over keys
-
-Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
-iteration over these keys extremely fast.
To iterate over keys we'll use a -`Cursor`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - c := b.Cursor() - - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -The cursor allows you to move to a specific point in the list of keys and move -forward or backward through the keys one at a time. - -The following functions are available on the cursor: - -``` -First() Move to the first key. -Last() Move to the last key. -Seek() Move to a specific key. -Next() Move to the next key. -Prev() Move to the previous key. -``` - -Each of those functions has a return signature of `(key []byte, value []byte)`. -When you have iterated to the end of the cursor then `Next()` will return a -`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` -before calling `Next()` or `Prev()`. If you do not seek to a position then -these functions will return a `nil` key. - -During iteration, if the key is non-`nil` but the value is `nil`, that means -the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to -access the sub-bucket. - - -#### Prefix scans - -To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - c := tx.Bucket([]byte("MyBucket")).Cursor() - - prefix := []byte("1234") - for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -#### Range scans - -Another common use case is scanning over a range such as a time range. If you -use a sortable time encoding such as RFC3339 then you can query a specific -date range like this: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume our events bucket exists and has RFC3339 encoded time keys. - c := tx.Bucket([]byte("Events")).Cursor() - - // Our time range spans the 90's decade. - min := []byte("1990-01-01T00:00:00Z") - max := []byte("2000-01-01T00:00:00Z") - - // Iterate over the 90's. - for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { - fmt.Printf("%s: %s\n", k, v) - } - - return nil -}) -``` - -Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. - - -#### ForEach() - -You can also use the function `ForEach()` if you know you'll be iterating over -all the keys in a bucket: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - b.ForEach(func(k, v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - return nil -}) -``` - - -### Nested buckets - -You can also store a bucket in a key to create nested buckets. The API is the -same as the bucket management API on the `DB` object: - -```go -func (*Bucket) CreateBucket(key []byte) (*Bucket, error) -func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) -func (*Bucket) DeleteBucket(key []byte) error -``` - - -### Database backups - -Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` -function to write a consistent view of the database to a writer. If you call -this from a read-only transaction, it will perform a hot backup and not block -your other database reads and writes. 
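For instance, a hot backup to a local file is just a `Tx.WriteTo` call inside a read-only transaction; the backup path here is an illustrative assumption:

```go
err := db.View(func(tx *bolt.Tx) error {
	f, err := os.Create("my.db.bak")
	if err != nil {
		return err
	}
	defer f.Close()
	// WriteTo streams a consistent snapshot of the database; other
	// readers and writers keep running while this transaction is open.
	_, err = tx.WriteTo(f)
	return err
})
```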
-
-### Database backups
-
-Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
-function to write a consistent view of the database to a writer. If you call
-this from a read-only transaction, it will perform a hot backup and not block
-your other database reads and writes.
-
-By default, it will use a regular file handle which will utilize the operating
-system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
-documentation for information about optimizing for larger-than-RAM datasets.
-
-One common use case is to back up over HTTP so you can use tools like `cURL` to
-do database backups:
-
-```go
-func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
-	err := db.View(func(tx *bolt.Tx) error {
-		w.Header().Set("Content-Type", "application/octet-stream")
-		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
-		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
-		_, err := tx.WriteTo(w)
-		return err
-	})
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-	}
-}
-```
-
-Then you can back up using this command:
-
-```sh
-$ curl http://localhost/backup > my.db
-```
-
-Or you can open your browser to `http://localhost/backup` and it will download
-automatically.
-
-If you want to back up to another file you can use the `Tx.CopyFile()` helper
-function.
-
-
-### Statistics
-
-The database keeps a running count of many of the internal operations it
-performs so you can better understand what's going on. By grabbing a snapshot
-of these stats at two points in time we can see what operations were performed
-in that time range.
-
-For example, we could start a goroutine to log stats every 10 seconds:
-
-```go
-go func() {
-	// Grab the initial stats.
-	prev := db.Stats()
-
-	for {
-		// Wait for 10s.
-		time.Sleep(10 * time.Second)
-
-		// Grab the current stats and diff them.
-		stats := db.Stats()
-		diff := stats.Sub(&prev)
-
-		// Encode stats to JSON and print to STDERR.
-		json.NewEncoder(os.Stderr).Encode(diff)
-
-		// Save stats for the next loop.
-		prev = stats
-	}
-}()
-```
-
-It's also useful to pipe these stats to a service such as statsd for monitoring
-or to provide an HTTP endpoint that will perform a fixed-length sample.
-
-
-### Read-Only Mode
-
-Sometimes it is useful to create a shared, read-only Bolt database. To do this,
-set the `Options.ReadOnly` flag when opening your database. Read-only mode
-uses a shared lock to allow multiple processes to read from the database but
-it will block any processes from opening the database in read-write mode.
-
-```go
-db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
-if err != nil {
-	log.Fatal(err)
-}
-```
-
-### Mobile Use (iOS/Android)
-
-Bolt is able to run on mobile devices by leveraging the binding feature of the
-[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
-contain your database logic and a reference to a `*bolt.DB` with an
-initializing constructor that takes in a file path where the database file will
-be stored. Neither Android nor iOS requires extra permissions or cleanup from
-using this method.
-
-```go
-func NewBoltDB(filepath string) *BoltDB {
-	db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return &BoltDB{db}
-}
-
-type BoltDB struct {
-	db *bolt.DB
-	...
-}
-
-func (b *BoltDB) Path() string {
-	return b.db.Path()
-}
-
-func (b *BoltDB) Close() {
-	b.db.Close()
-}
-```
-
-Database logic should be defined as methods on this wrapper struct.
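-
-For example, here is a minimal sketch of such a method (the `Put` wrapper
-below is our own illustration, not part of Bolt or gomobile): it hides the
-transaction handling behind a simple call the mobile side can bind to.
-
-```go
-// Put stores a key/value pair in a fixed "data" bucket so that callers on
-// the mobile side never have to manage transactions directly.
-func (b *BoltDB) Put(key, value []byte) error {
-	return b.db.Update(func(tx *bolt.Tx) error {
-		bkt, err := tx.CreateBucketIfNotExists([]byte("data"))
-		if err != nil {
-			return err
-		}
-		return bkt.Put(key, value)
-	})
-}
-```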
-
-To initialize this struct from the native language, use snippets like the ones
-below. Both platforms now sync their local storage to the cloud, so each
-snippet also disables that functionality for the database file:
-
-#### Android
-
-```java
-String path;
-if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) {
-    path = getNoBackupFilesDir().getAbsolutePath();
-} else {
-    path = getFilesDir().getAbsolutePath();
-}
-Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
-```
-
-#### iOS
-
-```objc
-- (void)demo {
-    NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
-                                                          NSUserDomainMask,
-                                                          YES) objectAtIndex:0];
-    GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
-    [self addSkipBackupAttributeToItemAtPath:demo.path];
-    // Some DB logic would go here.
-    [demo close];
-}
-
-- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *)filePathString
-{
-    NSURL* URL = [NSURL fileURLWithPath: filePathString];
-    assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
-
-    NSError *error = nil;
-    BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
-                                  forKey: NSURLIsExcludedFromBackupKey error: &error];
-    if (!success) {
-        NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
-    }
-    return success;
-}
-```
-
-## Resources
-
-For more information on getting started with Bolt, check out the following articles:
-
-* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
-* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
-
-
-## Comparison with other databases
-
-### Postgres, MySQL, & other relational databases
-
-Relational databases structure data into rows and are only accessible through
-the use of SQL. This approach provides flexibility in how you store and query
-your data but also incurs overhead in parsing and planning SQL statements. Bolt
-accesses all data by a byte slice key. This makes Bolt fast to read and write
-data by key but provides no built-in support for joining values together.
-
-Most relational databases (with the exception of SQLite) are standalone servers
-that run separately from your application. This gives your systems
-flexibility to connect multiple application servers to a single database
-server but also adds overhead in serializing and transporting data over the
-network. Bolt runs as a library included in your application so all data access
-has to go through your application's process. This brings data closer to your
-application but limits multi-process access to the data.
-
-
-### LevelDB, RocksDB
-
-LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
-they are libraries bundled into the application; however, their underlying
-structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
-random writes by using a write-ahead log and multi-tiered, sorted files called
-SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
-have trade-offs.
-
-If you require a high random write throughput (>10,000 w/sec) or you need to use
-spinning disks then LevelDB could be a good choice. If your application is
-read-heavy or does a lot of range scans then Bolt could be a good choice.
-
-One other important consideration is that LevelDB does not have transactions.
-It supports batch writing of key/value pairs and it supports read snapshots
-but it will not give you the ability to do a compare-and-swap operation safely.
-Bolt supports fully serializable ACID transactions.
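-
-To make the compare-and-swap point concrete, here is a minimal sketch (the
-helper name and error message are our own illustration): because `Update()`
-runs the whole closure in a single serializable read-write transaction, no
-other writer can slip in between the read and the write.
-
-```go
-// compareAndSwap sets key to newv only if its current value equals oldv.
-func compareAndSwap(db *bolt.DB, bucket, key, oldv, newv []byte) error {
-	return db.Update(func(tx *bolt.Tx) error {
-		b, err := tx.CreateBucketIfNotExists(bucket)
-		if err != nil {
-			return err
-		}
-		// Check the current value inside the same transaction that writes.
-		if !bytes.Equal(b.Get(key), oldv) {
-			return fmt.Errorf("compare-and-swap: value changed")
-		}
-		return b.Put(key, newv)
-	})
-}
-```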
-
-
-### LMDB
-
-Bolt was originally a port of LMDB so it is architecturally similar. Both use
-a B+tree, have ACID semantics with fully serializable transactions, and support
-lock-free MVCC using a single writer and multiple readers.
-
-The two projects have somewhat diverged. LMDB heavily focuses on raw performance
-while Bolt has focused on simplicity and ease of use. For example, LMDB allows
-several unsafe actions such as direct writes for the sake of performance. Bolt
-opts to disallow actions which can leave the database in a corrupted state. The
-only exception to this in Bolt is `DB.NoSync`.
-
-There are also a few differences in API. LMDB requires a maximum mmap size when
-opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
-automatically. LMDB overloads the getter and setter functions with multiple
-flags whereas Bolt splits these specialized cases into their own functions.
-
-
-## Caveats & Limitations
-
-It's important to pick the right tool for the job and Bolt is no exception.
-Here are a few things to note when evaluating and using Bolt:
-
-* Bolt is good for read intensive workloads. Sequential write performance is
-  also fast but random writes can be slow. You can use `DB.Batch()` or add a
-  write-ahead log to help mitigate this issue.
-
-* Bolt uses a B+tree internally so there can be a lot of random page access.
-  SSDs provide a significant performance boost over spinning disks.
-
-* Try to avoid long running read transactions. Bolt uses copy-on-write so
-  old pages cannot be reclaimed while an old transaction is using them.
-
-* Byte slices returned from Bolt are only valid during a transaction. Once the
-  transaction has been committed or rolled back then the memory they point to
-  can be reused by a new page or can be unmapped from virtual memory and you'll
-  see an `unexpected fault address` panic when accessing it. (A sketch of how
-  to copy a value out of a transaction follows this list.)
-
-* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
-  buckets that have random inserts will cause your database to have very poor
-  page utilization.
-
-* Use larger buckets in general. Smaller buckets cause poor page utilization
-  once they become larger than the page size (typically 4KB).
-
-* Bulk loading a lot of random writes into a new bucket can be slow as the
-  page will not split until the transaction is committed. Randomly inserting
-  more than 100,000 key/value pairs into a single new bucket in a single
-  transaction is not advised.
-
-* Bolt uses a memory-mapped file so the underlying operating system handles the
-  caching of the data. Typically, the OS will cache as much of the file as it
-  can in memory and will release memory as needed to other processes. This means
-  that Bolt can show very high memory usage when working with large databases.
-  However, this is expected and the OS will release memory as needed. Bolt can
-  handle databases much larger than the available physical RAM, provided its
-  memory-map fits in the process virtual address space. It may be problematic
-  on 32-bit systems.
-
-* The data structures in the Bolt database are memory mapped so the data file
-  will be endian specific. This means that you cannot copy a Bolt file from a
-  little endian machine to a big endian machine and have it work. For most
-  users this is not a concern since most modern CPUs are little endian.
-
-* Because of the way pages are laid out on disk, Bolt cannot truncate data files
-  and return free pages back to the disk. Instead, Bolt maintains a free list
-  of unused pages within its data file. These free pages can be reused by later
-  transactions. This works well for many use cases as databases generally tend
-  to grow. However, it's important to note that deleting large chunks of data
-  will not allow you to reclaim that space on disk.
-
-  For more information on page allocation, [see this comment][page-allocation].
-
-[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
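-
-Here is that copy-out pattern as a minimal sketch (the function and key names
-are our own illustration). It applies the `copy()` advice from the key/value
-section: the slice returned by `Get()` points into the mmap, so a copy is made
-before the transaction ends.
-
-```go
-// readAnswer returns a copy of the value that remains valid after the
-// transaction has closed.
-func readAnswer(db *bolt.DB) ([]byte, error) {
-	var out []byte
-	err := db.View(func(tx *bolt.Tx) error {
-		if b := tx.Bucket([]byte("MyBucket")); b != nil {
-			if v := b.Get([]byte("answer")); v != nil {
-				// Copy while the mmap-backed slice is still valid.
-				out = make([]byte, len(v))
-				copy(out, v)
-			}
-		}
-		return nil
-	})
-	return out, err
-}
-```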
-
-
-## Reading the Source
-
-Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
-transactional key/value database so it can be a good starting point for people
-interested in how databases work.
-
-The best places to start are the main entry points into Bolt:
-
-- `Open()` - Initializes the reference to the database. It's responsible for
-  creating the database if it doesn't exist, obtaining an exclusive lock on the
-  file, reading the meta pages, & memory-mapping the file.
-
-- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
-  value of the `writable` argument. This requires briefly obtaining the "meta"
-  lock to keep track of open transactions. Only one read-write transaction can
-  exist at a time so the "rwlock" is acquired during the life of a read-write
-  transaction.
-
-- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
-  arguments, a cursor is used to traverse the B+tree to the page and position
-  where the key & value will be written. Once the position is found, the bucket
-  materializes the underlying page and the page's parent pages into memory as
-  "nodes". These nodes are where mutations occur during read-write transactions.
-  These changes get flushed to disk during commit.
-
-- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
-  to move to the page & position of a key/value pair. During a read-only
-  transaction, the key and value data is returned as a direct reference to the
-  underlying mmap file so there's no allocation overhead. For read-write
-  transactions, this data may reference the mmap file or one of the in-memory
-  node values.
-
-- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
-  or in-memory nodes. It can seek to a specific key, move to the first or last
-  value, or it can move forward or backward. The cursor handles the movement up
-  and down the B+tree transparently to the end user.
-
-- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
-  into pages to be written to disk. Writing to disk then occurs in two phases.
-  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
-  new meta page with an incremented transaction ID is written and another
-  `fsync()` occurs. This two-phase write ensures that partially written data
-  pages are ignored in the event of a crash since the meta page pointing to them
-  is never written. Partially written meta pages are invalidated because they
-  are written with a checksum.
-
-If you have additional notes that could be helpful for others, please submit
-them via pull request.
-
-
-## Other Projects Using Bolt
-
-Below is a list of public, open source projects that use Bolt:
-
-* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
-* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
-* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. -* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. -* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. -* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. -* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. -* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. -* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". -* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. -* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. -* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. -* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. -* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. -* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. -* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. -* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. -* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. -* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database. -* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. -* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. -* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. -* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. -* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. -* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. -* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. -* [stow](https://github.com/djherbis/stow) - a persistence manager for objects - backed by boltdb. -* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining - simple tx and key scans. -* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. 
-* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service -* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. -* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. -* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. -* [Storm](https://github.com/asdine/storm) - A simple ORM around BoltDB. -* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. -* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. -* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. - -If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/src/github.com/boltdb/bolt/appveyor.yml b/vendor/src/github.com/boltdb/bolt/appveyor.yml deleted file mode 100644 index 6e26e941d..000000000 --- a/vendor/src/github.com/boltdb/bolt/appveyor.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: "{build}" - -os: Windows Server 2012 R2 - -clone_folder: c:\gopath\src\github.com\boltdb\bolt - -environment: - GOPATH: c:\gopath - -install: - - echo %PATH% - - echo %GOPATH% - - go version - - go env - - go get -v -t ./... - -build_script: - - go test -v ./... diff --git a/vendor/src/github.com/boltdb/bolt/bolt_386.go b/vendor/src/github.com/boltdb/bolt/bolt_386.go deleted file mode 100644 index e659bfb91..000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_386.go +++ /dev/null @@ -1,7 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/src/github.com/boltdb/bolt/bolt_amd64.go b/vendor/src/github.com/boltdb/bolt/bolt_amd64.go deleted file mode 100644 index cca6b7eb7..000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_amd64.go +++ /dev/null @@ -1,7 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/src/github.com/boltdb/bolt/bolt_arm.go b/vendor/src/github.com/boltdb/bolt/bolt_arm.go deleted file mode 100644 index e659bfb91..000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_arm.go +++ /dev/null @@ -1,7 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/src/github.com/boltdb/bolt/bolt_arm64.go b/vendor/src/github.com/boltdb/bolt/bolt_arm64.go deleted file mode 100644 index 6d2309352..000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_arm64.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build arm64 - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. 
-const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/src/github.com/boltdb/bolt/bolt_linux.go b/vendor/src/github.com/boltdb/bolt/bolt_linux.go deleted file mode 100644 index 2b6766614..000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -package bolt - -import ( - "syscall" -) - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return syscall.Fdatasync(int(db.file.Fd())) -} diff --git a/vendor/src/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/src/github.com/boltdb/bolt/bolt_openbsd.go deleted file mode 100644 index 7058c3d73..000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_openbsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package bolt - -import ( - "syscall" - "unsafe" -) - -const ( - msAsync = 1 << iota // perform asynchronous writes - msSync // perform synchronous writes - msInvalidate // invalidate cached data -) - -func msync(db *DB) error { - _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) - if errno != 0 { - return errno - } - return nil -} - -func fdatasync(db *DB) error { - if db.data != nil { - return msync(db) - } - return db.file.Sync() -} diff --git a/vendor/src/github.com/boltdb/bolt/bolt_ppc.go b/vendor/src/github.com/boltdb/bolt/bolt_ppc.go deleted file mode 100644 index 645ddc3ed..000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_ppc.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/src/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/src/github.com/boltdb/bolt/bolt_ppc64.go deleted file mode 100644 index 2dc6be02e..000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_ppc64.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc64 - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go deleted file mode 100644 index 8351e129f..000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc64le - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/src/github.com/boltdb/bolt/bolt_s390x.go b/vendor/src/github.com/boltdb/bolt/bolt_s390x.go deleted file mode 100644 index f4dd26bbb..000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_s390x.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build s390x - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. 
-const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/src/github.com/boltdb/bolt/bolt_unix.go b/vendor/src/github.com/boltdb/bolt/bolt_unix.go deleted file mode 100644 index cad62dda1..000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_unix.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build !windows,!plan9,!solaris - -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - flag := syscall.LOCK_SH - if exclusive { - flag = syscall.LOCK_EX - } - - // Otherwise attempt to obtain an exclusive lock. - err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) - if err == nil { - return nil - } else if err != syscall.EWOULDBLOCK { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := syscall.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} - -// NOTE: This function is copied from stdlib because it is not available on darwin. -func madvise(b []byte, advice int) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go deleted file mode 100644 index 307bf2b3e..000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go +++ /dev/null @@ -1,90 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. 
- if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Pid = 0 - lock.Whence = 0 - lock.Pid = 0 - if exclusive { - lock.Type = syscall.F_WRLCK - } else { - lock.Type = syscall.F_RDLCK - } - err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) - if err == nil { - return nil - } else if err != syscall.EAGAIN { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_UNLCK - lock.Whence = 0 - return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := unix.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} diff --git a/vendor/src/github.com/boltdb/bolt/bolt_windows.go b/vendor/src/github.com/boltdb/bolt/bolt_windows.go deleted file mode 100644 index d538e6afd..000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_windows.go +++ /dev/null @@ -1,144 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procLockFileEx = modkernel32.NewProc("LockFileEx") - procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") -) - -const ( - lockExt = ".lock" - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - flagLockExclusive = 2 - flagLockFailImmediately = 1 - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx - errLockViolation syscall.Errno = 0x21 -) - -func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) - if r == 0 { - return err - } - return nil -} - -func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) - if r == 0 { - return err - } - return nil -} - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} - -// flock acquires an advisory lock on a file descriptor. 
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - // Create a separate lock file on windows because a process - // cannot share an exclusive lock on the same file. This is - // needed during Tx.WriteTo(). - f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) - if err != nil { - return err - } - db.lockfile = f - - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - - var flag uint32 = flagLockFailImmediately - if exclusive { - flag |= flagLockExclusive - } - - err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) - if err == nil { - return nil - } else if err != errLockViolation { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) - db.lockfile.Close() - os.Remove(db.path+lockExt) - return err -} - -// mmap memory maps a DB's data file. -// Based on: https://github.com/edsrzf/mmap-go -func mmap(db *DB, sz int) error { - if !db.readOnly { - // Truncate the database to the size of the mmap. - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("truncate: %s", err) - } - } - - // Open a file mapping handle. - sizelo := uint32(sz >> 32) - sizehi := uint32(sz) & 0xffffffff - h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) - if h == 0 { - return os.NewSyscallError("CreateFileMapping", errno) - } - - // Create the memory map. - addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) - if addr == 0 { - return os.NewSyscallError("MapViewOfFile", errno) - } - - // Close mapping handle. - if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { - return os.NewSyscallError("CloseHandle", err) - } - - // Convert to a byte array. - db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) - db.datasz = sz - - return nil -} - -// munmap unmaps a pointer from a file. -// Based on: https://github.com/edsrzf/mmap-go -func munmap(db *DB) error { - if db.data == nil { - return nil - } - - addr := (uintptr)(unsafe.Pointer(&db.data[0])) - if err := syscall.UnmapViewOfFile(addr); err != nil { - return os.NewSyscallError("UnmapViewOfFile", err) - } - return nil -} diff --git a/vendor/src/github.com/boltdb/bolt/boltsync_unix.go b/vendor/src/github.com/boltdb/bolt/boltsync_unix.go deleted file mode 100644 index f50442523..000000000 --- a/vendor/src/github.com/boltdb/bolt/boltsync_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows,!plan9,!linux,!openbsd - -package bolt - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} diff --git a/vendor/src/github.com/boltdb/bolt/bucket.go b/vendor/src/github.com/boltdb/bolt/bucket.go deleted file mode 100644 index d2f8c524e..000000000 --- a/vendor/src/github.com/boltdb/bolt/bucket.go +++ /dev/null @@ -1,748 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "unsafe" -) - -const ( - // MaxKeySize is the maximum length of a key, in bytes. - MaxKeySize = 32768 - - // MaxValueSize is the maximum length of a value, in bytes. 
- MaxValueSize = (1 << 31) - 2 -) - -const ( - maxUint = ^uint(0) - minUint = 0 - maxInt = int(^uint(0) >> 1) - minInt = -maxInt - 1 -) - -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - -const ( - minFillPercent = 0.1 - maxFillPercent = 1.0 -) - -// DefaultFillPercent is the percentage that split pages are filled. -// This value can be changed by setting Bucket.FillPercent. -const DefaultFillPercent = 0.5 - -// Bucket represents a collection of key/value pairs inside the database. -type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache - - // Sets the threshold for filling nodes when they split. By default, - // the bucket will fill to 50% but it can be useful to increase this - // amount if you know that your write workloads are mostly append-only. - // - // This is non-persisted across transactions so it must be set in every Tx. - FillPercent float64 -} - -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - -// newBucket returns a new bucket associated with a transaction. -func newBucket(tx *Tx) Bucket { - var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} - if tx.writable { - b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) - } - return b -} - -// Tx returns the tx of the bucket. -func (b *Bucket) Tx() *Tx { - return b.tx -} - -// Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root -} - -// Writable returns whether the bucket is writable. -func (b *Bucket) Writable() bool { - return b.tx.writable -} - -// Cursor creates a cursor associated with the bucket. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (b *Bucket) Cursor() *Cursor { - // Update transaction statistics. - b.tx.stats.CursorCount++ - - // Allocate and return a cursor. - return &Cursor{ - bucket: b, - stack: make([]elemRef, 0), - } -} - -// Bucket retrieves a nested bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) Bucket(name []byte) *Bucket { - if b.buckets != nil { - if child := b.buckets[string(name)]; child != nil { - return child - } - } - - // Move cursor to key. - c := b.Cursor() - k, v, flags := c.seek(name) - - // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { - return nil - } - - // Otherwise create a bucket and cache it. - var child = b.openBucket(v) - if b.buckets != nil { - b.buckets[string(name)] = child - } - - return child -} - -// Helper method that re-interprets a sub-bucket value -// from a parent into a Bucket -func (b *Bucket) openBucket(value []byte) *Bucket { - var child = newBucket(b.tx) - - // If this is a writable transaction then we need to copy the bucket entry. - // Read-only transactions can point directly at the mmap entry. 
- if b.tx.writable { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) - } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) - } - - // Save a reference to the inline page if the bucket is inline. - if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - } - - return &child -} - -// CreateBucket creates a new bucket at the given key and returns the new bucket. -// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { - if b.tx.db == nil { - return nil, ErrTxClosed - } else if !b.tx.writable { - return nil, ErrTxNotWritable - } else if len(key) == 0 { - return nil, ErrBucketNameRequired - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key. - if bytes.Equal(key, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists - } else { - return nil, ErrIncompatibleValue - } - } - - // Create empty, inline bucket. - var bucket = Bucket{ - bucket: &bucket{}, - rootNode: &node{isLeaf: true}, - FillPercent: DefaultFillPercent, - } - var value = bucket.write() - - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, bucketLeafFlag) - - // Since subbuckets are not allowed on inline buckets, we need to - // dereference the inline page, if it exists. This will cause the bucket - // to be treated as a regular, non-inline bucket for the rest of the tx. - b.page = nil - - return b.Bucket(key), nil -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { - child, err := b.CreateBucket(key) - if err == ErrBucketExists { - return b.Bucket(key), nil - } else if err != nil { - return nil, err - } - return child, nil -} - -// DeleteBucket deletes a bucket at the given key. -// Returns an error if the bucket does not exists, or if the key represents a non-bucket value. -func (b *Bucket) DeleteBucket(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { - return ErrBucketNotFound - } else if (flags & bucketLeafFlag) == 0 { - return ErrIncompatibleValue - } - - // Recursively delete all child buckets. - child := b.Bucket(key) - err := child.ForEach(func(k, v []byte) error { - if v == nil { - if err := child.DeleteBucket(k); err != nil { - return fmt.Errorf("delete bucket: %s", err) - } - } - return nil - }) - if err != nil { - return err - } - - // Remove cached copy. - delete(b.buckets, string(key)) - - // Release all bucket pages to freelist. - child.nodes = nil - child.rootNode = nil - child.free() - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// Get retrieves the value for a key in the bucket. -// Returns a nil value if the key does not exist or if the key is a nested bucket. -// The returned value is only valid for the life of the transaction. 
-func (b *Bucket) Get(key []byte) []byte { - k, v, flags := b.Cursor().seek(key) - - // Return nil if this is a bucket. - if (flags & bucketLeafFlag) != 0 { - return nil - } - - // If our target node isn't the same key as what's passed in then return nil. - if !bytes.Equal(key, k) { - return nil - } - return v -} - -// Put sets the value for a key in the bucket. -// If the key exist then its previous value will be overwritten. -// Supplied value must remain valid for the life of the transaction. -// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. -func (b *Bucket) Put(key []byte, value []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } else if len(key) == 0 { - return ErrKeyRequired - } else if len(key) > MaxKeySize { - return ErrKeyTooLarge - } else if int64(len(value)) > MaxValueSize { - return ErrValueTooLarge - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key with a bucket value. - if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, 0) - - return nil -} - -// Delete removes a key from the bucket. -// If the key does not exist then nothing is done and a nil error is returned. -// Returns an error if the bucket was created from a read-only transaction. -func (b *Bucket) Delete(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. - c := b.Cursor() - _, _, flags := c.seek(key) - - // Return an error if there is already existing bucket value. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// NextSequence returns an autoincrementing integer for the bucket. -func (b *Bucket) NextSequence() (uint64, error) { - if b.tx.db == nil { - return 0, ErrTxClosed - } else if !b.Writable() { - return 0, ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. - if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Increment and return the sequence. - b.bucket.sequence++ - return b.bucket.sequence, nil -} - -// ForEach executes a function for each key/value pair in a bucket. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. The provided function must not modify -// the bucket; this will result in undefined behavior. -func (b *Bucket) ForEach(fn func(k, v []byte) error) error { - if b.tx.db == nil { - return ErrTxClosed - } - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if err := fn(k, v); err != nil { - return err - } - } - return nil -} - -// Stat returns stats on a bucket. -func (b *Bucket) Stats() BucketStats { - var s, subStats BucketStats - pageSize := b.tx.db.pageSize - s.BucketN += 1 - if b.root == 0 { - s.InlineBucketN += 1 - } - b.forEachPage(func(p *page, depth int) { - if (p.flags & leafPageFlag) != 0 { - s.KeyN += int(p.count) - - // used totals the used bytes for the page - used := pageHeaderSize - - if p.count != 0 { - // If page has any elements, add all element headers. 
- used += leafPageElementSize * int(p.count-1) - - // Add all element key, value sizes. - // The computation takes advantage of the fact that the position - // of the last element's key/value equals to the total of the sizes - // of all previous elements' keys and values. - // It also includes the last element's header. - lastElement := p.leafPageElement(p.count - 1) - used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) - } - - if b.root == 0 { - // For inlined bucket just update the inline stats - s.InlineBucketInuse += used - } else { - // For non-inlined bucket update all the leaf stats - s.LeafPageN++ - s.LeafInuse += used - s.LeafOverflowN += int(p.overflow) - - // Collect stats from sub-buckets. - // Do that by iterating over all element headers - // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { - // For any bucket element, open the element value - // and recursively call Stats on the contained bucket. - subStats.Add(b.openBucket(e.value()).Stats()) - } - } - } - } else if (p.flags & branchPageFlag) != 0 { - s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) - - // used totals the used bytes for the page - // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) - - // Add size of all keys and values. - // Again, use the fact that last element's position equals to - // the total of key, value sizes of all previous elements. - used += int(lastElement.pos + lastElement.ksize) - s.BranchInuse += used - s.BranchOverflowN += int(p.overflow) - } - - // Keep track of maximum page depth. - if depth+1 > s.Depth { - s.Depth = (depth + 1) - } - }) - - // Alloc stats can be computed from page counts and pageSize. - s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize - s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize - - // Add the max depth of sub-buckets to get total nested depth. - s.Depth += subStats.Depth - // Add the stats for all sub-buckets - s.Add(subStats) - return s -} - -// forEachPage iterates over every page in a bucket, including inline pages. -func (b *Bucket) forEachPage(fn func(*page, int)) { - // If we have an inline page then just use that. - if b.page != nil { - fn(b.page, 0) - return - } - - // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, 0, fn) -} - -// forEachPageNode iterates over every page (or node) in a bucket. -// This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { - // If we have an inline page or root node then just use that. - if b.page != nil { - fn(b.page, nil, 0) - return - } - b._forEachPageNode(b.root, 0, fn) -} - -func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { - var p, n = b.pageNode(pgid) - - // Execute function. - fn(p, n, depth) - - // Recursively loop over children. - if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) - } - } - } else { - if !n.isLeaf { - for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) - } - } - } -} - -// spill writes all the nodes for this bucket to dirty pages. -func (b *Bucket) spill() error { - // Spill all child buckets first. 
- for name, child := range b.buckets { - // If the child bucket is small enough and it has no child buckets then - // write it inline into the parent bucket's page. Otherwise spill it - // like a normal bucket and make the parent value a pointer to the page. - var value []byte - if child.inlineable() { - child.free() - value = child.write() - } else { - if err := child.spill(); err != nil { - return err - } - - // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket - } - - // Skip writing the bucket if there are no materialized nodes. - if child.rootNode == nil { - continue - } - - // Update parent node. - var c = b.Cursor() - k, _, flags := c.seek([]byte(name)) - if !bytes.Equal([]byte(name), k) { - panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) - } - if flags&bucketLeafFlag == 0 { - panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) - } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) - } - - // Ignore if there's not a materialized root node. - if b.rootNode == nil { - return nil - } - - // Spill nodes. - if err := b.rootNode.spill(); err != nil { - return err - } - b.rootNode = b.rootNode.root() - - // Update the root node for this bucket. - if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) - } - b.root = b.rootNode.pgid - - return nil -} - -// inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. -func (b *Bucket) inlineable() bool { - var n = b.rootNode - - // Bucket must only contain a single leaf node. - if n == nil || !n.isLeaf { - return false - } - - // Bucket is not inlineable if it contains subbuckets or if it goes beyond - // our threshold for inline bucket size. - var size = pageHeaderSize - for _, inode := range n.inodes { - size += leafPageElementSize + len(inode.key) + len(inode.value) - - if inode.flags&bucketLeafFlag != 0 { - return false - } else if size > b.maxInlineBucketSize() { - return false - } - } - - return true -} - -// Returns the maximum total size of a bucket to make it a candidate for inlining. -func (b *Bucket) maxInlineBucketSize() int { - return b.tx.db.pageSize / 4 -} - -// write allocates and writes a bucket to a byte slice. -func (b *Bucket) write() []byte { - // Allocate the appropriate size. - var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) - - // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket - - // Convert byte slice to a fake page and write the root node. - var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - n.write(p) - - return value -} - -// rebalance attempts to balance all nodes. -func (b *Bucket) rebalance() { - for _, n := range b.nodes { - n.rebalance() - } - for _, child := range b.buckets { - child.rebalance() - } -} - -// node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgid pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") - - // Retrieve node if it's already been created. - if n := b.nodes[pgid]; n != nil { - return n - } - - // Otherwise create a node and cache it. 
- n := &node{bucket: b, parent: parent} - if parent == nil { - b.rootNode = n - } else { - parent.children = append(parent.children, n) - } - - // Use the inline page if this is an inline bucket. - var p = b.page - if p == nil { - p = b.tx.page(pgid) - } - - // Read the page into the node and cache it. - n.read(p) - b.nodes[pgid] = n - - // Update statistics. - b.tx.stats.NodeCount++ - - return n -} - -// free recursively frees all pages in the bucket. -func (b *Bucket) free() { - if b.root == 0 { - return - } - - var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { - if p != nil { - tx.db.freelist.free(tx.meta.txid, p) - } else { - n.free() - } - }) - b.root = 0 -} - -// dereference removes all references to the old mmap. -func (b *Bucket) dereference() { - if b.rootNode != nil { - b.rootNode.root().dereference() - } - - for _, child := range b.buckets { - child.dereference() - } -} - -// pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { - // Inline buckets have a fake page embedded in their value so treat them - // differently. We'll return the rootNode (if available) or the fake page. - if b.root == 0 { - if id != 0 { - panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) - } - if b.rootNode != nil { - return nil, b.rootNode - } - return b.page, nil - } - - // Check the node cache for non-inline buckets. - if b.nodes != nil { - if n := b.nodes[id]; n != nil { - return nil, n - } - } - - // Finally lookup the page from the transaction if no node is materialized. - return b.tx.page(id), nil -} - -// BucketStats records statistics about resources used by a bucket. -type BucketStats struct { - // Page count statistics. - BranchPageN int // number of logical branch pages - BranchOverflowN int // number of physical branch overflow pages - LeafPageN int // number of logical leaf pages - LeafOverflowN int // number of physical leaf overflow pages - - // Tree statistics. - KeyN int // number of keys/value pairs - Depth int // number of levels in B+tree - - // Page size utilization. - BranchAlloc int // bytes allocated for physical branch pages - BranchInuse int // bytes actually used for branch data - LeafAlloc int // bytes allocated for physical leaf pages - LeafInuse int // bytes actually used for leaf data - - // Bucket statistics - BucketN int // total number of buckets including the top bucket - InlineBucketN int // total number on inlined buckets - InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) -} - -func (s *BucketStats) Add(other BucketStats) { - s.BranchPageN += other.BranchPageN - s.BranchOverflowN += other.BranchOverflowN - s.LeafPageN += other.LeafPageN - s.LeafOverflowN += other.LeafOverflowN - s.KeyN += other.KeyN - if s.Depth < other.Depth { - s.Depth = other.Depth - } - s.BranchAlloc += other.BranchAlloc - s.BranchInuse += other.BranchInuse - s.LeafAlloc += other.LeafAlloc - s.LeafInuse += other.LeafInuse - - s.BucketN += other.BucketN - s.InlineBucketN += other.InlineBucketN - s.InlineBucketInuse += other.InlineBucketInuse -} - -// cloneBytes returns a copy of a given slice. 
-func cloneBytes(v []byte) []byte { - var clone = make([]byte, len(v)) - copy(clone, v) - return clone -} diff --git a/vendor/src/github.com/boltdb/bolt/cursor.go b/vendor/src/github.com/boltdb/bolt/cursor.go deleted file mode 100644 index 1be9f35e3..000000000 --- a/vendor/src/github.com/boltdb/bolt/cursor.go +++ /dev/null @@ -1,400 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "sort" -) - -// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. -// Cursors see nested buckets with value == nil. -// Cursors can be obtained from a transaction and are valid as long as the transaction is open. -// -// Keys and values returned from the cursor are only valid for the life of the transaction. -// -// Changing data while traversing with a cursor may cause it to be invalidated -// and return unexpected keys and/or values. You must reposition your cursor -// after mutating data. -type Cursor struct { - bucket *Bucket - stack []elemRef -} - -// Bucket returns the bucket that this cursor was created from. -func (c *Cursor) Bucket() *Bucket { - return c.bucket -} - -// First moves the cursor to the first item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - c.first() - - // If we land on an empty page then move to the next value. - // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - c.next() - } - - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v - -} - -// Last moves the cursor to the last item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - ref := elemRef{page: p, node: n} - ref.index = ref.count() - 1 - c.stack = append(c.stack, ref) - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Next moves the cursor to the next item in the bucket and returns its key and value. -// If the cursor is at the end of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Prev moves the cursor to the previous item in the bucket and returns its key and value. -// If the cursor is at the beginning of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Attempt to move back one element until we're successful. - // Move up the stack as we hit the beginning of each page in our stack. 
- for i := len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index > 0 { - elem.index-- - break - } - c.stack = c.stack[:i] - } - - // If we've hit the end then return nil. - if len(c.stack) == 0 { - return nil, nil - } - - // Move down the stack to find the last element of the last leaf under this branch. - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. If no keys -// follow, a nil key is returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - k, v, flags := c.seek(seek) - - // If we ended up after the last element of a page then move to the next one. - if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { - k, v, flags = c.next() - } - - if k == nil { - return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Delete removes the current key/value under the cursor from the bucket. -// Delete fails if current key/value is a bucket or if the transaction is not writable. -func (c *Cursor) Delete() error { - if c.bucket.tx.db == nil { - return ErrTxClosed - } else if !c.bucket.Writable() { - return ErrTxNotWritable - } - - key, _, flags := c.keyValue() - // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - c.node().del(key) - - return nil -} - -// seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. -func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Start from root page/node and traverse to correct page. - c.stack = c.stack[:0] - c.search(seek, c.bucket.root) - ref := &c.stack[len(c.stack)-1] - - // If the cursor is pointing to the end of page/node then return nil. - if ref.index >= ref.count() { - return nil, nil, 0 - } - - // If this is a bucket then return a nil value. - return c.keyValue() -} - -// first moves the cursor to the first leaf element under the last page in the stack. -func (c *Cursor) first() { - for { - // Exit when we hit a leaf page. - var ref = &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the first element to the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - } -} - -// last moves the cursor to the last leaf element under the last page in the stack. -func (c *Cursor) last() { - for { - // Exit when we hit a leaf page. - ref := &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the last element in the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - - var nextRef = elemRef{page: p, node: n} - nextRef.index = nextRef.count() - 1 - c.stack = append(c.stack, nextRef) - } -} - -// next moves to the next leaf element and returns the key and value. -// If the cursor is at the last leaf element then it stays there and returns nil. 
-func (c *Cursor) next() (key []byte, value []byte, flags uint32) { - for { - // Attempt to move over one element until we're successful. - // Move up the stack as we hit the end of each page in our stack. - var i int - for i = len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index < elem.count()-1 { - elem.index++ - break - } - } - - // If we've hit the root page then stop and return. This will leave the - // cursor on the last element of the last page. - if i == -1 { - return nil, nil, 0 - } - - // Otherwise start from where we left off in the stack and find the - // first element of the first leaf page. - c.stack = c.stack[:i+1] - c.first() - - // If this is an empty page then restart and move back up the stack. - // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - continue - } - - return c.keyValue() - } -} - -// search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgid pgid) { - p, n := c.bucket.pageNode(pgid) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) - } - e := elemRef{page: p, node: n} - c.stack = append(c.stack, e) - - // If we're on a leaf page/node then find the specific node. - if e.isLeaf() { - c.nsearch(key) - return - } - - if n != nil { - c.searchNode(key, n) - return - } - c.searchPage(key, p) -} - -func (c *Cursor) searchNode(key []byte, n *node) { - var exact bool - index := sort.Search(len(n.inodes), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) -} - -func (c *Cursor) searchPage(key []byte, p *page) { - // Binary search for the correct range. - inodes := p.branchPageElements() - - var exact bool - index := sort.Search(int(p.count), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, inodes[index].pgid) -} - -// nsearch searches the leaf node on the top of the stack for a key. -func (c *Cursor) nsearch(key []byte) { - e := &c.stack[len(c.stack)-1] - p, n := e.page, e.node - - // If we have a node then search its inodes. - if n != nil { - index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 - }) - e.index = index - return - } - - // If we have a page then search its leaf elements. - inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 - }) - e.index = index -} - -// keyValue returns the key and value of the current leaf element. -func (c *Cursor) keyValue() ([]byte, []byte, uint32) { - ref := &c.stack[len(c.stack)-1] - if ref.count() == 0 || ref.index >= ref.count() { - return nil, nil, 0 - } - - // Retrieve value from node. 
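// A minimal usage sketch (not part of this patch) for the Cursor API
// above: a full forward scan with First/Next and a prefix scan with
// Seek. The bucket name and key prefix are hypothetical.
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("cursor.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("events"))
		if b == nil {
			return nil // bucket not created yet
		}
		c := b.Cursor()

		// First positions the cursor at the smallest key; Next returns
		// nil,nil once the cursor moves past the last pair.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}

		// Seek jumps to the first key >= the prefix; stop once keys no
		// longer share it.
		prefix := []byte("2016-07-")
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("match: %s = %s\n", k, v)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}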
- if ref.node != nil { - inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags - } - - // Or retrieve value from page. - elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags -} - -// node returns the node that the cursor is currently positioned on. -func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") - - // If the top of the stack is a leaf node then just return it. - if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { - return ref.node - } - - // Start from root and traverse down the hierarchy. - var n = c.stack[0].node - if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) - } - for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") - n = n.childAt(int(ref.index)) - } - _assert(n.isLeaf, "expected leaf node") - return n -} - -// elemRef represents a reference to an element on a given page/node. -type elemRef struct { - page *page - node *node - index int -} - -// isLeaf returns whether the ref is pointing at a leaf page/node. -func (r *elemRef) isLeaf() bool { - if r.node != nil { - return r.node.isLeaf - } - return (r.page.flags & leafPageFlag) != 0 -} - -// count returns the number of inodes or page elements. -func (r *elemRef) count() int { - if r.node != nil { - return len(r.node.inodes) - } - return int(r.page.count) -} diff --git a/vendor/src/github.com/boltdb/bolt/db.go b/vendor/src/github.com/boltdb/bolt/db.go deleted file mode 100644 index 1223493ca..000000000 --- a/vendor/src/github.com/boltdb/bolt/db.go +++ /dev/null @@ -1,1036 +0,0 @@ -package bolt - -import ( - "errors" - "fmt" - "hash/fnv" - "log" - "os" - "runtime" - "runtime/debug" - "strings" - "sync" - "time" - "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" - -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 -) - -// default page size for db is set to the OS page size. -var defaultPageSize = os.Getpagesize() - -// DB represents a collection of buckets persisted to a file on disk. -// All data access is performed through transactions which can be obtained through the DB. -// All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called. -type DB struct { - // When enabled, the database will perform a Check() after every commit. - // A panic is issued if the database is in an inconsistent state. This - // flag has a large performance impact so it should only be used for - // debugging purposes. - StrictMode bool - - // Setting the NoSync flag will cause the database to skip fsync() - // calls after each commit. This can be useful when bulk loading data - // into a database, provided you can restart the bulk load in the event of - // a system failure or database corruption. Do not set this flag for - // normal use.
- // - // If the package global IgnoreNoSync constant is true, this value is - // ignored. See the comment on that constant for more details. - // - // THIS IS UNSAFE. PLEASE USE WITH CAUTION. - NoSync bool - - // When true, skips the truncate call when growing the database. - // Setting this to true is only safe on non-ext3/ext4 systems. - // Skipping truncation avoids preallocation of hard drive space and - // bypasses a truncate() and fsync() syscall on remapping. - // - // https://github.com/boltdb/bolt/issues/284 - NoGrowSync bool - - // If you want to read the entire database fast, you can set MmapFlags to - // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. - MmapFlags int - - // MaxBatchSize is the maximum size of a batch. Default value is - // copied from DefaultMaxBatchSize in Open. - // - // If <=0, disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchSize int - - // MaxBatchDelay is the maximum delay before a batch starts. - // Default value is copied from DefaultMaxBatchDelay in Open. - // - // If <=0, effectively disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchDelay time.Duration - - // AllocSize is the amount of space allocated when the database - // needs to create new pages. This is done to amortize the cost - // of truncate() and fsync() when growing the data file. - AllocSize int - - path string - file *os.File - lockfile *os.File // windows only - dataref []byte // mmap'ed readonly, write throws SEGV - data *[maxMapSize]byte - datasz int - filesz int // current on disk file size - meta0 *meta - meta1 *meta - pageSize int - opened bool - rwtx *Tx - txs []*Tx - freelist *freelist - stats Stats - - pagePool sync.Pool - - batchMu sync.Mutex - batch *batch - - rwlock sync.Mutex // Allows only one writer at a time. - metalock sync.Mutex // Protects meta page access. - mmaplock sync.RWMutex // Protects mmap access during remapping. - statlock sync.RWMutex // Protects stats access. - - ops struct { - writeAt func(b []byte, off int64) (n int, err error) - } - - // Read-only mode. - // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. - readOnly bool -} - -// Path returns the path to the currently open database file. -func (db *DB) Path() string { - return db.path -} - -// GoString returns the Go string representation of the database. -func (db *DB) GoString() string { - return fmt.Sprintf("bolt.DB{path:%q}", db.path) -} - -// String returns the string representation of the database. -func (db *DB) String() string { - return fmt.Sprintf("DB<%q>", db.path) -} - -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. -// Passing in nil options will cause Bolt to open the database with the default options. -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - var db = &DB{opened: true} - - // Set default options if no options are provided. - if options == nil { - options = DefaultOptions - } - db.NoGrowSync = options.NoGrowSync - db.MmapFlags = options.MmapFlags - - // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize - - flag := os.O_RDWR - if options.ReadOnly { - flag = os.O_RDONLY - db.readOnly = true - } - - // Open data file and separate sync handler for metadata writes.
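// A minimal sketch (not part of this patch) of opening a database with
// the Options fields described above. The path and timeout are
// hypothetical values chosen for illustration.
package main

import (
	"log"
	"time"

	"github.com/boltdb/bolt"
)

func main() {
	// Timeout bounds the flock() wait performed in Open below; ReadOnly
	// would instead take a shared lock so several read-only processes
	// can open the same file concurrently.
	db, err := bolt.Open("app.db", 0600, &bolt.Options{
		Timeout:  1 * time.Second,
		ReadOnly: false,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}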
- db.path = path - var err error - if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { - _ = db.close() - return nil, err - } - - // Lock file so that other processes using Bolt in read-write mode cannot - // use the database at the same time. This would cause corruption since - // the two processes would write meta pages and free pages separately. - // The database file is locked exclusively (only one process can grab the lock) - // if !options.ReadOnly. - // The database file is locked using the shared lock (more than one process may - // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { - _ = db.close() - return nil, err - } - - // Default values for test hooks - db.ops.writeAt = db.file.WriteAt - - // Initialize the database if it doesn't exist. - if info, err := db.file.Stat(); err != nil { - return nil, err - } else if info.Size() == 0 { - // Initialize new files with meta pages. - if err := db.init(); err != nil { - return nil, err - } - } else { - // Read the first meta page to determine the page size. - var buf [0x1000]byte - if _, err := db.file.ReadAt(buf[:], 0); err == nil { - m := db.pageInBuffer(buf[:], 0).meta() - if err := m.validate(); err != nil { - // If we can't read the page size, we can assume it's the same - // as the OS -- since that's how the page size was chosen in the - // first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. - db.pageSize = os.Getpagesize() - } else { - db.pageSize = int(m.pageSize) - } - } - } - - // Initialize page pool. - db.pagePool = sync.Pool{ - New: func() interface{} { - return make([]byte, db.pageSize) - }, - } - - // Memory map the data file. - if err := db.mmap(options.InitialMmapSize); err != nil { - _ = db.close() - return nil, err - } - - // Read in the freelist. - db.freelist = newFreelist() - db.freelist.read(db.page(db.meta().freelist)) - - // Mark the database as opened and return. - return db, nil -} - -// mmap opens the underlying memory-mapped file and initializes the meta references. -// minsz is the minimum size that the new mmap can be. -func (db *DB) mmap(minsz int) error { - db.mmaplock.Lock() - defer db.mmaplock.Unlock() - - info, err := db.file.Stat() - if err != nil { - return fmt.Errorf("mmap stat error: %s", err) - } else if int(info.Size()) < db.pageSize*2 { - return fmt.Errorf("file size too small") - } - - // Ensure the size is at least the minimum size. - var size = int(info.Size()) - if size < minsz { - size = minsz - } - size, err = db.mmapSize(size) - if err != nil { - return err - } - - // Dereference all mmap references before unmapping. - if db.rwtx != nil { - db.rwtx.root.dereference() - } - - // Unmap existing data before continuing. - if err := db.munmap(); err != nil { - return err - } - - // Memory-map the data file as a byte slice. - if err := mmap(db, size); err != nil { - return err - } - - // Save references to the meta pages. - db.meta0 = db.page(0).meta() - db.meta1 = db.page(1).meta() - - // Validate the meta pages. We only return an error if both meta pages fail - // validation, since meta0 failing validation means that it wasn't saved - // properly -- but we can recover using meta1. And vice-versa. 
- err0 := db.meta0.validate() - err1 := db.meta1.validate() - if err0 != nil && err1 != nil { - return err0 - } - - return nil -} - -// munmap unmaps the data file from memory. -func (db *DB) munmap() error { - if err := munmap(db); err != nil { - return fmt.Errorf("unmap error: " + err.Error()) - } - return nil -} - -// mmapSize determines the appropriate size for the mmap given the current size -// of the database. The minimum size is 32KB and doubles until it reaches 1GB. -// Returns an error if the new mmap size is greater than the max allowed. -func (db *DB) mmapSize(size int) (int, error) { - // Double the size from 32KB until 1GB. - for i := uint(15); i <= 30; i++ { - if size <= 1<<i { - return 1 << i, nil - } - } - - // Verify the requested size is not above the maximum allowed. - if size > maxMapSize { - return 0, fmt.Errorf("mmap too large") - } - - // If larger than 1GB then grow by 1GB at a time. - sz := int64(size) - if remainder := sz % int64(maxMmapStep); remainder > 0 { - sz += int64(maxMmapStep) - remainder - } - - // Ensure that the mmap size is a multiple of the page size. - // This should always be true since we're incrementing in MBs. - pageSize := int64(db.pageSize) - if (sz % pageSize) != 0 { - sz = ((sz / pageSize) + 1) * pageSize - } - - // If we've exceeded the max size then only grow up to the max size. - if sz > maxMapSize { - sz = maxMapSize - } - - return int(sz), nil -} - -// init creates a new database file and initializes its meta pages. -func (db *DB) init() error { - // Set the page size to the OS page size. - db.pageSize = os.Getpagesize() - - // Create two meta pages on a buffer. - buf := make([]byte, db.pageSize*4) - for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf[:], pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag - - // Initialize the meta page. - m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - m.checksum = m.sum64() - } - - // Write an empty freelist at page 3. - p := db.pageInBuffer(buf[:], pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 - - // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf[:], pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 - - // Write the buffer to our data file. - if _, err := db.ops.writeAt(buf, 0); err != nil { - return err - } - if err := fdatasync(db); err != nil { - return err - } - - return nil -} - -// Close releases all database resources. -// All transactions must be closed before closing the database. -func (db *DB) Close() error { - db.rwlock.Lock() - defer db.rwlock.Unlock() - - db.metalock.Lock() - defer db.metalock.Unlock() - - db.mmaplock.RLock() - defer db.mmaplock.RUnlock() - - return db.close() -} - -func (db *DB) close() error { - if !db.opened { - return nil - } - - db.opened = false - - db.freelist = nil - - // Clear ops. - db.ops.writeAt = nil - - // Close the mmap. - if err := db.munmap(); err != nil { - return err - } - - // Close file handles. - if db.file != nil { - // No need to unlock read-only file. - if !db.readOnly { - // Unlock the file. - if err := funlock(db); err != nil { - log.Printf("bolt.Close(): funlock error: %s", err) - } - } - - // Close the file descriptor. - if err := db.file.Close(); err != nil { - return fmt.Errorf("db file close: %s", err) - } - db.file = nil - } - - db.path = "" - return nil -} - -// Begin starts a new transaction. -// Multiple read-only transactions can be used concurrently but only one -// write transaction can be used at a time.
Starting multiple write transactions -// will cause the calls to block and be serialized until the current write -// transaction finishes. -// -// Transactions should not be dependent on one another. Opening a read -// transaction and a write transaction in the same goroutine can cause the -// writer to deadlock because the database periodically needs to re-mmap itself -// as it grows and it cannot do that while a read transaction is open. -// -// If a long-running read transaction (for example, a snapshot transaction) is -// needed, you might want to set DB.InitialMmapSize to a large enough value -// to avoid potential blocking of the write transaction. -// -// IMPORTANT: You must close read-only transactions after you are finished or -// else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { - if writable { - return db.beginRWTx() - } - return db.beginTx() -} - -func (db *DB) beginTx() (*Tx, error) { - // Lock the meta pages while we initialize the transaction. We obtain - // the meta lock before the mmap lock because that's the order that the - // write transaction will obtain them. - db.metalock.Lock() - - // Obtain a read-only lock on the mmap. When the mmap is remapped it will - // obtain a write lock so all transactions must finish before it can be - // remapped. - db.mmaplock.RLock() - - // Exit if the database is not open yet. - if !db.opened { - db.mmaplock.RUnlock() - db.metalock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{} - t.init(db) - - // Keep track of transaction until it closes. - db.txs = append(db.txs, t) - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Update the transaction stats. - db.statlock.Lock() - db.stats.TxN++ - db.stats.OpenTxN = n - db.statlock.Unlock() - - return t, nil -} - -func (db *DB) beginRWTx() (*Tx, error) { - // If the database was opened with Options.ReadOnly, return an error. - if db.readOnly { - return nil, ErrDatabaseReadOnly - } - - // Obtain writer lock. This is released by the transaction when it closes. - // This enforces only one writer transaction at a time. - db.rwlock.Lock() - - // Once we have the writer lock then we can lock the meta pages so that - // we can set up the transaction. - db.metalock.Lock() - defer db.metalock.Unlock() - - // Exit if the database is not open yet. - if !db.opened { - db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{writable: true} - t.init(db) - db.rwtx = t - - // Free any pages associated with closed read-only transactions. - var minid txid = 0xFFFFFFFFFFFFFFFF - for _, t := range db.txs { - if t.meta.txid < minid { - minid = t.meta.txid - } - } - if minid > 0 { - db.freelist.release(minid - 1) - } - - return t, nil -} - -// removeTx removes a transaction from the database. -func (db *DB) removeTx(tx *Tx) { - // Release the read lock on the mmap. - db.mmaplock.RUnlock() - - // Use the meta lock to restrict access to the DB object. - db.metalock.Lock() - - // Remove the transaction. - for i, t := range db.txs { - if t == tx { - db.txs = append(db.txs[:i], db.txs[i+1:]...) - break - } - } - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Merge statistics. - db.statlock.Lock() - db.stats.OpenTxN = n - db.stats.TxStats.add(&tx.stats) - db.statlock.Unlock() -} - -// Update executes a function within the context of a read-write managed transaction.
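// A minimal sketch (not part of this patch) of the manual transaction
// flow that Begin enables; the managed Update/View helpers below wrap
// exactly this pattern. The bucket and key names are hypothetical.
package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("tx.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	tx, err := db.Begin(true) // writable: serialized with other writers
	if err != nil {
		log.Fatal(err)
	}
	// Deferring Rollback is safe: after a successful Commit it simply
	// returns ErrTxClosed, which we ignore, and on every error path it
	// releases the writer lock.
	defer tx.Rollback()

	b, err := tx.CreateBucketIfNotExists([]byte("config"))
	if err != nil {
		log.Fatal(err)
	}
	if err := b.Put([]byte("version"), []byte("1")); err != nil {
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}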
-// If no error is returned from the function then the transaction is committed. -// If an error is returned then the entire transaction is rolled back. -// Any error that is returned from the function or returned from the commit is -// returned from the Update() method. -// -// Attempting to manually commit or rollback within the function will cause a panic. -func (db *DB) Update(fn func(*Tx) error) error { - t, err := db.Begin(true) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually commit. - t.managed = true - - // If an error is returned from the function then roll back and return the error. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - return t.Commit() -} - -// View executes a function within the context of a managed read-only transaction. -// Any error that is returned from the function is returned from the View() method. -// -// Attempting to manually rollback within the function will cause a panic. -func (db *DB) View(fn func(*Tx) error) error { - t, err := db.Begin(false) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually rollback. - t.managed = true - - // If an error is returned from the function then pass it through. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - if err := t.Rollback(); err != nil { - return err - } - - return nil -} - -// Batch calls fn as part of a batch. It behaves similarly to Update, -// except: -// -// 1. concurrent Batch calls can be combined into a single Bolt -// transaction. -// -// 2. the function passed to Batch may be called multiple times, -// regardless of whether it returns an error or not. -// -// This means that Batch function side effects must be idempotent and -// take permanent effect only after a successful return is seen in the -// caller. -// -// The maximum batch size and delay can be adjusted with DB.MaxBatchSize -// and DB.MaxBatchDelay, respectively. -// -// Batch is only useful when there are multiple goroutines calling it. -func (db *DB) Batch(fn func(*Tx) error) error { - errCh := make(chan error, 1) - - db.batchMu.Lock() - if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { - // There is no existing batch, or the existing batch is full; start a new one. - db.batch = &batch{ - db: db, - } - db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) - } - db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) - if len(db.batch.calls) >= db.MaxBatchSize { - // wake up batch, it's ready to run - go db.batch.trigger() - } - db.batchMu.Unlock() - - err := <-errCh - if err == trySolo { - err = db.Update(fn) - } - return err -} - -type call struct { - fn func(*Tx) error - err chan<- error -} - -type batch struct { - db *DB - timer *time.Timer - start sync.Once - calls []call -} - -// trigger runs the batch if it hasn't already been run. -func (b *batch) trigger() { - b.start.Do(b.run) -} - -// run performs the transactions in the batch and communicates results -// back to DB.Batch. -func (b *batch) run() { - b.db.batchMu.Lock() - b.timer.Stop() - // Make sure no new work is added to this batch, but don't break - // other batches.
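// A minimal sketch (not part of this patch) contrasting Update, View and
// Batch as documented above. Batch only pays off when many goroutines
// call it; note the idempotency requirement, since fn may run more than
// once. Bucket names and the goroutine count are hypothetical.
package main

import (
	"log"
	"sync"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("batch.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Update: one serialized read-write transaction.
	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("counters"))
		return err
	}); err != nil {
		log.Fatal(err)
	}

	// Batch: concurrent calls are coalesced into few transactions.
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			err := db.Batch(func(tx *bolt.Tx) error {
				// Put with a fixed key/value is idempotent, so it is
				// safe even if this function is retried.
				return tx.Bucket([]byte("counters")).Put(
					[]byte{byte(i)}, []byte{1})
			})
			if err != nil {
				log.Println(err)
			}
		}(i)
	}
	wg.Wait()

	// View: read-only work; the transaction is always rolled back.
	if err := db.View(func(tx *bolt.Tx) error {
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}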
- if b.db.batch == b { - b.db.batch = nil - } - b.db.batchMu.Unlock() - -retry: - for len(b.calls) > 0 { - var failIdx = -1 - err := b.db.Update(func(tx *Tx) error { - for i, c := range b.calls { - if err := safelyCall(c.fn, tx); err != nil { - failIdx = i - return err - } - } - return nil - }) - - if failIdx >= 0 { - // take the failing transaction out of the batch. it's - // safe to shorten b.calls here because db.batch no longer - // points to us, and we hold the mutex anyway. - c := b.calls[failIdx] - b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] - // tell the submitter to re-run it solo, and continue with the rest of the batch - c.err <- trySolo - continue retry - } - - // pass success, or bolt internal errors, to all callers - for _, c := range b.calls { - if c.err != nil { - c.err <- err - } - } - break retry - } -} - -// trySolo is a special sentinel error value used for signaling that a -// transaction function should be re-run. It should never be seen by -// callers. -var trySolo = errors.New("batch function returned an error and should be re-run solo") - -type panicked struct { - reason interface{} -} - -func (p panicked) Error() string { - if err, ok := p.reason.(error); ok { - return err.Error() - } - return fmt.Sprintf("panic: %v", p.reason) -} - -func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { - defer func() { - if p := recover(); p != nil { - err = panicked{p} - } - }() - return fn(tx) -} - -// Sync executes fdatasync() against the database file handle. -// -// This is not necessary under normal operation; however, if you use NoSync -// then it allows you to force the database file to sync against the disk. -func (db *DB) Sync() error { return fdatasync(db) } - -// Stats retrieves ongoing performance stats for the database. -// This is only updated when a transaction closes. -func (db *DB) Stats() Stats { - db.statlock.RLock() - defer db.statlock.RUnlock() - return db.stats -} - -// This is for internal access to the raw data bytes from the C cursor; use - // carefully, or not at all. -func (db *DB) Info() *Info { - return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} -} - -// page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) -} - -// pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) -} - -// meta retrieves the current meta page reference. -func (db *DB) meta() *meta { - // We have to return the meta with the highest txid which doesn't fail - // validation. Otherwise, we can cause errors when in fact the database is - // in a consistent state. metaA is the one with the higher txid. - metaA := db.meta0 - metaB := db.meta1 - if db.meta1.txid > db.meta0.txid { - metaA = db.meta1 - metaB = db.meta0 - } - - // Use higher meta page if valid. Otherwise fall back to the previous, if valid. - if err := metaA.validate(); err == nil { - return metaA - } else if err := metaB.validate(); err == nil { - return metaB - } - - // This should never be reached, because both meta1 and meta0 were validated - // on mmap() and we do fsync() on every write. - panic("bolt.DB.meta(): invalid meta pages") -} - -// allocate returns a contiguous block of memory starting at a given page.
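// The safelyCall/panicked pair above is a general Go pattern: convert a
// panic in user-supplied code into an ordinary error at a recovery
// boundary. A self-contained sketch of the same idea (the names here are
// hypothetical, not part of the vendored code):
package main

import (
	"errors"
	"fmt"
)

type panicked struct{ reason interface{} }

func (p panicked) Error() string {
	if err, ok := p.reason.(error); ok {
		return err.Error()
	}
	return fmt.Sprintf("panic: %v", p.reason)
}

// safely runs fn and returns any panic it raised as an error, so a
// batching loop can report the failure instead of crashing the process.
func safely(fn func() error) (err error) {
	defer func() {
		if p := recover(); p != nil {
			err = panicked{p}
		}
	}()
	return fn()
}

func main() {
	err := safely(func() error { panic(errors.New("boom")) })
	fmt.Println(err) // prints "boom" instead of aborting the program
}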
-func (db *DB) allocate(count int) (*page, error) { - // Allocate a temporary buffer for the page. - var buf []byte - if count == 1 { - buf = db.pagePool.Get().([]byte) - } else { - buf = make([]byte, count*db.pageSize) - } - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) - - // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(count); p.id != 0 { - return p, nil - } - - // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize - if minsz >= db.datasz { - if err := db.mmap(minsz); err != nil { - return nil, fmt.Errorf("mmap allocate error: %s", err) - } - } - - // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) - - return p, nil -} - -// grow grows the size of the database to the given sz. -func (db *DB) grow(sz int) error { - // Ignore if the new size is less than the available file size. - if sz <= db.filesz { - return nil - } - - // If the data is smaller than the alloc size then only allocate what's needed. - // Once it goes over the allocation size then allocate in chunks. - if db.datasz < db.AllocSize { - sz = db.datasz - } else { - sz += db.AllocSize - } - - // Truncate and fsync to ensure file size metadata is flushed. - // https://github.com/boltdb/bolt/issues/284 - if !db.NoGrowSync && !db.readOnly { - if runtime.GOOS != "windows" { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) - } - } - if err := db.file.Sync(); err != nil { - return fmt.Errorf("file sync error: %s", err) - } - } - - db.filesz = sz - return nil -} - -func (db *DB) IsReadOnly() bool { - return db.readOnly -} - -// Options represents the options that can be set when opening a database. -type Options struct { - // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. - Timeout time.Duration - - // Sets the DB.NoGrowSync flag before memory mapping the file. - NoGrowSync bool - - // Open database in read-only mode. Uses flock(..., LOCK_SH|LOCK_NB) to - // grab a shared lock (UNIX). - ReadOnly bool - - // Sets the DB.MmapFlags flag before memory mapping the file. - MmapFlags int - - // InitialMmapSize is the initial mmap size of the database - // in bytes. Read transactions won't block the write transaction - // if InitialMmapSize is large enough to hold the database mmap - // size. (See DB.Begin for more information) - // - // If <=0, the initial map size is 0. - // If InitialMmapSize is smaller than the previous database size, - // it has no effect. - InitialMmapSize int -} - -// DefaultOptions represents the options used if nil options are passed into Open(). -// No timeout is used which will cause Bolt to wait indefinitely for a lock. -var DefaultOptions = &Options{ - Timeout: 0, - NoGrowSync: false, -} - -// Stats represents statistics about the database. -type Stats struct { - // Freelist stats - FreePageN int // total number of free pages on the freelist - PendingPageN int // total number of pending pages on the freelist - FreeAlloc int // total bytes allocated in free pages - FreelistInuse int // total bytes used by the freelist - - // Transaction stats - TxN int // total number of started read transactions - OpenTxN int // number of currently open read transactions - - TxStats TxStats // global, ongoing stats. -} - -// Sub calculates and returns the difference between two sets of database stats.
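// A minimal sketch (not part of this patch) of the monitoring pattern
// that Stats.Sub below is designed for: sample db.Stats() at two points
// in time and report only the delta for that window. The interval and
// iteration count are hypothetical.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("mon.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	prev := db.Stats()
	for i := 0; i < 3; i++ {
		time.Sleep(10 * time.Second)
		cur := db.Stats()
		// cur.Sub(&prev) yields the counters accrued in this window.
		diff := cur.Sub(&prev)
		fmt.Printf("read txs in window: %d (open now: %d)\n",
			diff.TxN, cur.OpenTxN)
		prev = cur
	}
}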
-// This is useful when obtaining stats at two different points in time and -// you need the performance counters that occurred within that time span. -func (s *Stats) Sub(other *Stats) Stats { - if other == nil { - return *s - } - var diff Stats - diff.FreePageN = s.FreePageN - diff.PendingPageN = s.PendingPageN - diff.FreeAlloc = s.FreeAlloc - diff.FreelistInuse = s.FreelistInuse - diff.TxN = s.TxN - other.TxN - diff.TxStats = s.TxStats.Sub(&other.TxStats) - return diff -} - -func (s *Stats) add(other *Stats) { - s.TxStats.add(&other.TxStats) -} - -type Info struct { - Data uintptr - PageSize int -} - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != 0 && m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. -func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid { - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - -func printstack() { - stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") - fmt.Fprintln(os.Stderr, stack) -} diff --git a/vendor/src/github.com/boltdb/bolt/doc.go b/vendor/src/github.com/boltdb/bolt/doc.go deleted file mode 100644 index cc937845d..000000000 --- a/vendor/src/github.com/boltdb/bolt/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Package bolt implements a low-level key/value store in pure Go. It supports -fully serializable transactions, ACID semantics, and lock-free MVCC with -multiple readers and a single writer. Bolt can be used for projects that -want a simple data store without the need to add large dependencies such as -Postgres or MySQL. - -Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is -optimized for fast read access and does not require recovery in the event of a -system crash. Transactions which have not finished committing will simply be -rolled back in the event of a crash. - -The design of Bolt is based on Howard Chu's LMDB database project. - -Bolt currently works on Windows, Mac OS X, and Linux. - - -Basics - -There are only a few types in Bolt: DB, Bucket, Tx, and Cursor.
 The DB is -a collection of buckets and is represented by a single file on disk. A bucket is -a collection of unique keys that are associated with values. - -Transactions provide either read-only or read-write access to the database. -Read-only transactions can retrieve key/value pairs and can use Cursors to -iterate over the dataset sequentially. Read-write transactions can create and -delete buckets and can insert and remove keys. Only one read-write transaction -is allowed at a time. - - -Caveats - -The database uses a read-only, memory-mapped data file to ensure that -applications cannot corrupt the database; however, this means that keys and -values returned from Bolt cannot be changed. Writing to a read-only byte slice -will cause Go to panic. - -Keys and values retrieved from the database are only valid for the life of -the transaction. When used outside the transaction, these byte slices can -point to different data or can point to invalid memory which will cause a panic. - - -*/ -package bolt diff --git a/vendor/src/github.com/boltdb/bolt/errors.go b/vendor/src/github.com/boltdb/bolt/errors.go deleted file mode 100644 index a3620a3eb..000000000 --- a/vendor/src/github.com/boltdb/bolt/errors.go +++ /dev/null @@ -1,71 +0,0 @@ -package bolt - -import "errors" - -// These errors can be returned when opening or calling methods on a DB. -var ( - // ErrDatabaseNotOpen is returned when a DB instance is accessed before it - // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") - - // ErrInvalid is returned when both meta pages on a database are invalid. - // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") - - // ErrVersionMismatch is returned when the data file was created with a - // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") - - // ErrChecksum is returned when either meta page checksum does not match. - ErrChecksum = errors.New("checksum error") - - // ErrTimeout is returned when a database cannot obtain an exclusive lock - // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") -) - -// These errors can occur when beginning or committing a Tx. -var ( - // ErrTxNotWritable is returned when performing a write operation on a - // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") - - // ErrTxClosed is returned when committing or rolling back a transaction - // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") - - // ErrDatabaseReadOnly is returned when a mutating transaction is started on a - // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") -) - -// These errors can occur when putting or deleting a value or a bucket. -var ( - // ErrBucketNotFound is returned when trying to access a bucket that has - // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") - - // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") - - // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") - - // ErrKeyRequired is returned when inserting a zero-length key.
- ErrKeyRequired = errors.New("key required") - - // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. - ErrKeyTooLarge = errors.New("key too large") - - // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") - - // ErrIncompatibleValue is returned when trying to create or delete a bucket - // on an existing non-bucket key or when trying to create or delete a - // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") -) diff --git a/vendor/src/github.com/boltdb/bolt/freelist.go b/vendor/src/github.com/boltdb/bolt/freelist.go deleted file mode 100644 index 0161948fc..000000000 --- a/vendor/src/github.com/boltdb/bolt/freelist.go +++ /dev/null @@ -1,242 +0,0 @@ -package bolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - ids []pgid // all free and available page ids. - pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist() *freelist { - return &freelist{ - pending: make(map[txid][]pgid), - cache: make(map[pgid]bool), - } -} - -// size returns the size of the page after serialization. -func (f *freelist) size() int { - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// free_count returns count of free pages -func (f *freelist) free_count() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, list := range f.pending { - count += len(list) - } - return count -} - -// all returns a list of all free ids and all pending ids in one sorted list. -func (f *freelist) all() []pgid { - m := make(pgids, 0) - - for _, list := range f.pending { - m = append(m, list...) - } - - sort.Sort(m) - return pgids(f.ids).merge(m) -} - -// allocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) allocate(n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur.
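// A minimal sketch (not part of this patch) of handling the sentinel
// errors defined above: bolt errors are fixed values, so callers compare
// with == rather than inspecting error strings. The bucket name is
// hypothetical.
package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("err.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucket([]byte("jobs"))
		if err == bolt.ErrBucketExists {
			// Fine for our purposes: another writer created it already.
			return nil
		}
		return err
	})
	if err != nil {
		log.Fatal(err)
	}
}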
-func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - var ids = f.pending[txid] - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if f.cache[id] { - panic(fmt.Sprintf("page %d already freed", id)) - } - - // Add to the freelist and cache. - ids = append(ids, id) - f.cache[id] = true - } - f.pending[txid] = ids -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, ids := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, ids...) - delete(f.pending, tid) - } - } - sort.Sort(m) - f.ids = pgids(f.ids).merge(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - for _, id := range f.pending[txid] { - delete(f.cache, id) - } - - // Remove pages from pending list. - delete(f.pending, txid) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgid pgid) bool { - return f.cache[pgid] -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - idx, count := 0, int(p.count) - if count == 0xFFFF { - idx = 1 - count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) - } - - // Copy the list of page ids from the freelist. - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] - f.ids = make([]pgid, len(ids)) - copy(f.ids, ids) - - // Make sure they're sorted. - sort.Sort(pgids(f.ids)) - - // Rebuild the page cache. - f.reindex() -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - ids := f.all() - - // Update the header flag. - p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - if len(ids) < 0xFFFF { - p.count = uint16(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) - } else { - p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range f.ids { - if !pcache[id] { - a = append(a, id) - } - } - f.ids = a - - // Once the available list is rebuilt then rebuild the free cache so that - // it includes the available and pending free pages. 
- f.reindex() -} - -// reindex rebuilds the free cache based on available and pending free lists. -func (f *freelist) reindex() { - f.cache = make(map[pgid]bool) - for _, id := range f.ids { - f.cache[id] = true - } - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - f.cache[pendingID] = true - } - } -} diff --git a/vendor/src/github.com/boltdb/bolt/node.go b/vendor/src/github.com/boltdb/bolt/node.go deleted file mode 100644 index e9d64af81..000000000 --- a/vendor/src/github.com/boltdb/bolt/node.go +++ /dev/null @@ -1,599 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "sort" - "unsafe" -) - -// node represents an in-memory, deserialized page. -type node struct { - bucket *Bucket - isLeaf bool - unbalanced bool - spilled bool - key []byte - pgid pgid - parent *node - children nodes - inodes inodes -} - -// root returns the top-level node this node is attached to. -func (n *node) root() *node { - if n.parent == nil { - return n - } - return n.parent.root() -} - -// minKeys returns the minimum number of inodes this node should have. -func (n *node) minKeys() int { - if n.isLeaf { - return 1 - } - return 2 -} - -// size returns the size of the node after serialization. -func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - } - return sz -} - -// sizeLessThan returns true if the node is less than a given size. -// This is an optimization to avoid calculating a large node when we only need -// to know if it fits inside a certain page size. -func (n *node) sizeLessThan(v int) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - if sz >= v { - return false - } - } - return true -} - -// pageElementSize returns the size of each page element based on the type of node. -func (n *node) pageElementSize() int { - if n.isLeaf { - return leafPageElementSize - } - return branchPageElementSize -} - -// childAt returns the child node at a given index. -func (n *node) childAt(index int) *node { - if n.isLeaf { - panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) - } - return n.bucket.node(n.inodes[index].pgid, n) -} - -// childIndex returns the index of a given child node. -func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) - return index -} - -// numChildren returns the number of children. -func (n *node) numChildren() int { - return len(n.inodes) -} - -// nextSibling returns the next node with the same parent. -func (n *node) nextSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index >= n.parent.numChildren()-1 { - return nil - } - return n.parent.childAt(index + 1) -} - -// prevSibling returns the previous node with the same parent. -func (n *node) prevSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index == 0 { - return nil - } - return n.parent.childAt(index - 1) -} - -// put inserts a key/value. 
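// A self-contained sketch (not part of this patch) of the scan that
// freelist.allocate performs above: walk a sorted id list looking for a
// run of n consecutive ids and cut the run out when found. This is a
// simplified model over plain uint64s, not the vendored implementation.
package main

import "fmt"

func allocate(ids []uint64, n int) (uint64, []uint64) {
	var initial, prev uint64
	for i, id := range ids {
		// Restart the candidate run whenever ids stop being consecutive.
		if prev == 0 || id-prev != 1 {
			initial = id
		}
		// Found n consecutive ids ending at index i: remove them.
		if id-initial+1 == uint64(n) {
			out := append(append([]uint64{}, ids[:i-n+1]...), ids[i+1:]...)
			return initial, out
		}
		prev = id
	}
	return 0, ids // no contiguous block of size n
}

func main() {
	ids := []uint64{3, 4, 5, 9, 10, 11, 12}
	start, rest := allocate(ids, 3)
	fmt.Println(start, rest) // 3 [9 10 11 12]
}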
-func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { - if pgid >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) - } else if len(oldKey) <= 0 { - panic("put: zero-length old key") - } else if len(newKey) <= 0 { - panic("put: zero-length new key") - } - - // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) - - // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) - if !exact { - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[index+1:], n.inodes[index:]) - } - - inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgid - _assert(len(inode.key) > 0, "put: zero-length inode key") -} - -// del removes a key from the node. -func (n *node) del(key []byte) { - // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) - - // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { - return - } - - // Delete inode from the node. - n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) - - // Mark the node as needing rebalancing. - n.unbalanced = true -} - -// read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } - - // Save first key so we can find the node in the parent when we spill. - if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") - } else { - n.key = nil - } -} - -// write writes the items onto one or more pages. -func (n *node) write(p *page) { - // Initialize page. - if n.isLeaf { - p.flags |= leafPageFlag - } else { - p.flags |= branchPageFlag - } - - if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) - } - p.count = uint16(len(n.inodes)) - - // Loop over each item and write it to the page. - b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Write the page element. - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // If the length of key+value is larger than the max allocation size - // then we need to reallocate the byte array pointer. 
- // - // See: https://github.com/boltdb/bolt/pull/335 - klen, vlen := len(item.key), len(item.value) - if len(b) < klen+vlen { - b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] - } - - // Write data for the element to the end of the page. - copy(b[0:], item.key) - b = b[klen:] - copy(b[0:], item.value) - b = b[vlen:] - } - - // DEBUG ONLY: n.dump() -} - -// split breaks up a node into multiple smaller nodes, if appropriate. -// This should only be called from the spill() function. -func (n *node) split(pageSize int) []*node { - var nodes []*node - - node := n - for { - // Split node into two. - a, b := node.splitTwo(pageSize) - nodes = append(nodes, a) - - // If we can't split then exit the loop. - if b == nil { - break - } - - // Set node to b so it gets split on the next iteration. - node = b - } - - return nodes -} - -// splitTwo breaks up a node into two smaller nodes, if appropriate. -// This should only be called from the split() function. -func (n *node) splitTwo(pageSize int) (*node, *node) { - // Ignore the split if the page doesn't have at least enough nodes for - // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { - return n, nil - } - - // Determine the threshold before starting a new node. - var fillPercent = n.bucket.FillPercent - if fillPercent < minFillPercent { - fillPercent = minFillPercent - } else if fillPercent > maxFillPercent { - fillPercent = maxFillPercent - } - threshold := int(float64(pageSize) * fillPercent) - - // Determine split position and sizes of the two pages. - splitIndex, _ := n.splitIndex(threshold) - - // Split node into two separate nodes. - // If there's no parent then we'll need to create one. - if n.parent == nil { - n.parent = &node{bucket: n.bucket, children: []*node{n}} - } - - // Create a new node and add it to the parent. - next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} - n.parent.children = append(n.parent.children, next) - - // Split inodes across two nodes. - next.inodes = n.inodes[splitIndex:] - n.inodes = n.inodes[:splitIndex] - - // Update the statistics. - n.bucket.tx.stats.Split++ - - return n, next -} - -// splitIndex finds the position where a page will fill a given threshold. -// It returns the index as well as the size of the first page. -// This is only called from split(). -func (n *node) splitIndex(threshold int) (index, sz int) { - sz = pageHeaderSize - - // Loop until we only have the minimum number of keys required for the second page. - for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { - index = i - inode := n.inodes[i] - elsize := n.pageElementSize() + len(inode.key) + len(inode.value) - - // If we have at least the minimum number of keys and adding another - // node would put us over the threshold then exit and return. - if i >= minKeysPerPage && sz+elsize > threshold { - break - } - - // Add the element size to the total size. - sz += elsize - } - - return -} - -// spill writes the nodes to dirty pages and splits nodes as it goes. -// Returns an error if dirty pages cannot be allocated. -func (n *node) spill() error { - var tx = n.bucket.tx - if n.spilled { - return nil - } - - // Spill child nodes first. Child nodes can materialize sibling nodes in - // the case of split-merge so we cannot use a range loop. We have to check - // the children size on every loop iteration.
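// A self-contained sketch (not part of this patch) of the threshold
// arithmetic splitTwo uses above: clamp FillPercent into the
// [minFillPercent, maxFillPercent] range and split once a page would
// exceed pageSize * fillPercent. The constant values are assumed from
// the vendored bucket.go.
package main

import "fmt"

const (
	minFillPercent = 0.1
	maxFillPercent = 1.0
)

func splitThreshold(pageSize int, fillPercent float64) int {
	if fillPercent < minFillPercent {
		fillPercent = minFillPercent
	} else if fillPercent > maxFillPercent {
		fillPercent = maxFillPercent
	}
	return int(float64(pageSize) * fillPercent)
}

func main() {
	// With a 4KB page and a fill percentage of 0.5 (bolt's documented
	// default), a node splits once its first half reaches ~2048 bytes.
	fmt.Println(splitThreshold(4096, 0.5)) // 2048
}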
- sort.Sort(n.children) - for i := 0; i < len(n.children); i++ { - if err := n.children[i].spill(); err != nil { - return err - } - } - - // We no longer need the child list because it's only used for spill tracking. - n.children = nil - - // Split nodes into appropriate sizes. The first node will always be n. - var nodes = n.split(tx.db.pageSize) - for _, node := range nodes { - // Add node's page to the freelist if it's not new. - if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) - node.pgid = 0 - } - - // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) - if err != nil { - return err - } - - // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) - } - node.pgid = p.id - node.write(p) - node.spilled = true - - // Insert into parent inodes. - if node.parent != nil { - var key = node.key - if key == nil { - key = node.inodes[0].key - } - - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") - } - - // Update the statistics. - tx.stats.Spill++ - } - - // If the root node split and created a new root then we need to spill that - // as well. We'll clear out the children to make sure it doesn't try to respill. - if n.parent != nil && n.parent.pgid == 0 { - n.children = nil - return n.parent.spill() - } - - return nil -} - -// rebalance attempts to combine the node with sibling nodes if the node fill -// size is below a threshold or if there are not enough keys. -func (n *node) rebalance() { - if !n.unbalanced { - return - } - n.unbalanced = false - - // Update statistics. - n.bucket.tx.stats.Rebalance++ - - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 - if n.size() > threshold && len(n.inodes) > n.minKeys() { - return - } - - // Root node has special handling. - if n.parent == nil { - // If root node is a branch and only has one node then collapse it. - if !n.isLeaf && len(n.inodes) == 1 { - // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) - n.isLeaf = child.isLeaf - n.inodes = child.inodes[:] - n.children = child.children - - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent = n - } - } - - // Remove old child. - child.parent = nil - delete(n.bucket.nodes, child.pgid) - child.free() - } - - return - } - - // If node has no keys then just remove it. - if n.numChildren() == 0 { - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - n.parent.rebalance() - return - } - - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) - if useNextSibling { - target = n.nextSibling() - } else { - target = n.prevSibling() - } - - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) 
- n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - } - - // Either this node or the target node was deleted from the parent so rebalance it. - n.parent.rebalance() -} - -// removes a node from the list of in-memory children. -// This does not affect the inodes. -func (n *node) removeChild(target *node) { - for i, child := range n.children { - if child == target { - n.children = append(n.children[:i], n.children[i+1:]...) - return - } - } -} - -// dereference causes the node to copy all its inode key/value references to heap memory. -// This is required when the mmap is reallocated so inodes are not pointing to stale data. -func (n *node) dereference() { - if n.key != nil { - key := make([]byte, len(n.key)) - copy(key, n.key) - n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") - } - - for i := range n.inodes { - inode := &n.inodes[i] - - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") - - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value - } - - // Recursively dereference children. - for _, child := range n.children { - child.dereference() - } - - // Update statistics. - n.bucket.tx.stats.NodeDeref++ -} - -// free adds the node's underlying page to the freelist. -func (n *node) free() { - if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) - n.pgid = 0 - } -} - -// dump writes the contents of the node to STDERR for debugging purposes. -/* -func (n *node) dump() { - // Write node header. - var typ = "branch" - if n.isLeaf { - typ = "leaf" - } - warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) - - // Write out abbreviated version of each item. - for _, item := range n.inodes { - if n.isLeaf { - if item.flags&bucketLeafFlag != 0 { - bucket := (*bucket)(unsafe.Pointer(&item.value[0])) - warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) - } else { - warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) - } - } else { - warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) - } - } - warn("") -} -*/ - -type nodes []*node - -func (s nodes) Len() int { return len(s) } -func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. 
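The nodes type defined just above is what spill() sorts before writing children out: a plain sort.Interface ordered by each node's first key. A stand-in sketch of the same pattern (fakeNode and byFirstKey are illustrative names):

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// fakeNode stands in for *node; only the first key matters for ordering.
type fakeNode struct{ firstKey []byte }

// byFirstKey implements sort.Interface the way the nodes type does:
// siblings are ordered by their first inode key.
type byFirstKey []*fakeNode

func (s byFirstKey) Len() int           { return len(s) }
func (s byFirstKey) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byFirstKey) Less(i, j int) bool { return bytes.Compare(s[i].firstKey, s[j].firstKey) == -1 }

func main() {
	ns := byFirstKey{{[]byte("m")}, {[]byte("a")}, {[]byte("z")}}
	sort.Sort(ns) // spill() orders its dirty children the same way
	for _, n := range ns {
		fmt.Printf("%s ", n.firstKey)
	}
	fmt.Println() // a m z
}
```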
-type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/vendor/src/github.com/boltdb/bolt/page.go b/vendor/src/github.com/boltdb/bolt/page.go deleted file mode 100644 index 4a555286a..000000000 --- a/vendor/src/github.com/boltdb/bolt/page.go +++ /dev/null @@ -1,172 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) - -const minKeysPerPage = 2 - -const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) -const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 - ptr uintptr -} - -// typ returns a human readable page type string used for debugging. -func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafe.Pointer(&p.ptr)) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// dump writes n bytes of the page to STDERR as hex output. -func (p *page) hexdump(n int) { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. -func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] -} - -// value returns a byte slice of the node value. 
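The key() accessors on branchPageElement and leafPageElement above never copy: each element header stores pos, an offset from the header itself to the element's data, so a slice can be carved straight out of the mapped page. A rough standalone sketch of that layout (the elem type and 64-byte buffer are illustrative; the oversized array cast mirrors bolt's own unsafe idiom and assumes a platform where such casts behave as expected):

```go
package main

import (
	"fmt"
	"unsafe"
)

// elem mimics bolt's page elements: pos is the offset from this header to
// the element's data, ksize is the data length.
type elem struct {
	pos   uint32
	ksize uint32
}

// key carves a slice out of the surrounding buffer by stepping pos bytes
// forward from the element header, as leafPageElement.key does.
func (e *elem) key() []byte {
	buf := (*[0xFFFF]byte)(unsafe.Pointer(e))
	return buf[e.pos : e.pos+e.ksize]
}

func main() {
	// One element header followed by its key bytes, laid out in one buffer
	// the way a page lays out headers followed by data.
	var page [64]byte
	e := (*elem)(unsafe.Pointer(&page[0]))
	e.pos = uint32(unsafe.Sizeof(*e)) // data starts right after the header
	e.ksize = 3
	copy(page[e.pos:], "foo")

	fmt.Printf("%s\n", e.key()) // foo
}
```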
-func (n *leafPageElement) value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } else if len(b) == 0 { - return a - } - - // Create a list to hold all elements from both lists. - merged := make(pgids, 0, len(a)+len(b)) - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - merged = append(merged, follow...) - - return merged -} diff --git a/vendor/src/github.com/boltdb/bolt/tx.go b/vendor/src/github.com/boltdb/bolt/tx.go deleted file mode 100644 index 1cfb4cde8..000000000 --- a/vendor/src/github.com/boltdb/bolt/tx.go +++ /dev/null @@ -1,682 +0,0 @@ -package bolt - -import ( - "fmt" - "io" - "os" - "sort" - "strings" - "time" - "unsafe" -) - -// txid represents the internal transaction identifier. -type txid uint64 - -// Tx represents a read-only or read/write transaction on the database. -// Read-only transactions can be used for retrieving values for keys and creating cursors. -// Read/write transactions can create and remove buckets and create and remove keys. -// -// IMPORTANT: You must commit or rollback transactions when you are done with -// them. Pages can not be reclaimed by the writer until no more transactions -// are using them. A long running read transaction can cause the database to -// quickly grow. -type Tx struct { - writable bool - managed bool - db *DB - meta *meta - root Bucket - pages map[pgid]*page - stats TxStats - commitHandlers []func() - - // WriteFlag specifies the flag for write-related methods like WriteTo(). - // Tx opens the database file with the specified flag to copy the data. - // - // By default, the flag is unset, which works well for mostly in-memory - // workloads. For databases that are much larger than available RAM, - // set the flag to syscall.O_DIRECT to avoid trashing the page cache. - WriteFlag int -} - -// init initializes the transaction. -func (tx *Tx) init(db *DB) { - tx.db = db - tx.pages = nil - - // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) - - // Copy over the root bucket. - tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root - - // Increment the transaction id and add a page cache for writable transactions. - if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) - } -} - -// ID returns the transaction id. -func (tx *Tx) ID() int { - return int(tx.meta.txid) -} - -// DB returns a reference to the database that created the transaction. 
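pgids.merge above computes a sorted union without re-sorting: it appends the longest prefix of the leading slice that stays at or below the other slice's head, then swaps roles. The same technique on bare uint64 slices (a standalone sketch, not bolt's exported API):

```go
package main

import (
	"fmt"
	"sort"
)

// merge returns the sorted union of two sorted slices using the same
// lead/follow strategy as pgids.merge.
func merge(a, b []uint64) []uint64 {
	if len(a) == 0 {
		return b
	}
	if len(b) == 0 {
		return a
	}
	merged := make([]uint64, 0, len(a)+len(b))
	lead, follow := a, b
	if b[0] < a[0] {
		lead, follow = b, a
	}
	for len(lead) > 0 {
		// Copy the largest prefix of lead that is not ahead of follow[0].
		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			break
		}
		lead, follow = follow, lead[n:]
	}
	return append(merged, follow...)
}

func main() {
	fmt.Println(merge([]uint64{1, 4, 9}, []uint64{2, 3, 10})) // [1 2 3 4 9 10]
}
```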
-func (tx *Tx) DB() *DB { - return tx.db -} - -// Size returns current database size in bytes as seen by this transaction. -func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) -} - -// Writable returns whether the transaction can perform write operations. -func (tx *Tx) Writable() bool { - return tx.writable -} - -// Cursor creates a cursor associated with the root bucket. -// All items in the cursor will return a nil value because all root bucket keys point to buckets. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (tx *Tx) Cursor() *Cursor { - return tx.root.Cursor() -} - -// Stats retrieves a copy of the current transaction statistics. -func (tx *Tx) Stats() TxStats { - return tx.stats -} - -// Bucket retrieves a bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) Bucket(name []byte) *Bucket { - return tx.root.Bucket(name) -} - -// CreateBucket creates a new bucket. -// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { - return tx.root.CreateBucket(name) -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { - return tx.root.CreateBucketIfNotExists(name) -} - -// DeleteBucket deletes a bucket. -// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. -func (tx *Tx) DeleteBucket(name []byte) error { - return tx.root.DeleteBucket(name) -} - -// ForEach executes a function for each bucket in the root. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. -func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { - return tx.root.ForEach(func(k, v []byte) error { - if err := fn(k, tx.root.Bucket(k)); err != nil { - return err - } - return nil - }) -} - -// OnCommit adds a handler function to be executed after the transaction successfully commits. -func (tx *Tx) OnCommit(fn func()) { - tx.commitHandlers = append(tx.commitHandlers, fn) -} - -// Commit writes all changes to disk and updates the meta page. -// Returns an error if a disk write error occurs, or if Commit is -// called on a read-only transaction. -func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") - if tx.db == nil { - return ErrTxClosed - } else if !tx.writable { - return ErrTxNotWritable - } - - // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. - - // Rebalance nodes which have had deletions. - var startTime = time.Now() - tx.root.rebalance() - if tx.stats.Rebalance > 0 { - tx.stats.RebalanceTime += time.Since(startTime) - } - - // spill data onto dirty pages. - startTime = time.Now() - if err := tx.root.spill(); err != nil { - tx.rollback() - return err - } - tx.stats.SpillTime += time.Since(startTime) - - // Free the old root bucket. - tx.meta.root.root = tx.root.root - - opgid := tx.meta.pgid - - // Free the freelist and allocate new pages for it. 
This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err - } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id - - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() - return err - } - } - - // Write dirty pages to disk. - startTime = time.Now() - if err := tx.write(); err != nil { - tx.rollback() - return err - } - - // If strict mode is enabled then perform a consistency check. - // Only the first consistency error is reported in the panic. - if tx.db.StrictMode { - ch := tx.Check() - var errs []string - for { - err, ok := <-ch - if !ok { - break - } - errs = append(errs, err.Error()) - } - if len(errs) > 0 { - panic("check fail: " + strings.Join(errs, "\n")) - } - } - - // Write meta to disk. - if err := tx.writeMeta(); err != nil { - tx.rollback() - return err - } - tx.stats.WriteTime += time.Since(startTime) - - // Finalize the transaction. - tx.close() - - // Execute commit handlers now that the locks have been removed. - for _, fn := range tx.commitHandlers { - fn() - } - - return nil -} - -// Rollback closes the transaction and ignores all previous updates. Read-only -// transactions must be rolled back and not committed. -func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") - if tx.db == nil { - return ErrTxClosed - } - tx.rollback() - return nil -} - -func (tx *Tx) rollback() { - if tx.db == nil { - return - } - if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) - } - tx.close() -} - -func (tx *Tx) close() { - if tx.db == nil { - return - } - if tx.writable { - // Grab freelist stats. - var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() - - // Remove transaction ref & writer lock. - tx.db.rwtx = nil - tx.db.rwlock.Unlock() - - // Merge statistics. - tx.db.statlock.Lock() - tx.db.stats.FreePageN = freelistFreeN - tx.db.stats.PendingPageN = freelistPendingN - tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize - tx.db.stats.FreelistInuse = freelistAlloc - tx.db.stats.TxStats.add(&tx.stats) - tx.db.statlock.Unlock() - } else { - tx.db.removeTx(tx) - } - - // Clear all references. - tx.db = nil - tx.meta = nil - tx.root = Bucket{tx: tx} - tx.pages = nil -} - -// Copy writes the entire database to a writer. -// This function exists for backwards compatibility. Use WriteTo() instead. -func (tx *Tx) Copy(w io.Writer) error { - _, err := tx.WriteTo(w) - return err -} - -// WriteTo writes the entire database to a writer. -// If err == nil then exactly tx.Size() bytes will be written into the writer. -func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { - // Attempt to open reader with WriteFlag - f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) - if err != nil { - return 0, err - } - defer func() { _ = f.Close() }() - - // Generate a meta page. We use the same page data for both meta pages. 
- buf := make([]byte, tx.db.pageSize) - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = metaPageFlag - *page.meta() = *tx.meta - - // Write meta 0. - page.id = 0 - page.meta().checksum = page.meta().sum64() - nn, err := w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 0 copy: %s", err) - } - - // Write meta 1 with a lower transaction id. - page.id = 1 - page.meta().txid -= 1 - page.meta().checksum = page.meta().sum64() - nn, err = w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 1 copy: %s", err) - } - - // Move past the meta pages in the file. - if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { - return n, fmt.Errorf("seek: %s", err) - } - - // Copy data pages. - wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) - n += wn - if err != nil { - return n, err - } - - return n, f.Close() -} - -// CopyFile copies the entire database to file at the given path. -// A reader transaction is maintained during the copy so it is safe to continue -// using the database while a copy is in progress. -func (tx *Tx) CopyFile(path string, mode os.FileMode) error { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) - if err != nil { - return err - } - - err = tx.Copy(f) - if err != nil { - _ = f.Close() - return err - } - return f.Close() -} - -// Check performs several consistency checks on the database for this transaction. -// An error is returned if any inconsistency is found. -// -// It can be safely run concurrently on a writable transaction. However, this -// incurs a high cost for large databases and databases with a lot of subbuckets -// because of caching. This overhead can be removed if running on a read-only -// transaction, however, it is not safe to execute other writer transactions at -// the same time. -func (tx *Tx) Check() <-chan error { - ch := make(chan error) - go tx.check(ch) - return ch -} - -func (tx *Tx) check(ch chan error) { - // Check if any pages are double freed. - freed := make(map[pgid]bool) - for _, id := range tx.db.freelist.all() { - if freed[id] { - ch <- fmt.Errorf("page %d: already freed", id) - } - freed[id] = true - } - - // Track every reachable page. - reachable := make(map[pgid]*page) - reachable[0] = tx.page(0) // meta0 - reachable[1] = tx.page(1) // meta1 - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) - } - - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, ch) - - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) - } - } - - // Close the channel to signal completion. - close(ch) -} - -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } - - // Check every page used by this bucket. - b.tx.forEachPage(b.root, 0, func(p *page, _ int) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) - } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references", int(id)) - } - reachable[id] = p - } - - // We should only encounter un-freed leaf and branch pages. 
- if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) - } - }) - - // Check each bucket within this bucket. - _ = b.ForEach(func(k, v []byte) error { - if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, ch) - } - return nil - }) -} - -// allocate returns a contiguous block of memory starting at a given page. -func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(count) - if err != nil { - return nil, err - } - - // Save to our page cache. - tx.pages[p.id] = p - - // Update statistics. - tx.stats.PageCount++ - tx.stats.PageAlloc += count * tx.db.pageSize - - return p, nil -} - -// write writes any dirty pages to disk. -func (tx *Tx) write() error { - // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) - for _, p := range tx.pages { - pages = append(pages, p) - } - // Clear out page cache early. - tx.pages = make(map[pgid]*page) - sort.Sort(pages) - - // Write pages to disk in order. - for _, p := range pages { - size := (int(p.overflow) + 1) * tx.db.pageSize - offset := int64(p.id) * int64(tx.db.pageSize) - - // Write out page in "max allocation" sized chunks. - ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) - for { - // Limit our write to our max allocation size. - sz := size - if sz > maxAllocSize-1 { - sz = maxAllocSize - 1 - } - - // Write chunk to disk. - buf := ptr[:sz] - if _, err := tx.db.ops.writeAt(buf, offset); err != nil { - return err - } - - // Update statistics. - tx.stats.Write++ - - // Exit inner for loop if we've written all the chunks. - size -= sz - if size == 0 { - break - } - - // Otherwise move offset forward and move pointer to next chunk. - offset += int64(sz) - ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) - } - } - - // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Put small pages back to page pool. - for _, p := range pages { - // Ignore page sizes over 1 page. - // These are allocated using make() instead of the page pool. - if int(p.overflow) != 0 { - continue - } - - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] - - // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 - for i := range buf { - buf[i] = 0 - } - tx.db.pagePool.Put(buf) - } - - return nil -} - -// writeMeta writes the meta to the disk. -func (tx *Tx) writeMeta() error { - // Create a temporary buffer for the meta page. - buf := make([]byte, tx.db.pageSize) - p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) - - // Write the meta page to file. - if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { - return err - } - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Update statistics. - tx.stats.Write++ - - return nil -} - -// page returns a reference to the page with a given id. -// If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { - // Check the dirty pages first. - if tx.pages != nil { - if p, ok := tx.pages[id]; ok { - return p - } - } - - // Otherwise return directly from the mmap. - return tx.db.page(id) -} - -// forEachPage iterates over every page within a given page and executes a function. 
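Tx.Copy, Tx.WriteTo, and Tx.CopyFile above are bolt's hot-backup path: they run under an open transaction, so the copy is a consistent snapshot taken while readers and the writer keep working. A usage sketch (the database and backup file names are arbitrary examples):

```go
package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("app.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// CopyFile runs inside this read transaction, so the backup is a
	// consistent snapshot and the database stays fully usable meanwhile.
	err = db.View(func(tx *bolt.Tx) error {
		return tx.CopyFile("backup.db", 0600)
	})
	if err != nil {
		log.Fatal(err)
	}
}
```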
-func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
-	p := tx.page(pgid)
-
-	// Execute function.
-	fn(p, depth)
-
-	// Recursively loop over children.
-	if (p.flags & branchPageFlag) != 0 {
-		for i := 0; i < int(p.count); i++ {
-			elem := p.branchPageElement(uint16(i))
-			tx.forEachPage(elem.pgid, depth+1, fn)
-		}
-	}
-}
-
-// Page returns page information for a given page number.
-// This is only safe for concurrent use when used by a writable transaction.
-func (tx *Tx) Page(id int) (*PageInfo, error) {
-	if tx.db == nil {
-		return nil, ErrTxClosed
-	} else if pgid(id) >= tx.meta.pgid {
-		return nil, nil
-	}
-
-	// Build the page info.
-	p := tx.db.page(pgid(id))
-	info := &PageInfo{
-		ID:            id,
-		Count:         int(p.count),
-		OverflowCount: int(p.overflow),
-	}
-
-	// Determine the type (or if it's free).
-	if tx.db.freelist.freed(pgid(id)) {
-		info.Type = "free"
-	} else {
-		info.Type = p.typ()
-	}
-
-	return info, nil
-}
-
-// TxStats represents statistics about the actions performed by the transaction.
-type TxStats struct {
-	// Page statistics.
-	PageCount int // number of page allocations
-	PageAlloc int // total bytes allocated
-
-	// Cursor statistics.
-	CursorCount int // number of cursors created
-
-	// Node statistics
-	NodeCount int // number of node allocations
-	NodeDeref int // number of node dereferences
-
-	// Rebalance statistics.
-	Rebalance     int           // number of node rebalances
-	RebalanceTime time.Duration // total time spent rebalancing
-
-	// Split/Spill statistics.
-	Split     int           // number of nodes split
-	Spill     int           // number of nodes spilled
-	SpillTime time.Duration // total time spent spilling
-
-	// Write statistics.
-	Write     int           // number of writes performed
-	WriteTime time.Duration // total time spent writing to disk
-}
-
-func (s *TxStats) add(other *TxStats) {
-	s.PageCount += other.PageCount
-	s.PageAlloc += other.PageAlloc
-	s.CursorCount += other.CursorCount
-	s.NodeCount += other.NodeCount
-	s.NodeDeref += other.NodeDeref
-	s.Rebalance += other.Rebalance
-	s.RebalanceTime += other.RebalanceTime
-	s.Split += other.Split
-	s.Spill += other.Spill
-	s.SpillTime += other.SpillTime
-	s.Write += other.Write
-	s.WriteTime += other.WriteTime
-}
-
-// Sub calculates and returns the difference between two sets of transaction stats.
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
-func (s *TxStats) Sub(other *TxStats) TxStats {
-	var diff TxStats
-	diff.PageCount = s.PageCount - other.PageCount
-	diff.PageAlloc = s.PageAlloc - other.PageAlloc
-	diff.CursorCount = s.CursorCount - other.CursorCount
-	diff.NodeCount = s.NodeCount - other.NodeCount
-	diff.NodeDeref = s.NodeDeref - other.NodeDeref
-	diff.Rebalance = s.Rebalance - other.Rebalance
-	diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
-	diff.Split = s.Split - other.Split
-	diff.Spill = s.Spill - other.Spill
-	diff.SpillTime = s.SpillTime - other.SpillTime
-	diff.Write = s.Write - other.Write
-	diff.WriteTime = s.WriteTime - other.WriteTime
-	return diff
-}
diff --git a/vendor/src/github.com/cloudflare/cfssl/LICENSE b/vendor/src/github.com/cloudflare/cfssl/LICENSE
deleted file mode 100644
index bc5841fa5..000000000
--- a/vendor/src/github.com/cloudflare/cfssl/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-Copyright (c) 2014 CloudFlare Inc.
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/cloudflare/cfssl/api/api.go b/vendor/src/github.com/cloudflare/cfssl/api/api.go deleted file mode 100644 index f1040cade..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/api/api.go +++ /dev/null @@ -1,231 +0,0 @@ -// Package api implements an HTTP-based API and server for CFSSL. -package api - -import ( - "encoding/json" - "io/ioutil" - "net/http" - - "github.com/cloudflare/cfssl/errors" - "github.com/cloudflare/cfssl/log" -) - -// Handler is an interface providing a generic mechanism for handling HTTP requests. -type Handler interface { - Handle(w http.ResponseWriter, r *http.Request) error -} - -// HTTPHandler is a wrapper that encapsulates Handler interface as http.Handler. -// HTTPHandler also enforces that the Handler only responds to requests with registered HTTP methods. -type HTTPHandler struct { - Handler // CFSSL handler - Methods []string // The associated HTTP methods -} - -// HandlerFunc is similar to the http.HandlerFunc type; it serves as -// an adapter allowing the use of ordinary functions as Handlers. If -// f is a function with the appropriate signature, HandlerFunc(f) is a -// Handler object that calls f. -type HandlerFunc func(http.ResponseWriter, *http.Request) error - -// Handle calls f(w, r) -func (f HandlerFunc) Handle(w http.ResponseWriter, r *http.Request) error { - w.Header().Set("Content-Type", "application/json") - return f(w, r) -} - -// handleError is the centralised error handling and reporting. -func handleError(w http.ResponseWriter, err error) (code int) { - if err == nil { - return http.StatusOK - } - msg := err.Error() - httpCode := http.StatusInternalServerError - - // If it is recognized as HttpError emitted from cfssl, - // we rewrite the status code accordingly. 
If it is a
-	// cfssl error, set the http status to StatusBadRequest
-	switch err := err.(type) {
-	case *errors.HTTPError:
-		httpCode = err.StatusCode
-		code = err.StatusCode
-	case *errors.Error:
-		httpCode = http.StatusBadRequest
-		code = err.ErrorCode
-		msg = err.Message
-	}
-
-	response := NewErrorResponse(msg, code)
-	jsonMessage, err := json.Marshal(response)
-	if err != nil {
-		log.Errorf("Failed to marshal JSON: %v", err)
-	} else {
-		msg = string(jsonMessage)
-	}
-	http.Error(w, msg, httpCode)
-	return code
-}
-
-// ServeHTTP encapsulates the call to underlying Handler to handle the request
-// and return the response with proper HTTP status code
-func (h HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	var err error
-	var match bool
-	// Throw 405 when requested with an unsupported verb.
-	for _, m := range h.Methods {
-		if m == r.Method {
-			match = true
-		}
-	}
-	if match {
-		err = h.Handle(w, r)
-	} else {
-		err = errors.NewMethodNotAllowed(r.Method)
-	}
-	status := handleError(w, err)
-	log.Infof("%s - \"%s %s\" %d", r.RemoteAddr, r.Method, r.URL, status)
-}
-
-// readRequestBlob reads a JSON-blob-encoded request body in the form
-// map[string]string and returns it along with any error that occurred.
-func readRequestBlob(r *http.Request) (map[string]string, error) {
-	var blob map[string]string
-
-	body, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		return nil, err
-	}
-	r.Body.Close()
-
-	err = json.Unmarshal(body, &blob)
-	if err != nil {
-		return nil, err
-	}
-	return blob, nil
-}
-
-// ProcessRequestOneOf reads a JSON blob for the request and makes
-// sure it contains one of a set of keywords. For example, a request
-// might have the ('foo' && 'bar') keys, OR it might have the 'baz'
-// key. In either case, we want to accept the request; however, if
-// none of these sets shows up, the request is a bad request, and it
-// should be returned.
-func ProcessRequestOneOf(r *http.Request, keywordSets [][]string) (map[string]string, []string, error) {
-	blob, err := readRequestBlob(r)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var matched []string
-	for _, set := range keywordSets {
-		if matchKeywords(blob, set) {
-			if matched != nil {
-				return nil, nil, errors.NewBadRequestString("mismatched parameters")
-			}
-			matched = set
-		}
-	}
-	if matched == nil {
-		return nil, nil, errors.NewBadRequestString("no valid parameter sets found")
-	}
-	return blob, matched, nil
-}
-
-// ProcessRequestFirstMatchOf reads a JSON blob for the request and returns
-// the first match of a set of keywords. For example, a request
-// might have one of the following combinations: (foo=1, bar=2), (foo=1), and (bar=2).
-// By giving a specific ordering of those combinations, we could decide how to accept
-// the request.
-func ProcessRequestFirstMatchOf(r *http.Request, keywordSets [][]string) (map[string]string, []string, error) {
-	blob, err := readRequestBlob(r)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	for _, set := range keywordSets {
-		if matchKeywords(blob, set) {
-			return blob, set, nil
-		}
-	}
-	return nil, nil, errors.NewBadRequestString("no valid parameter sets found")
-}
-
-func matchKeywords(blob map[string]string, keywords []string) bool {
-	for _, keyword := range keywords {
-		if _, ok := blob[keyword]; !ok {
-			return false
-		}
-	}
-	return true
-}
-
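HTTPHandler above adapts CFSSL's error-returning handlers to net/http: ServeHTTP rejects verbs missing from Methods with a 405 and funnels any returned error through handleError. A sketch of registering one (the ping endpoint and port are illustrative, not part of cfssl):

```go
package main

import (
	"log"
	"net/http"

	"github.com/cloudflare/cfssl/api"
)

// ping is a hypothetical endpoint; returning an error (here none) lets the
// HTTPHandler wrapper map failures onto HTTP status codes for us.
func ping(w http.ResponseWriter, r *http.Request) error {
	return api.SendResponse(w, map[string]string{"status": "ok"})
}

func main() {
	// Any verb other than GET or POST gets a 405 from ServeHTTP.
	http.Handle("/api/v1/cfssl/ping", api.HTTPHandler{
		Handler: api.HandlerFunc(ping),
		Methods: []string{"GET", "POST"},
	})
	log.Fatal(http.ListenAndServe(":8888", nil))
}
```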
-// ResponseMessage implements the standard for response errors and
-// messages. A message has a code and a string message.
-type ResponseMessage struct {
-	Code    int    `json:"code"`
-	Message string `json:"message"`
-}
-
-// Response implements the CloudFlare standard for API
-// responses.
-type Response struct {
-	Success  bool              `json:"success"`
-	Result   interface{}       `json:"result"`
-	Errors   []ResponseMessage `json:"errors"`
-	Messages []ResponseMessage `json:"messages"`
-}
-
-// NewSuccessResponse is a shortcut for creating new successful API
-// responses.
-func NewSuccessResponse(result interface{}) Response {
-	return Response{
-		Success:  true,
-		Result:   result,
-		Errors:   []ResponseMessage{},
-		Messages: []ResponseMessage{},
-	}
-}
-
-// NewSuccessResponseWithMessage is a shortcut for creating new successful API
-// responses that includes a message.
-func NewSuccessResponseWithMessage(result interface{}, message string, code int) Response {
-	return Response{
-		Success:  true,
-		Result:   result,
-		Errors:   []ResponseMessage{},
-		Messages: []ResponseMessage{{code, message}},
-	}
-}
-
-// NewErrorResponse is a shortcut for creating an error response for a
-// single error.
-func NewErrorResponse(message string, code int) Response {
-	return Response{
-		Success:  false,
-		Result:   nil,
-		Errors:   []ResponseMessage{{code, message}},
-		Messages: []ResponseMessage{},
-	}
-}
-
-// SendResponse builds a response from the result, sets the JSON
-// header, and writes to the http.ResponseWriter.
-func SendResponse(w http.ResponseWriter, result interface{}) error {
-	response := NewSuccessResponse(result)
-	w.Header().Set("Content-Type", "application/json")
-	enc := json.NewEncoder(w)
-	err := enc.Encode(response)
-	return err
-}
-
-// SendResponseWithMessage builds a response from the result and the
-// provided message, sets the JSON header, and writes to the
-// http.ResponseWriter.
-func SendResponseWithMessage(w http.ResponseWriter, result interface{}, message string, code int) error {
-	response := NewSuccessResponseWithMessage(result, message, code)
-	w.Header().Set("Content-Type", "application/json")
-	enc := json.NewEncoder(w)
-	err := enc.Encode(response)
-	return err
-}
diff --git a/vendor/src/github.com/cloudflare/cfssl/auth/auth.go b/vendor/src/github.com/cloudflare/cfssl/auth/auth.go
deleted file mode 100644
index ecd5e5fef..000000000
--- a/vendor/src/github.com/cloudflare/cfssl/auth/auth.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Package auth implements an interface for providing CFSSL
-// authentication. This is meant to authenticate a client CFSSL to a
-// remote CFSSL in order to prevent unauthorised use of the signature
-// capabilities. This package provides both the interface and a
-// standard HMAC-based implementation.
-package auth
-
-import (
-	"crypto/hmac"
-	"crypto/sha256"
-	"encoding/hex"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"strings"
-)
-
-// An AuthenticatedRequest contains a request and authentication
-// token. The Provider may determine whether to validate the timestamp
-// and remote address.
-type AuthenticatedRequest struct {
-	// An Authenticator decides whether to use this field.
-	Timestamp     int64  `json:"timestamp,omitempty"`
-	RemoteAddress []byte `json:"remote_address,omitempty"`
-	Token         []byte `json:"token"`
-	Request       []byte `json:"request"`
-}
-
-// A Provider can generate tokens from a request and verify a
-// request. The handling of additional authentication data (such as
-// the IP address) is handled by the concrete type, as is any
-// serialisation and state-keeping.
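AuthenticatedRequest above carries an HMAC-SHA-256 token computed over the serialized request (plus optional additional data) by the Standard provider that implements the Provider interface declared next. A round-trip sketch (the hex key is a throwaway example value):

```go
package main

import (
	"fmt"
	"log"

	"github.com/cloudflare/cfssl/auth"
)

func main() {
	// A throwaway hex-encoded HMAC key; New also accepts "env:VAR" and
	// "file:path" forms for pulling the key from the environment or disk.
	provider, err := auth.New("000102030405060708090a0b0c0d0e0f", nil)
	if err != nil {
		log.Fatal(err)
	}

	req := []byte(`{"hosts":["example.net"]}`)
	token, err := provider.Token(req)
	if err != nil {
		log.Fatal(err)
	}

	// Verify recomputes the HMAC over Request and compares in constant time.
	ok := provider.Verify(&auth.AuthenticatedRequest{Token: token, Request: req})
	fmt.Println(ok) // true
}
```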
-type Provider interface { - Token(req []byte) (token []byte, err error) - Verify(aReq *AuthenticatedRequest) bool -} - -// Standard implements an HMAC-SHA-256 authentication provider. It may -// be supplied additional data at creation time that will be used as -// request || additional-data with the HMAC. -type Standard struct { - key []byte - ad []byte -} - -// New generates a new standard authentication provider from the key -// and additional data. The additional data will be used when -// generating a new token. -func New(key string, ad []byte) (*Standard, error) { - if splitKey := strings.SplitN(key, ":", 2); len(splitKey) == 2 { - switch splitKey[0] { - case "env": - key = os.Getenv(splitKey[1]) - case "file": - data, err := ioutil.ReadFile(splitKey[1]) - if err != nil { - return nil, err - } - key = string(data) - default: - return nil, fmt.Errorf("unknown key prefix: %s", splitKey[0]) - } - } - - keyBytes, err := hex.DecodeString(key) - if err != nil { - return nil, err - } - - return &Standard{keyBytes, ad}, nil -} - -// Token generates a new authentication token from the request. -func (p Standard) Token(req []byte) (token []byte, err error) { - h := hmac.New(sha256.New, p.key) - h.Write(req) - h.Write(p.ad) - return h.Sum(nil), nil -} - -// Verify determines whether an authenticated request is valid. -func (p Standard) Verify(ad *AuthenticatedRequest) bool { - if ad == nil { - return false - } - - // Standard token generation returns no error. - token, _ := p.Token(ad.Request) - if len(ad.Token) != len(token) { - return false - } - - return hmac.Equal(token, ad.Token) -} diff --git a/vendor/src/github.com/cloudflare/cfssl/certdb/README.md b/vendor/src/github.com/cloudflare/cfssl/certdb/README.md deleted file mode 100644 index 31eff57ca..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/certdb/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# certdb usage - -Using a database enables additional functionality for existing commands when a -db config is provided: - - - `sign` and `gencert` add a certificate to the certdb after signing it - - `serve` enables database functionality for the sign and revoke endpoints - -A database is required for the following: - - - `revoke` marks certificates revoked in the database with an optional reason - - `ocsprefresh` refreshes the table of cached OCSP responses - - `ocspdump` outputs cached OCSP responses in a concatenated base64-encoded format - -## Setup/Migration - -This directory stores [goose](https://bitbucket.org/liamstask/goose/) db migration scripts for various DB backends. -Currently supported: - - MySQL in mysql - - PostgreSQL in pg - - SQLite in sqlite - -### Get goose - - go get bitbucket.org/liamstask/goose/cmd/goose - -### Use goose to start and terminate a MySQL DB -To start a MySQL using goose: - - goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql up - -To tear down a MySQL DB using goose - - goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql down - -Note: the administration of MySQL DB is not included. We assume -the databases being connected to are already created and access control -is properly handled. - -### Use goose to start and terminate a PostgreSQL DB -To start a PostgreSQL using goose: - - goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg up - -To tear down a PostgreSQL DB using goose - - goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg down - -Note: the administration of PostgreSQL DB is not included. 
We assume -the databases being connected to are already created and access control -is properly handled. - -### Use goose to start and terminate a SQLite DB -To start a SQLite DB using goose: - - goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite up - -To tear down a SQLite DB using goose - - goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite down - -## CFSSL Configuration - -Several cfssl commands take a -db-config flag. Create a file with a -JSON dictionary: - - {"driver":"sqlite3","data_source":"certs.db"} - -or - - {"driver":"postgres","data_source":"postgres://user:password@host/db"} diff --git a/vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go b/vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go deleted file mode 100644 index 96694f768..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go +++ /dev/null @@ -1,40 +0,0 @@ -package certdb - -import ( - "time" -) - -// CertificateRecord encodes a certificate and its metadata -// that will be recorded in a database. -type CertificateRecord struct { - Serial string `db:"serial_number"` - AKI string `db:"authority_key_identifier"` - CALabel string `db:"ca_label"` - Status string `db:"status"` - Reason int `db:"reason"` - Expiry time.Time `db:"expiry"` - RevokedAt time.Time `db:"revoked_at"` - PEM string `db:"pem"` -} - -// OCSPRecord encodes a OCSP response body and its metadata -// that will be recorded in a database. -type OCSPRecord struct { - Serial string `db:"serial_number"` - AKI string `db:"authority_key_identifier"` - Body string `db:"body"` - Expiry time.Time `db:"expiry"` -} - -// Accessor abstracts the CRUD of certdb objects from a DB. -type Accessor interface { - InsertCertificate(cr CertificateRecord) error - GetCertificate(serial, aki string) ([]CertificateRecord, error) - GetUnexpiredCertificates() ([]CertificateRecord, error) - RevokeCertificate(serial, aki string, reasonCode int) error - InsertOCSP(rr OCSPRecord) error - GetOCSP(serial, aki string) ([]OCSPRecord, error) - GetUnexpiredOCSPs() ([]OCSPRecord, error) - UpdateOCSP(serial, aki, body string, expiry time.Time) error - UpsertOCSP(serial, aki, body string, expiry time.Time) error -} diff --git a/vendor/src/github.com/cloudflare/cfssl/config/config.go b/vendor/src/github.com/cloudflare/cfssl/config/config.go deleted file mode 100644 index a6837eb0d..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/config/config.go +++ /dev/null @@ -1,563 +0,0 @@ -// Package config contains the configuration logic for CFSSL. -package config - -import ( - "crypto/x509" - "encoding/asn1" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "regexp" - "strconv" - "strings" - "time" - - "github.com/cloudflare/cfssl/auth" - cferr "github.com/cloudflare/cfssl/errors" - "github.com/cloudflare/cfssl/helpers" - "github.com/cloudflare/cfssl/log" - ocspConfig "github.com/cloudflare/cfssl/ocsp/config" -) - -// A CSRWhitelist stores booleans for fields in the CSR. If a CSRWhitelist is -// not present in a SigningProfile, all of these fields may be copied from the -// CSR into the signed certificate. If a CSRWhitelist *is* present in a -// SigningProfile, only those fields with a `true` value in the CSRWhitelist may -// be copied from the CSR to the signed certificate. Note that some of these -// fields, like Subject, can be provided or partially provided through the API. -// Since API clients are expected to be trusted, but CSRs are not, fields -// provided through the API are not subject to whitelisting through this -// mechanism. 
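The -db-config file described in the README above is a two-field JSON dictionary naming a driver and a data source. A sketch of reading one (the dbConfig type and file name are assumptions for illustration, not cfssl's own loader):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
)

// dbConfig mirrors the JSON dictionary the -db-config flag points at;
// the type itself is illustrative, not cfssl's.
type dbConfig struct {
	Driver     string `json:"driver"`
	DataSource string `json:"data_source"`
}

func main() {
	// db-config.json would contain, e.g.:
	// {"driver":"sqlite3","data_source":"certs.db"}
	raw, err := ioutil.ReadFile("db-config.json")
	if err != nil {
		log.Fatal(err)
	}
	var cfg dbConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Driver, cfg.DataSource)
}
```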
-type CSRWhitelist struct { - Subject, PublicKeyAlgorithm, PublicKey, SignatureAlgorithm bool - DNSNames, IPAddresses, EmailAddresses bool -} - -// OID is our own version of asn1's ObjectIdentifier, so we can define a custom -// JSON marshal / unmarshal. -type OID asn1.ObjectIdentifier - -// CertificatePolicy represents the ASN.1 PolicyInformation structure from -// https://tools.ietf.org/html/rfc3280.html#page-106. -// Valid values of Type are "id-qt-unotice" and "id-qt-cps" -type CertificatePolicy struct { - ID OID - Qualifiers []CertificatePolicyQualifier -} - -// CertificatePolicyQualifier represents a single qualifier from an ASN.1 -// PolicyInformation structure. -type CertificatePolicyQualifier struct { - Type string - Value string -} - -// AuthRemote is an authenticated remote signer. -type AuthRemote struct { - RemoteName string `json:"remote"` - AuthKeyName string `json:"auth_key"` -} - -// A SigningProfile stores information that the CA needs to store -// signature policy. -type SigningProfile struct { - Usage []string `json:"usages"` - IssuerURL []string `json:"issuer_urls"` - OCSP string `json:"ocsp_url"` - CRL string `json:"crl_url"` - CA bool `json:"is_ca"` - OCSPNoCheck bool `json:"ocsp_no_check"` - ExpiryString string `json:"expiry"` - BackdateString string `json:"backdate"` - AuthKeyName string `json:"auth_key"` - RemoteName string `json:"remote"` - NotBefore time.Time `json:"not_before"` - NotAfter time.Time `json:"not_after"` - NameWhitelistString string `json:"name_whitelist"` - AuthRemote AuthRemote `json:"auth_remote"` - CTLogServers []string `json:"ct_log_servers"` - AllowedExtensions []OID `json:"allowed_extensions"` - CertStore string `json:"cert_store"` - - Policies []CertificatePolicy - Expiry time.Duration - Backdate time.Duration - Provider auth.Provider - RemoteProvider auth.Provider - RemoteServer string - CSRWhitelist *CSRWhitelist - NameWhitelist *regexp.Regexp - ExtensionWhitelist map[string]bool - ClientProvidesSerialNumbers bool -} - -// UnmarshalJSON unmarshals a JSON string into an OID. -func (oid *OID) UnmarshalJSON(data []byte) (err error) { - if data[0] != '"' || data[len(data)-1] != '"' { - return errors.New("OID JSON string not wrapped in quotes." + string(data)) - } - data = data[1 : len(data)-1] - parsedOid, err := parseObjectIdentifier(string(data)) - if err != nil { - return err - } - *oid = OID(parsedOid) - return -} - -// MarshalJSON marshals an oid into a JSON string. -func (oid OID) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%v"`, asn1.ObjectIdentifier(oid))), nil -} - -func parseObjectIdentifier(oidString string) (oid asn1.ObjectIdentifier, err error) { - validOID, err := regexp.MatchString("\\d(\\.\\d+)*", oidString) - if err != nil { - return - } - if !validOID { - err = errors.New("Invalid OID") - return - } - - segments := strings.Split(oidString, ".") - oid = make(asn1.ObjectIdentifier, len(segments)) - for i, intString := range segments { - oid[i], err = strconv.Atoi(intString) - if err != nil { - return - } - } - return -} - -const timeFormat = "2006-01-02T15:04:05" - -// populate is used to fill in the fields that are not in JSON -// -// First, the ExpiryString parameter is needed to parse -// expiration timestamps from JSON. The JSON decoder is not able to -// decode a string time duration to a time.Duration, so this is called -// when loading the configuration to properly parse and fill out the -// Expiry parameter. 
-// This function is also used to create references to the auth key
-// and default remote for the profile.
-// It returns nil if ExpiryString is a valid representation of a
-// time.Duration, and the AuthKeyName and RemoteName point to
-// valid objects. It returns an error otherwise.
-func (p *SigningProfile) populate(cfg *Config) error {
-	if p == nil {
-		return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("can't parse nil profile"))
-	}
-
-	var err error
-	if p.RemoteName == "" && p.AuthRemote.RemoteName == "" {
-		log.Debugf("parse expiry in profile")
-		if p.ExpiryString == "" {
-			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("empty expiry string"))
-		}
-
-		dur, err := time.ParseDuration(p.ExpiryString)
-		if err != nil {
-			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
-		}
-
-		log.Debugf("expiry is valid")
-		p.Expiry = dur
-
-		if p.BackdateString != "" {
-			dur, err = time.ParseDuration(p.BackdateString)
-			if err != nil {
-				return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
-			}
-
-			p.Backdate = dur
-		}
-
-		if !p.NotBefore.IsZero() && !p.NotAfter.IsZero() && p.NotAfter.Before(p.NotBefore) {
-			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
-		}
-
-		if len(p.Policies) > 0 {
-			for _, policy := range p.Policies {
-				for _, qualifier := range policy.Qualifiers {
-					if qualifier.Type != "" && qualifier.Type != "id-qt-unotice" && qualifier.Type != "id-qt-cps" {
-						return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-							errors.New("invalid policy qualifier type"))
-					}
-				}
-			}
-		}
-	} else if p.RemoteName != "" {
-		log.Debug("match remote in profile to remotes section")
-		if p.AuthRemote.RemoteName != "" {
-			log.Error("profile has both a remote and an auth remote specified")
-			return cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
-		}
-		if remote := cfg.Remotes[p.RemoteName]; remote != "" {
-			if err := p.updateRemote(remote); err != nil {
-				return err
-			}
-		} else {
-			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-				errors.New("failed to find remote in remotes section"))
-		}
-	} else {
-		log.Debug("match auth remote in profile to remotes section")
-		if remote := cfg.Remotes[p.AuthRemote.RemoteName]; remote != "" {
-			if err := p.updateRemote(remote); err != nil {
-				return err
-			}
-		} else {
-			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-				errors.New("failed to find remote in remotes section"))
-		}
-	}
-
-	if p.AuthKeyName != "" {
-		log.Debug("match auth key in profile to auth_keys section")
-		if key, ok := cfg.AuthKeys[p.AuthKeyName]; ok == true {
-			if key.Type == "standard" {
-				p.Provider, err = auth.New(key.Key, nil)
-				if err != nil {
-					log.Debugf("failed to create new standard auth provider: %v", err)
-					return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-						errors.New("failed to create new standard auth provider"))
-				}
-			} else {
-				log.Debugf("unknown authentication type %v", key.Type)
-				return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-					errors.New("unknown authentication type"))
-			}
-		} else {
-			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-				errors.New("failed to find auth_key in auth_keys section"))
-		}
-	}
-
-	if p.AuthRemote.AuthKeyName != "" {
-		log.Debug("match auth remote key in profile to auth_keys section")
-		if key, ok := cfg.AuthKeys[p.AuthRemote.AuthKeyName]; ok == true {
-			if key.Type == "standard" {
-				p.RemoteProvider, err = auth.New(key.Key, nil)
-				if err != nil {
-					log.Debugf("failed to create new standard auth provider: %v", err)
-					return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-						errors.New("failed to create new standard auth provider"))
-				}
-			} else {
-				log.Debugf("unknown authentication type %v", key.Type)
-				return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-					errors.New("unknown authentication type"))
-			}
-		} else {
-			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-				errors.New("failed to find auth_remote's auth_key in auth_keys section"))
-		}
-	}
-
-	if p.NameWhitelistString != "" {
-		log.Debug("compiling whitelist regular expression")
-		rule, err := regexp.Compile(p.NameWhitelistString)
-		if err != nil {
-			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-				errors.New("failed to compile name whitelist section"))
-		}
-		p.NameWhitelist = rule
-	}
-
-	p.ExtensionWhitelist = map[string]bool{}
-	for _, oid := range p.AllowedExtensions {
-		p.ExtensionWhitelist[asn1.ObjectIdentifier(oid).String()] = true
-	}
-
-	return nil
-}
-
-// updateRemote takes a signing profile and initializes the remote server object
-// to the hostname:port combination sent by remote.
-func (p *SigningProfile) updateRemote(remote string) error {
-	if remote != "" {
-		p.RemoteServer = remote
-	}
-	return nil
-}
-
-// OverrideRemotes takes a signing configuration and updates the remote server object
-// to the hostname:port combination sent by remote.
-func (p *Signing) OverrideRemotes(remote string) error {
-	if remote != "" {
-		var err error
-		for _, profile := range p.Profiles {
-			err = profile.updateRemote(remote)
-			if err != nil {
-				return err
-			}
-		}
-		err = p.Default.updateRemote(remote)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// NeedsRemoteSigner returns true if one of the profiles has a remote set.
-func (p *Signing) NeedsRemoteSigner() bool {
-	for _, profile := range p.Profiles {
-		if profile.RemoteServer != "" {
-			return true
-		}
-	}
-
-	if p.Default.RemoteServer != "" {
-		return true
-	}
-
-	return false
-}
-
-// NeedsLocalSigner returns true if one of the profiles does not have a remote set.
-func (p *Signing) NeedsLocalSigner() bool {
-	for _, profile := range p.Profiles {
-		if profile.RemoteServer == "" {
-			return true
-		}
-	}
-
-	if p.Default.RemoteServer == "" {
-		return true
-	}
-
-	return false
-}
-
-// Usages parses the list of key uses in the profile, translating them
-// to a list of X.509 key usages and extended key usages. The unknown
-// uses are collected into a slice that is also returned.
-func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) {
-	for _, keyUse := range p.Usage {
-		if kuse, ok := KeyUsage[keyUse]; ok {
-			ku |= kuse
-		} else if ekuse, ok := ExtKeyUsage[keyUse]; ok {
-			eku = append(eku, ekuse)
-		} else {
-			unk = append(unk, keyUse)
-		}
-	}
-	return
-}
-
-// A valid profile must be a valid local profile or a valid remote profile.
-// A valid local profile has defined at least key usages to be used, and a
-// valid local default profile has defined at least a default expiration.
-// A valid remote profile (default or not) has the remote signer initialized.
-// In addition, a remote profile must have a valid auth provider if an auth
-// key is defined.
-func (p *SigningProfile) validProfile(isDefault bool) bool { - if p == nil { - return false - } - - if p.RemoteName != "" { - log.Debugf("validate remote profile") - - if p.RemoteServer == "" { - log.Debugf("invalid remote profile: no remote signer specified") - return false - } - - if p.AuthKeyName != "" && p.Provider == nil { - log.Debugf("invalid remote profile: auth key name is defined but no auth provider is set") - return false - } - - if p.AuthRemote.RemoteName != "" { - log.Debugf("invalid remote profile: auth remote is also specified") - } - } else if p.AuthRemote.RemoteName != "" { - log.Debugf("validate auth remote profile") - if p.RemoteServer == "" { - log.Debugf("invalid auth remote profile: no remote signer specified") - return false - } - - if p.AuthRemote.AuthKeyName == "" || p.RemoteProvider == nil { - log.Debugf("invalid auth remote profile: no auth key is defined") - return false - } - } else { - log.Debugf("validate local profile") - if !isDefault { - if len(p.Usage) == 0 { - log.Debugf("invalid local profile: no usages specified") - return false - } else if _, _, unk := p.Usages(); len(unk) == len(p.Usage) { - log.Debugf("invalid local profile: no valid usages") - return false - } - } else { - if p.Expiry == 0 { - log.Debugf("invalid local profile: no expiry set") - return false - } - } - } - - log.Debugf("profile is valid") - return true -} - -// Signing codifies the signature configuration policy for a CA. -type Signing struct { - Profiles map[string]*SigningProfile `json:"profiles"` - Default *SigningProfile `json:"default"` -} - -// Config stores configuration information for the CA. -type Config struct { - Signing *Signing `json:"signing"` - OCSP *ocspConfig.Config `json:"ocsp"` - AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"` - Remotes map[string]string `json:"remotes,omitempty"` -} - -// Valid ensures that Config is a valid configuration. It should be -// called immediately after parsing a configuration file. -func (c *Config) Valid() bool { - return c.Signing.Valid() -} - -// Valid checks the signature policies, ensuring they are valid -// policies. A policy is valid if it has defined at least key usages -// to be used, and a valid default profile has defined at least a -// default expiration. -func (p *Signing) Valid() bool { - if p == nil { - return false - } - - log.Debugf("validating configuration") - if !p.Default.validProfile(true) { - log.Debugf("default profile is invalid") - return false - } - - for _, sp := range p.Profiles { - if !sp.validProfile(false) { - log.Debugf("invalid profile") - return false - } - } - return true -} - -// KeyUsage contains a mapping of string names to key usages. -var KeyUsage = map[string]x509.KeyUsage{ - "signing": x509.KeyUsageDigitalSignature, - "digital signature": x509.KeyUsageDigitalSignature, - "content committment": x509.KeyUsageContentCommitment, - "key encipherment": x509.KeyUsageKeyEncipherment, - "key agreement": x509.KeyUsageKeyAgreement, - "data encipherment": x509.KeyUsageDataEncipherment, - "cert sign": x509.KeyUsageCertSign, - "crl sign": x509.KeyUsageCRLSign, - "encipher only": x509.KeyUsageEncipherOnly, - "decipher only": x509.KeyUsageDecipherOnly, -} - -// ExtKeyUsage contains a mapping of string names to extended key -// usages. 
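For reference, a minimal sketch of how Usages consumes the KeyUsage table above and the ExtKeyUsage table that follows. The import path matches this vendor tree; the profile contents are hypothetical.

    package main

    import (
        "fmt"

        "github.com/cloudflare/cfssl/config"
    )

    func main() {
        // Hypothetical profile: two known usage strings and one unknown.
        p := &config.SigningProfile{
            Usage: []string{"signing", "server auth", "bogus use"},
        }
        ku, eku, unk := p.Usages()
        fmt.Println(ku)  // KeyUsageDigitalSignature bit set
        fmt.Println(eku) // [ExtKeyUsageServerAuth]
        fmt.Println(unk) // [bogus use]
    }

A profile whose every usage string lands in unk is rejected by validProfile, so unknown entries are tolerated only alongside at least one recognized usage.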
-var ExtKeyUsage = map[string]x509.ExtKeyUsage{ - "any": x509.ExtKeyUsageAny, - "server auth": x509.ExtKeyUsageServerAuth, - "client auth": x509.ExtKeyUsageClientAuth, - "code signing": x509.ExtKeyUsageCodeSigning, - "email protection": x509.ExtKeyUsageEmailProtection, - "s/mime": x509.ExtKeyUsageEmailProtection, - "ipsec end system": x509.ExtKeyUsageIPSECEndSystem, - "ipsec tunnel": x509.ExtKeyUsageIPSECTunnel, - "ipsec user": x509.ExtKeyUsageIPSECUser, - "timestamping": x509.ExtKeyUsageTimeStamping, - "ocsp signing": x509.ExtKeyUsageOCSPSigning, - "microsoft sgc": x509.ExtKeyUsageMicrosoftServerGatedCrypto, - "netscape sgc": x509.ExtKeyUsageNetscapeServerGatedCrypto, -} - -// An AuthKey contains an entry for a key used for authentication. -type AuthKey struct { - // Type contains information needed to select the appropriate - // constructor. For example, "standard" for HMAC-SHA-256, - // "standard-ip" for HMAC-SHA-256 incorporating the client's - // IP. - Type string `json:"type"` - // Key contains the key information, such as a hex-encoded - // HMAC key. - Key string `json:"key"` -} - -// DefaultConfig returns a default configuration specifying basic key -// usage and a 1 year expiration time. The key usages chosen are -// signing, key encipherment, client auth and server auth. -func DefaultConfig() *SigningProfile { - d := helpers.OneYear - return &SigningProfile{ - Usage: []string{"signing", "key encipherment", "server auth", "client auth"}, - Expiry: d, - ExpiryString: "8760h", - } -} - -// LoadFile attempts to load the configuration file stored at the path -// and returns the configuration. On error, it returns nil. -func LoadFile(path string) (*Config, error) { - log.Debugf("loading configuration file from %s", path) - if path == "" { - return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid path")) - } - - body, err := ioutil.ReadFile(path) - if err != nil { - return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("could not read configuration file")) - } - - return LoadConfig(body) -} - -// LoadConfig attempts to load the configuration from a byte slice. -// On error, it returns nil. -func LoadConfig(config []byte) (*Config, error) { - var cfg = &Config{} - err := json.Unmarshal(config, &cfg) - if err != nil { - return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, - errors.New("failed to unmarshal configuration: "+err.Error())) - } - - if cfg.Signing == nil { - return nil, errors.New("No \"signing\" field present") - } - - if cfg.Signing.Default == nil { - log.Debugf("no default given: using default config") - cfg.Signing.Default = DefaultConfig() - } else { - if err := cfg.Signing.Default.populate(cfg); err != nil { - return nil, err - } - } - - for k := range cfg.Signing.Profiles { - if err := cfg.Signing.Profiles[k].populate(cfg); err != nil { - return nil, err - } - } - - if !cfg.Valid() { - return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid configuration")) - } - - log.Debugf("configuration ok") - return cfg, nil -} diff --git a/vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go b/vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go deleted file mode 100644 index 8db547fce..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go +++ /dev/null @@ -1,188 +0,0 @@ -// Package pkcs7 implements the subset of the CMS PKCS #7 datatype that is typically -// used to package certificates and CRLs. 
Using openssl, every certificate converted -// to PKCS #7 format from another encoding such as PEM conforms to this implementation. -// reference: https://www.openssl.org/docs/apps/crl2pkcs7.html -// -// PKCS #7 Data type, reference: https://tools.ietf.org/html/rfc2315 -// -// The full pkcs#7 cryptographic message syntax allows for cryptographic enhancements, -// for example data can be encrypted and signed and then packaged through pkcs#7 to be -// sent over a network and then verified and decrypted. It is asn1, and the type of -// PKCS #7 ContentInfo, which comprises the PKCS #7 structure, is: -// -// ContentInfo ::= SEQUENCE { -// contentType ContentType, -// content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL -// } -// -// There are 6 possible ContentTypes, data, signedData, envelopedData, -// signedAndEnvelopedData, digestedData, and encryptedData. Here signedData, Data, and encrypted -// Data are implemented, as the degenerate case of signedData without a signature is the typical -// format for transferring certificates and CRLS, and Data and encryptedData are used in PKCS #12 -// formats. -// The ContentType signedData has the form: -// -// -// signedData ::= SEQUENCE { -// version Version, -// digestAlgorithms DigestAlgorithmIdentifiers, -// contentInfo ContentInfo, -// certificates [0] IMPLICIT ExtendedCertificatesAndCertificates OPTIONAL -// crls [1] IMPLICIT CertificateRevocationLists OPTIONAL, -// signerInfos SignerInfos -// } -// -// As of yet signerInfos and digestAlgorithms are not parsed, as they are not relevant to -// this system's use of PKCS #7 data. Version is an integer type, note that PKCS #7 is -// recursive, this second layer of ContentInfo is similar ignored for our degenerate -// usage. The ExtendedCertificatesAndCertificates type consists of a sequence of choices -// between PKCS #6 extended certificates and x509 certificates. Any sequence consisting -// of any number of extended certificates is not yet supported in this implementation. -// -// The ContentType Data is simply a raw octet string and is parsed directly into a Go []byte slice. -// -// The ContentType encryptedData is the most complicated and its form can be gathered by -// the go type below. It essentially contains a raw octet string of encrypted data and an -// algorithm identifier for use in decrypting this data. -package pkcs7 - -import ( - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - - cferr "github.com/cloudflare/cfssl/errors" -) - -// Types used for asn1 Unmarshaling. - -type signedData struct { - Version int - DigestAlgorithms asn1.RawValue - ContentInfo asn1.RawValue - Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"` - Crls asn1.RawValue `asn1:"optional"` - SignerInfos asn1.RawValue -} - -type initPKCS7 struct { - Raw asn1.RawContent - ContentType asn1.ObjectIdentifier - Content asn1.RawValue `asn1:"tag:0,explicit,optional"` -} - -// Object identifier strings of the three implemented PKCS7 types. -const ( - ObjIDData = "1.2.840.113549.1.7.1" - ObjIDSignedData = "1.2.840.113549.1.7.2" - ObjIDEncryptedData = "1.2.840.113549.1.7.6" -) - -// PKCS7 represents the ASN1 PKCS #7 Content type. It contains one of three -// possible types of Content objects, as denoted by the object identifier in -// the ContentInfo field, the other two being nil. SignedData -// is the degenerate SignedData Content info without signature used -// to hold certificates and crls. Data is raw bytes, and EncryptedData -// is as defined in PKCS #7 standard. 
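As a usage sketch for this parser (ParsePKCS7 is defined further below): reading a hypothetical DER-encoded, degenerate SignedData bundle and listing the certificates it carries. The file name and import path are illustrative, not part of the package.

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"

        "github.com/cloudflare/cfssl/crypto/pkcs7"
    )

    func main() {
        // certs.p7b is assumed to hold a DER-encoded, degenerate SignedData bundle.
        der, err := ioutil.ReadFile("certs.p7b")
        if err != nil {
            log.Fatal(err)
        }
        msg, err := pkcs7.ParsePKCS7(der)
        if err != nil {
            log.Fatal(err)
        }
        if msg.ContentInfo != "SignedData" {
            log.Fatalf("unexpected content type %q", msg.ContentInfo)
        }
        for _, cert := range msg.Content.SignedData.Certificates {
            fmt.Println(cert.Subject.CommonName)
        }
    }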
-type PKCS7 struct { - Raw asn1.RawContent - ContentInfo string - Content Content -} - -// Content implements three of the six possible PKCS7 data types. Only one is non-nil. -type Content struct { - Data []byte - SignedData SignedData - EncryptedData EncryptedData -} - -// SignedData defines the typical carrier of certificates and crls. -type SignedData struct { - Raw asn1.RawContent - Version int - Certificates []*x509.Certificate - Crl *pkix.CertificateList -} - -// Data contains raw bytes. Used as a subtype in PKCS12. -type Data struct { - Bytes []byte -} - -// EncryptedData contains encrypted data. Used as a subtype in PKCS12. -type EncryptedData struct { - Raw asn1.RawContent - Version int - EncryptedContentInfo EncryptedContentInfo -} - -// EncryptedContentInfo is a subtype of PKCS7EncryptedData. -type EncryptedContentInfo struct { - Raw asn1.RawContent - ContentType asn1.ObjectIdentifier - ContentEncryptionAlgorithm pkix.AlgorithmIdentifier - EncryptedContent []byte `asn1:"tag:0,optional"` -} - -// ParsePKCS7 attempts to parse the DER encoded bytes of a -// PKCS7 structure. -func ParsePKCS7(raw []byte) (msg *PKCS7, err error) { - - var pkcs7 initPKCS7 - _, err = asn1.Unmarshal(raw, &pkcs7) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) - } - - msg = new(PKCS7) - msg.Raw = pkcs7.Raw - msg.ContentInfo = pkcs7.ContentType.String() - switch { - case msg.ContentInfo == ObjIDData: - msg.ContentInfo = "Data" - _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) - } - case msg.ContentInfo == ObjIDSignedData: - msg.ContentInfo = "SignedData" - var signedData signedData - _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) - } - if len(signedData.Certificates.Bytes) != 0 { - msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) - } - } - if len(signedData.Crls.Bytes) != 0 { - msg.Content.SignedData.Crl, err = x509.ParseDERCRL(signedData.Crls.Bytes) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) - } - } - msg.Content.SignedData.Version = signedData.Version - msg.Content.SignedData.Raw = pkcs7.Content.Bytes - case msg.ContentInfo == ObjIDEncryptedData: - msg.ContentInfo = "EncryptedData" - var encryptedData EncryptedData - _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) - } - if encryptedData.Version != 0 { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Only support for PKCS #7 encryptedData version 0")) - } - msg.Content.EncryptedData = encryptedData - - default: - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Attempt to parse PKCS# 7 Content not of type data, signed data or encrypted data")) - } - - return msg, nil - -} diff --git a/vendor/src/github.com/cloudflare/cfssl/csr/csr.go b/vendor/src/github.com/cloudflare/cfssl/csr/csr.go deleted file mode 100644 index 4329b7956..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/csr/csr.go +++ /dev/null @@ -1,431 +0,0 @@ -// Package csr implements certificate requests for CFSSL. 
-package csr - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/pem" - "errors" - "net" - "net/mail" - "strings" - - cferr "github.com/cloudflare/cfssl/errors" - "github.com/cloudflare/cfssl/helpers" - "github.com/cloudflare/cfssl/log" -) - -const ( - curveP256 = 256 - curveP384 = 384 - curveP521 = 521 -) - -// A Name contains the SubjectInfo fields. -type Name struct { - C string // Country - ST string // State - L string // Locality - O string // OrganisationName - OU string // OrganisationalUnitName - SerialNumber string -} - -// A KeyRequest is a generic request for a new key. -type KeyRequest interface { - Algo() string - Size() int - Generate() (crypto.PrivateKey, error) - SigAlgo() x509.SignatureAlgorithm -} - -// A BasicKeyRequest contains the algorithm and key size for a new private key. -type BasicKeyRequest struct { - A string `json:"algo"` - S int `json:"size"` -} - -// NewBasicKeyRequest returns a default BasicKeyRequest. -func NewBasicKeyRequest() *BasicKeyRequest { - return &BasicKeyRequest{"ecdsa", curveP256} -} - -// Algo returns the requested key algorithm represented as a string. -func (kr *BasicKeyRequest) Algo() string { - return kr.A -} - -// Size returns the requested key size. -func (kr *BasicKeyRequest) Size() int { - return kr.S -} - -// Generate generates a key as specified in the request. Currently, -// only ECDSA and RSA are supported. -func (kr *BasicKeyRequest) Generate() (crypto.PrivateKey, error) { - log.Debugf("generate key from request: algo=%s, size=%d", kr.Algo(), kr.Size()) - switch kr.Algo() { - case "rsa": - if kr.Size() < 2048 { - return nil, errors.New("RSA key is too weak") - } - if kr.Size() > 8192 { - return nil, errors.New("RSA key size too large") - } - return rsa.GenerateKey(rand.Reader, kr.Size()) - case "ecdsa": - var curve elliptic.Curve - switch kr.Size() { - case curveP256: - curve = elliptic.P256() - case curveP384: - curve = elliptic.P384() - case curveP521: - curve = elliptic.P521() - default: - return nil, errors.New("invalid curve") - } - return ecdsa.GenerateKey(curve, rand.Reader) - default: - return nil, errors.New("invalid algorithm") - } -} - -// SigAlgo returns an appropriate X.509 signature algorithm given the -// key request's type and size. -func (kr *BasicKeyRequest) SigAlgo() x509.SignatureAlgorithm { - switch kr.Algo() { - case "rsa": - switch { - case kr.Size() >= 4096: - return x509.SHA512WithRSA - case kr.Size() >= 3072: - return x509.SHA384WithRSA - case kr.Size() >= 2048: - return x509.SHA256WithRSA - default: - return x509.SHA1WithRSA - } - case "ecdsa": - switch kr.Size() { - case curveP521: - return x509.ECDSAWithSHA512 - case curveP384: - return x509.ECDSAWithSHA384 - case curveP256: - return x509.ECDSAWithSHA256 - default: - return x509.ECDSAWithSHA1 - } - default: - return x509.UnknownSignatureAlgorithm - } -} - -// CAConfig is a section used in the requests initialising a new CA. -type CAConfig struct { - PathLength int `json:"pathlen"` - PathLenZero bool `json:"pathlenzero"` - Expiry string `json:"expiry"` -} - -// A CertificateRequest encapsulates the API interface to the -// certificate request functionality. 
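A small sketch of the key-request flow described above, using the default ECDSA P-256 request; the names follow the vendored csr package and nothing else is assumed.

    package main

    import (
        "fmt"
        "log"

        "github.com/cloudflare/cfssl/csr"
    )

    func main() {
        kr := csr.NewBasicKeyRequest() // defaults to ECDSA over P-256
        priv, err := kr.Generate()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("generated %s-%d key (%T), signature algo %v\n",
            kr.Algo(), kr.Size(), priv, kr.SigAlgo())
    }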
-type CertificateRequest struct {
-    CN           string
-    Names        []Name     `json:"names"`
-    Hosts        []string   `json:"hosts"`
-    KeyRequest   KeyRequest `json:"key,omitempty"`
-    CA           *CAConfig  `json:"ca,omitempty"`
-    SerialNumber string     `json:"serialnumber,omitempty"`
-}
-
-// New returns a new, empty CertificateRequest with a
-// BasicKeyRequest.
-func New() *CertificateRequest {
-    return &CertificateRequest{
-        KeyRequest: NewBasicKeyRequest(),
-    }
-}
-
-// appendIf appends s to a if s is not an empty string.
-func appendIf(s string, a *[]string) {
-    if s != "" {
-        *a = append(*a, s)
-    }
-}
-
-// Name returns the PKIX name for the request.
-func (cr *CertificateRequest) Name() pkix.Name {
-    var name pkix.Name
-    name.CommonName = cr.CN
-
-    for _, n := range cr.Names {
-        appendIf(n.C, &name.Country)
-        appendIf(n.ST, &name.Province)
-        appendIf(n.L, &name.Locality)
-        appendIf(n.O, &name.Organization)
-        appendIf(n.OU, &name.OrganizationalUnit)
-    }
-    name.SerialNumber = cr.SerialNumber
-    return name
-}
-
-// BasicConstraints CSR information, RFC 5280, 4.2.1.9
-type BasicConstraints struct {
-    IsCA       bool `asn1:"optional"`
-    MaxPathLen int  `asn1:"optional,default:-1"`
-}
-
-// ParseRequest takes a certificate request and generates a key and
-// CSR from it. It does no validation -- caveat emptor. It will,
-// however, fail if the key request is not valid (i.e., an unsupported
-// curve or RSA key size). The lack of validation was specifically
-// chosen to allow the end user to define a policy and validate the
-// request appropriately before calling this function.
-func ParseRequest(req *CertificateRequest) (csr, key []byte, err error) {
-    log.Info("received CSR")
-    if req.KeyRequest == nil {
-        req.KeyRequest = NewBasicKeyRequest()
-    }
-
-    log.Infof("generating key: %s-%d", req.KeyRequest.Algo(), req.KeyRequest.Size())
-    priv, err := req.KeyRequest.Generate()
-    if err != nil {
-        err = cferr.Wrap(cferr.PrivateKeyError, cferr.GenerationFailed, err)
-        return
-    }
-
-    switch priv := priv.(type) {
-    case *rsa.PrivateKey:
-        key = x509.MarshalPKCS1PrivateKey(priv)
-        block := pem.Block{
-            Type:  "RSA PRIVATE KEY",
-            Bytes: key,
-        }
-        key = pem.EncodeToMemory(&block)
-    case *ecdsa.PrivateKey:
-        key, err = x509.MarshalECPrivateKey(priv)
-        if err != nil {
-            err = cferr.Wrap(cferr.PrivateKeyError, cferr.Unknown, err)
-            return
-        }
-        block := pem.Block{
-            Type:  "EC PRIVATE KEY",
-            Bytes: key,
-        }
-        key = pem.EncodeToMemory(&block)
-    default:
-        panic("Generate should have failed to produce a valid key.")
-    }
-
-    csr, err = Generate(priv.(crypto.Signer), req)
-    if err != nil {
-        log.Errorf("failed to generate a CSR: %v", err)
-        err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
-    }
-    return
-}
-
-// ExtractCertificateRequest extracts a CertificateRequest from an
-// x509.Certificate. It is intended to be used for generating a new
-// certificate from an existing certificate. For a root certificate, the CA
-// expiry length is calculated as the duration between cert.NotAfter and
-// cert.NotBefore.
-func ExtractCertificateRequest(cert *x509.Certificate) *CertificateRequest {
-    req := New()
-    req.CN = cert.Subject.CommonName
-    req.Names = getNames(cert.Subject)
-    req.Hosts = getHosts(cert)
-    req.SerialNumber = cert.Subject.SerialNumber
-
-    if cert.IsCA {
-        req.CA = new(CAConfig)
-        // CA expiry length is calculated based on the input cert
-        // issue date and expiry date.
-        req.CA.Expiry = cert.NotAfter.Sub(cert.NotBefore).String()
-        req.CA.PathLength = cert.MaxPathLen
-        req.CA.PathLenZero = cert.MaxPathLenZero
-    }
-
-    return req
-}
-
-func getHosts(cert *x509.Certificate) []string {
-    var hosts []string
-    for _, ip := range cert.IPAddresses {
-        hosts = append(hosts, ip.String())
-    }
-    for _, dns := range cert.DNSNames {
-        hosts = append(hosts, dns)
-    }
-    for _, email := range cert.EmailAddresses {
-        hosts = append(hosts, email)
-    }
-
-    return hosts
-}
-
-// getNames returns an array of Names from the certificate.
-// It only cares about Country, Organization, OrganizationalUnit,
-// Locality, and Province.
-func getNames(sub pkix.Name) []Name {
-    // anonymous func for finding the max of a list of integers
-    max := func(v1 int, vn ...int) (max int) {
-        max = v1
-        for i := 0; i < len(vn); i++ {
-            if vn[i] > max {
-                max = vn[i]
-            }
-        }
-        return max
-    }
-
-    nc := len(sub.Country)
-    norg := len(sub.Organization)
-    nou := len(sub.OrganizationalUnit)
-    nl := len(sub.Locality)
-    np := len(sub.Province)
-
-    n := max(nc, norg, nou, nl, np)
-
-    names := make([]Name, n)
-    for i := range names {
-        if i < nc {
-            names[i].C = sub.Country[i]
-        }
-        if i < norg {
-            names[i].O = sub.Organization[i]
-        }
-        if i < nou {
-            names[i].OU = sub.OrganizationalUnit[i]
-        }
-        if i < nl {
-            names[i].L = sub.Locality[i]
-        }
-        if i < np {
-            names[i].ST = sub.Province[i]
-        }
-    }
-    return names
-}
-
-// A Generator is responsible for validating certificate requests.
-type Generator struct {
-    Validator func(*CertificateRequest) error
-}
-
-// ProcessRequest validates and processes the incoming request. It is
-// a wrapper around a validator and the ParseRequest function.
-func (g *Generator) ProcessRequest(req *CertificateRequest) (csr, key []byte, err error) {
-
-    log.Info("generate received request")
-    err = g.Validator(req)
-    if err != nil {
-        log.Warningf("invalid request: %v", err)
-        return
-    }
-
-    csr, key, err = ParseRequest(req)
-    if err != nil {
-        return nil, nil, err
-    }
-    return
-}
-
-// IsNameEmpty returns true if the name has no identifying information in it.
-func IsNameEmpty(n Name) bool {
-    empty := func(s string) bool { return strings.TrimSpace(s) == "" }
-
-    if empty(n.C) && empty(n.ST) && empty(n.L) && empty(n.O) && empty(n.OU) {
-        return true
-    }
-    return false
-}
-
-// Regenerate uses the provided CSR as a template for signing a new
-// CSR using priv.
-func Regenerate(priv crypto.Signer, csr []byte) ([]byte, error) {
-    req, extra, err := helpers.ParseCSR(csr)
-    if err != nil {
-        return nil, err
-    } else if len(extra) > 0 {
-        return nil, errors.New("csr: trailing data in certificate request")
-    }
-
-    return x509.CreateCertificateRequest(rand.Reader, req, priv)
-}
-
-// Generate creates a new CSR from a CertificateRequest structure and
-// an existing key. The KeyRequest field is ignored.
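Tying the above together, a hedged sketch of the typical entry point: build a CertificateRequest, then let ParseRequest generate the key and hand off to Generate (defined below). The CN, hosts, and subject values here are invented for illustration.

    package main

    import (
        "fmt"
        "log"

        "github.com/cloudflare/cfssl/csr"
    )

    func main() {
        req := csr.New() // starts with a default ECDSA P-256 key request
        req.CN = "example.internal"
        req.Hosts = []string{"example.internal", "192.0.2.10"} // the IP exercises the SAN branches in Generate
        req.Names = []csr.Name{{C: "US", O: "Example Org"}}

        csrPEM, keyPEM, err := csr.ParseRequest(req)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s%s", keyPEM, csrPEM)
    }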
-func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err error) { - sigAlgo := helpers.SignerAlgo(priv) - if sigAlgo == x509.UnknownSignatureAlgorithm { - return nil, cferr.New(cferr.PrivateKeyError, cferr.Unavailable) - } - - var tpl = x509.CertificateRequest{ - Subject: req.Name(), - SignatureAlgorithm: sigAlgo, - } - - for i := range req.Hosts { - if ip := net.ParseIP(req.Hosts[i]); ip != nil { - tpl.IPAddresses = append(tpl.IPAddresses, ip) - } else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil { - tpl.EmailAddresses = append(tpl.EmailAddresses, email.Address) - } else { - tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i]) - } - } - - if req.CA != nil { - err = appendCAInfoToCSR(req.CA, &tpl) - if err != nil { - err = cferr.Wrap(cferr.CSRError, cferr.GenerationFailed, err) - return - } - } - - csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv) - if err != nil { - log.Errorf("failed to generate a CSR: %v", err) - err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err) - return - } - block := pem.Block{ - Type: "CERTIFICATE REQUEST", - Bytes: csr, - } - - log.Info("encoded CSR") - csr = pem.EncodeToMemory(&block) - return -} - -// appendCAInfoToCSR appends CAConfig BasicConstraint extension to a CSR -func appendCAInfoToCSR(reqConf *CAConfig, csr *x509.CertificateRequest) error { - pathlen := reqConf.PathLength - if pathlen == 0 && !reqConf.PathLenZero { - pathlen = -1 - } - val, err := asn1.Marshal(BasicConstraints{true, pathlen}) - - if err != nil { - return err - } - - csr.ExtraExtensions = []pkix.Extension{ - { - Id: asn1.ObjectIdentifier{2, 5, 29, 19}, - Value: val, - Critical: true, - }, - } - - return nil -} diff --git a/vendor/src/github.com/cloudflare/cfssl/errors/doc.go b/vendor/src/github.com/cloudflare/cfssl/errors/doc.go deleted file mode 100644 index 1910e2662..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/errors/doc.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Package errors provides error types returned in CF SSL. - -1. Type Error is intended for errors produced by CF SSL packages. -It formats to a json object that consists of an error message and a 4-digit code for error reasoning. - -Example: {"code":1002, "message": "Failed to decode certificate"} - -The index of codes are listed below: - 1XXX: CertificateError - 1000: Unknown - 1001: ReadFailed - 1002: DecodeFailed - 1003: ParseFailed - 1100: SelfSigned - 12XX: VerifyFailed - 121X: CertificateInvalid - 1210: NotAuthorizedToSign - 1211: Expired - 1212: CANotAuthorizedForThisName - 1213: TooManyIntermediates - 1214: IncompatibleUsage - 1220: UnknownAuthority - 2XXX: PrivatekeyError - 2000: Unknown - 2001: ReadFailed - 2002: DecodeFailed - 2003: ParseFailed - 2100: Encrypted - 2200: NotRSA - 2300: KeyMismatch - 2400: GenerationFailed - 2500: Unavailable - 3XXX: IntermediatesError - 4XXX: RootError - 5XXX: PolicyError - 5100: NoKeyUsages - 5200: InvalidPolicy - 5300: InvalidRequest - 5400: UnknownProfile - 6XXX: DialError - -2. Type HttpError is intended for CF SSL API to consume. It contains a HTTP status code that will be read and returned -by the API server. 
-*/ -package errors diff --git a/vendor/src/github.com/cloudflare/cfssl/errors/error.go b/vendor/src/github.com/cloudflare/cfssl/errors/error.go deleted file mode 100644 index 88663b2c6..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/errors/error.go +++ /dev/null @@ -1,420 +0,0 @@ -package errors - -import ( - "crypto/x509" - "encoding/json" - "fmt" -) - -// Error is the error type usually returned by functions in CF SSL package. -// It contains a 4-digit error code where the most significant digit -// describes the category where the error occurred and the rest 3 digits -// describe the specific error reason. -type Error struct { - ErrorCode int `json:"code"` - Message string `json:"message"` -} - -// Category is the most significant digit of the error code. -type Category int - -// Reason is the last 3 digits of the error code. -type Reason int - -const ( - // Success indicates no error occurred. - Success Category = 1000 * iota // 0XXX - - // CertificateError indicates a fault in a certificate. - CertificateError // 1XXX - - // PrivateKeyError indicates a fault in a private key. - PrivateKeyError // 2XXX - - // IntermediatesError indicates a fault in an intermediate. - IntermediatesError // 3XXX - - // RootError indicates a fault in a root. - RootError // 4XXX - - // PolicyError indicates an error arising from a malformed or - // non-existent policy, or a breach of policy. - PolicyError // 5XXX - - // DialError indicates a network fault. - DialError // 6XXX - - // APIClientError indicates a problem with the API client. - APIClientError // 7XXX - - // OCSPError indicates a problem with OCSP signing - OCSPError // 8XXX - - // CSRError indicates a problem with CSR parsing - CSRError // 9XXX - - // CTError indicates a problem with the certificate transparency process - CTError // 10XXX - - // CertStoreError indicates a problem with the certificate store - CertStoreError // 11XXX -) - -// None is a non-specified error. -const ( - None Reason = iota -) - -// Warning code for a success -const ( - BundleExpiringBit int = 1 << iota // 0x01 - BundleNotUbiquitousBit // 0x02 -) - -// Parsing errors -const ( - Unknown Reason = iota // X000 - ReadFailed // X001 - DecodeFailed // X002 - ParseFailed // X003 -) - -// The following represent certificate non-parsing errors, and must be -// specified along with CertificateError. -const ( - // SelfSigned indicates that a certificate is self-signed and - // cannot be used in the manner being attempted. - SelfSigned Reason = 100 * (iota + 1) // Code 11XX - - // VerifyFailed is an X.509 verification failure. The least two - // significant digits of 12XX is determined as the actual x509 - // error is examined. - VerifyFailed // Code 12XX - - // BadRequest indicates that the certificate request is invalid. - BadRequest // Code 13XX - - // MissingSerial indicates that the profile specified - // 'ClientProvidesSerialNumbers', but the SignRequest did not include a serial - // number. - MissingSerial // Code 14XX -) - -const ( - certificateInvalid = 10 * (iota + 1) //121X - unknownAuthority //122x -) - -// The following represent private-key non-parsing errors, and must be -// specified with PrivateKeyError. -const ( - // Encrypted indicates that the private key is a PKCS #8 encrypted - // private key. At this time, CFSSL does not support decrypting - // these keys. 
-    Encrypted Reason = 100 * (iota + 1) //21XX
-
-    // NotRSAOrECC indicates that the key is not an RSA or ECC
-    // private key; these are the only two private key types supported
-    // at this time by CFSSL.
-    NotRSAOrECC //22XX
-
-    // KeyMismatch indicates that the private key does not match
-    // the public key or certificate being presented with the key.
-    KeyMismatch //23XX
-
-    // GenerationFailed indicates that a private key could not
-    // be generated.
-    GenerationFailed //24XX
-
-    // Unavailable indicates that a private key mechanism (such as
-    // PKCS #11) was requested but support for that mechanism is
-    // not available.
-    Unavailable
-)
-
-// The following are policy-related non-parsing errors, and must be
-// specified along with PolicyError.
-const (
-    // NoKeyUsages indicates that the profile does not permit any
-    // key usages for the certificate.
-    NoKeyUsages Reason = 100 * (iota + 1) // 51XX
-
-    // InvalidPolicy indicates that the policy being requested is not
-    // a valid policy or does not exist.
-    InvalidPolicy // 52XX
-
-    // InvalidRequest indicates a certificate request violated the
-    // constraints of the policy being applied to the request.
-    InvalidRequest // 53XX
-
-    // UnknownProfile indicates that the profile does not exist.
-    UnknownProfile // 54XX
-)
-
-// The following are API client related errors, and should be
-// specified with APIClientError.
-const (
-    // AuthenticationFailure occurs when the client is unable
-    // to obtain an authentication token for the request.
-    AuthenticationFailure Reason = 100 * (iota + 1)
-
-    // JSONError wraps an encoding/json error.
-    JSONError
-
-    // IOError wraps an io/ioutil error.
-    IOError
-
-    // ClientHTTPError wraps a net/http error.
-    ClientHTTPError
-
-    // ServerRequestFailed covers any other failures from the API
-    // client.
-    ServerRequestFailed
-)
-
-// The following are OCSP related errors, and should be
-// specified with OCSPError.
-const (
-    // IssuerMismatch occurs when the certificate in the OCSP signing
-    // request was not issued by the CA that this responder responds for.
-    IssuerMismatch Reason = 100 * (iota + 1) // 81XX
-
-    // InvalidStatus occurs when the OCSP signing request includes an
-    // invalid value for the certificate status.
-    InvalidStatus
-)
-
-// Certificate transparency related errors specified with CTError.
-const (
-    // PrecertSubmissionFailed occurs when submitting a precertificate to
-    // a log server fails.
-    PrecertSubmissionFailed = 100 * (iota + 1)
-)
-
-// Certificate persistence related errors specified with CertStoreError.
-const (
-    // InsertionFailed occurs when a SQL insert query fails to complete.
-    InsertionFailed = 100 * (iota + 1)
-    // RecordNotFound occurs when a SQL query targeting one unique
-    // record fails to update the specified row in the table.
-    RecordNotFound
-)
-
-// The error interface implementation, which formats to a JSON object string.
-func (e *Error) Error() string {
-    marshaled, err := json.Marshal(e)
-    if err != nil {
-        panic(err)
-    }
-    return string(marshaled)
-}
-
-// New returns an error that contains an error code and message derived from
-// the given category, reason.
Currently, to avoid confusion, it is not -// allowed to create an error of category Success -func New(category Category, reason Reason) *Error { - errorCode := int(category) + int(reason) - var msg string - switch category { - case OCSPError: - switch reason { - case ReadFailed: - msg = "No certificate provided" - case IssuerMismatch: - msg = "Certificate not issued by this issuer" - case InvalidStatus: - msg = "Invalid revocation status" - } - case CertificateError: - switch reason { - case Unknown: - msg = "Unknown certificate error" - case ReadFailed: - msg = "Failed to read certificate" - case DecodeFailed: - msg = "Failed to decode certificate" - case ParseFailed: - msg = "Failed to parse certificate" - case SelfSigned: - msg = "Certificate is self signed" - case VerifyFailed: - msg = "Unable to verify certificate" - case BadRequest: - msg = "Invalid certificate request" - case MissingSerial: - msg = "Missing serial number in request" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category CertificateError.", - reason)) - - } - case PrivateKeyError: - switch reason { - case Unknown: - msg = "Unknown private key error" - case ReadFailed: - msg = "Failed to read private key" - case DecodeFailed: - msg = "Failed to decode private key" - case ParseFailed: - msg = "Failed to parse private key" - case Encrypted: - msg = "Private key is encrypted." - case NotRSAOrECC: - msg = "Private key algorithm is not RSA or ECC" - case KeyMismatch: - msg = "Private key does not match public key" - case GenerationFailed: - msg = "Failed to new private key" - case Unavailable: - msg = "Private key is unavailable" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PrivateKeyError.", - reason)) - } - case IntermediatesError: - switch reason { - case Unknown: - msg = "Unknown intermediate certificate error" - case ReadFailed: - msg = "Failed to read intermediate certificate" - case DecodeFailed: - msg = "Failed to decode intermediate certificate" - case ParseFailed: - msg = "Failed to parse intermediate certificate" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category IntermediatesError.", - reason)) - } - case RootError: - switch reason { - case Unknown: - msg = "Unknown root certificate error" - case ReadFailed: - msg = "Failed to read root certificate" - case DecodeFailed: - msg = "Failed to decode root certificate" - case ParseFailed: - msg = "Failed to parse root certificate" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category RootError.", - reason)) - } - case PolicyError: - switch reason { - case Unknown: - msg = "Unknown policy error" - case NoKeyUsages: - msg = "Invalid policy: no key usage available" - case InvalidPolicy: - msg = "Invalid or unknown policy" - case InvalidRequest: - msg = "Policy violation request" - case UnknownProfile: - msg = "Unknown policy profile" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PolicyError.", - reason)) - } - case DialError: - switch reason { - case Unknown: - msg = "Failed to dial remote server" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category DialError.", - reason)) - } - case APIClientError: - switch reason { - case AuthenticationFailure: - msg = "API client authentication failure" - case JSONError: - msg = "API client JSON config error" - case ClientHTTPError: - msg = "API client HTTP error" - case IOError: - msg = "API client IO error" - case ServerRequestFailed: - msg = "API 
client error: Server request failed" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category APIClientError.", - reason)) - } - case CSRError: - switch reason { - case Unknown: - msg = "CSR parsing failed due to unknown error" - case ReadFailed: - msg = "CSR file read failed" - case ParseFailed: - msg = "CSR Parsing failed" - case DecodeFailed: - msg = "CSR Decode failed" - case BadRequest: - msg = "CSR Bad request" - default: - panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category APIClientError.", reason)) - } - case CTError: - switch reason { - case Unknown: - msg = "Certificate transparency parsing failed due to unknown error" - case PrecertSubmissionFailed: - msg = "Certificate transparency precertificate submission failed" - default: - panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CTError.", reason)) - } - case CertStoreError: - switch reason { - case Unknown: - msg = "Certificate store action failed due to unknown error" - default: - panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CertStoreError.", reason)) - } - - default: - panic(fmt.Sprintf("Unsupported CFSSL error type: %d.", - category)) - } - return &Error{ErrorCode: errorCode, Message: msg} -} - -// Wrap returns an error that contains the given error and an error code derived from -// the given category, reason and the error. Currently, to avoid confusion, it is not -// allowed to create an error of category Success -func Wrap(category Category, reason Reason, err error) *Error { - errorCode := int(category) + int(reason) - if err == nil { - panic("Wrap needs a supplied error to initialize.") - } - - // do not double wrap a error - switch err.(type) { - case *Error: - panic("Unable to wrap a wrapped error.") - } - - switch category { - case CertificateError: - // given VerifyFailed , report the status with more detailed status code - // for some certificate errors we care. - if reason == VerifyFailed { - switch errorType := err.(type) { - case x509.CertificateInvalidError: - errorCode += certificateInvalid + int(errorType.Reason) - case x509.UnknownAuthorityError: - errorCode += unknownAuthority - } - } - case PrivateKeyError, IntermediatesError, RootError, PolicyError, DialError, - APIClientError, CSRError, CTError, CertStoreError: - // no-op, just use the error - default: - panic(fmt.Sprintf("Unsupported CFSSL error type: %d.", - category)) - } - - return &Error{ErrorCode: errorCode, Message: err.Error()} - -} diff --git a/vendor/src/github.com/cloudflare/cfssl/errors/http.go b/vendor/src/github.com/cloudflare/cfssl/errors/http.go deleted file mode 100644 index c9c0a39c7..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/errors/http.go +++ /dev/null @@ -1,47 +0,0 @@ -package errors - -import ( - "errors" - "net/http" -) - -// HTTPError is an augmented error with a HTTP status code. -type HTTPError struct { - StatusCode int - error -} - -// Error implements the error interface. -func (e *HTTPError) Error() string { - return e.error.Error() -} - -// NewMethodNotAllowed returns an appropriate error in the case that -// an HTTP client uses an invalid method (i.e. a GET in place of a POST) -// on an API endpoint. -func NewMethodNotAllowed(method string) *HTTPError { - return &HTTPError{http.StatusMethodNotAllowed, errors.New(`Method is not allowed:"` + method + `"`)} -} - -// NewBadRequest creates a HttpError with the given error and error code 400. 
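As a sketch of how these constructors compose in practice, with the code value taken from the table in doc.go above:

    package main

    import (
        "fmt"

        cferr "github.com/cloudflare/cfssl/errors"
    )

    func main() {
        err := cferr.New(cferr.CertificateError, cferr.ParseFailed)
        // Error() marshals the error itself, so printing yields JSON:
        // {"code":1003,"message":"Failed to parse certificate"}
        fmt.Println(err)
    }

Wrap behaves the same way but carries the wrapped error's message, and panics if handed a nil error or an already wrapped *Error.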
-func NewBadRequest(err error) *HTTPError { - return &HTTPError{http.StatusBadRequest, err} -} - -// NewBadRequestString returns a HttpError with the supplied message -// and error code 400. -func NewBadRequestString(s string) *HTTPError { - return NewBadRequest(errors.New(s)) -} - -// NewBadRequestMissingParameter returns a 400 HttpError as a required -// parameter is missing in the HTTP request. -func NewBadRequestMissingParameter(s string) *HTTPError { - return NewBadRequestString(`Missing parameter "` + s + `"`) -} - -// NewBadRequestUnwantedParameter returns a 400 HttpError as a unnecessary -// parameter is present in the HTTP request. -func NewBadRequestUnwantedParameter(s string) *HTTPError { - return NewBadRequestString(`Unwanted parameter "` + s + `"`) -} diff --git a/vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go b/vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go deleted file mode 100644 index bcc741850..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package derhelpers implements common functionality -// on DER encoded data -package derhelpers - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - - cferr "github.com/cloudflare/cfssl/errors" -) - -// ParsePrivateKeyDER parses a PKCS #1, PKCS #8, or elliptic curve -// DER-encoded private key. The key must not be in PEM format. -func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) { - generalKey, err := x509.ParsePKCS8PrivateKey(keyDER) - if err != nil { - generalKey, err = x509.ParsePKCS1PrivateKey(keyDER) - if err != nil { - generalKey, err = x509.ParseECPrivateKey(keyDER) - if err != nil { - // We don't include the actual error into - // the final error. The reason might be - // we don't want to leak any info about - // the private key. - return nil, cferr.New(cferr.PrivateKeyError, - cferr.ParseFailed) - } - } - } - - switch generalKey.(type) { - case *rsa.PrivateKey: - return generalKey.(*rsa.PrivateKey), nil - case *ecdsa.PrivateKey: - return generalKey.(*ecdsa.PrivateKey), nil - } - - // should never reach here - return nil, cferr.New(cferr.PrivateKeyError, cferr.ParseFailed) -} diff --git a/vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go b/vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go deleted file mode 100644 index 85b0d4a31..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go +++ /dev/null @@ -1,479 +0,0 @@ -// Package helpers implements utility functionality common to many -// CFSSL packages. -package helpers - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/x509" - "encoding/asn1" - "encoding/pem" - "errors" - "io/ioutil" - "math/big" - - "strings" - "time" - - "github.com/cloudflare/cfssl/crypto/pkcs7" - cferr "github.com/cloudflare/cfssl/errors" - "github.com/cloudflare/cfssl/helpers/derhelpers" - "github.com/cloudflare/cfssl/log" - "golang.org/x/crypto/pkcs12" -) - -// OneYear is a time.Duration representing a year's worth of seconds. -const OneYear = 8760 * time.Hour - -// OneDay is a time.Duration representing a day's worth of seconds. -const OneDay = 24 * time.Hour - -// InclusiveDate returns the time.Time representation of a date - 1 -// nanosecond. This allows time.After to be used inclusively. 
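A round-trip sketch for ParsePrivateKeyDER above; the key material is generated on the fly, so nothing is assumed beyond the vendored import path.

    package main

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "crypto/x509"
        "fmt"
        "log"

        "github.com/cloudflare/cfssl/helpers/derhelpers"
    )

    func main() {
        key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            log.Fatal(err)
        }
        der, err := x509.MarshalECPrivateKey(key)
        if err != nil {
            log.Fatal(err)
        }
        signer, err := derhelpers.ParsePrivateKeyDER(der)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("round-tripped key, public type %T\n", signer.Public())
    }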
-func InclusiveDate(year int, month time.Month, day int) time.Time { - return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond) -} - -// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop -// issuing certificates valid for more than 5 years. -var Jul2012 = InclusiveDate(2012, time.July, 01) - -// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop -// issuing certificates valid for more than 39 months. -var Apr2015 = InclusiveDate(2015, time.April, 01) - -// KeyLength returns the bit size of ECDSA or RSA PublicKey -func KeyLength(key interface{}) int { - if key == nil { - return 0 - } - if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok { - return ecdsaKey.Curve.Params().BitSize - } else if rsaKey, ok := key.(*rsa.PublicKey); ok { - return rsaKey.N.BitLen() - } - - return 0 -} - -// ExpiryTime returns the time when the certificate chain is expired. -func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) { - if len(chain) == 0 { - return - } - - notAfter = chain[0].NotAfter - for _, cert := range chain { - if notAfter.After(cert.NotAfter) { - notAfter = cert.NotAfter - } - } - return -} - -// MonthsValid returns the number of months for which a certificate is valid. -func MonthsValid(c *x509.Certificate) int { - issued := c.NotBefore - expiry := c.NotAfter - years := (expiry.Year() - issued.Year()) - months := years*12 + int(expiry.Month()) - int(issued.Month()) - - // Round up if valid for less than a full month - if expiry.Day() > issued.Day() { - months++ - } - return months -} - -// ValidExpiry determines if a certificate is valid for an acceptable -// length of time per the CA/Browser Forum baseline requirements. -// See https://cabforum.org/wp-content/uploads/CAB-Forum-BR-1.3.0.pdf -func ValidExpiry(c *x509.Certificate) bool { - issued := c.NotBefore - - var maxMonths int - switch { - case issued.After(Apr2015): - maxMonths = 39 - case issued.After(Jul2012): - maxMonths = 60 - case issued.Before(Jul2012): - maxMonths = 120 - } - - if MonthsValid(c) > maxMonths { - return false - } - return true -} - -// SignatureString returns the TLS signature string corresponding to -// an X509 signature algorithm. -func SignatureString(alg x509.SignatureAlgorithm) string { - switch alg { - case x509.MD2WithRSA: - return "MD2WithRSA" - case x509.MD5WithRSA: - return "MD5WithRSA" - case x509.SHA1WithRSA: - return "SHA1WithRSA" - case x509.SHA256WithRSA: - return "SHA256WithRSA" - case x509.SHA384WithRSA: - return "SHA384WithRSA" - case x509.SHA512WithRSA: - return "SHA512WithRSA" - case x509.DSAWithSHA1: - return "DSAWithSHA1" - case x509.DSAWithSHA256: - return "DSAWithSHA256" - case x509.ECDSAWithSHA1: - return "ECDSAWithSHA1" - case x509.ECDSAWithSHA256: - return "ECDSAWithSHA256" - case x509.ECDSAWithSHA384: - return "ECDSAWithSHA384" - case x509.ECDSAWithSHA512: - return "ECDSAWithSHA512" - default: - return "Unknown Signature" - } -} - -// HashAlgoString returns the hash algorithm name contains in the signature -// method. 
-func HashAlgoString(alg x509.SignatureAlgorithm) string { - switch alg { - case x509.MD2WithRSA: - return "MD2" - case x509.MD5WithRSA: - return "MD5" - case x509.SHA1WithRSA: - return "SHA1" - case x509.SHA256WithRSA: - return "SHA256" - case x509.SHA384WithRSA: - return "SHA384" - case x509.SHA512WithRSA: - return "SHA512" - case x509.DSAWithSHA1: - return "SHA1" - case x509.DSAWithSHA256: - return "SHA256" - case x509.ECDSAWithSHA1: - return "SHA1" - case x509.ECDSAWithSHA256: - return "SHA256" - case x509.ECDSAWithSHA384: - return "SHA384" - case x509.ECDSAWithSHA512: - return "SHA512" - default: - return "Unknown Hash Algorithm" - } -} - -// EncodeCertificatesPEM encodes a number of x509 certficates to PEM -func EncodeCertificatesPEM(certs []*x509.Certificate) []byte { - var buffer bytes.Buffer - for _, cert := range certs { - pem.Encode(&buffer, &pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Raw, - }) - } - - return buffer.Bytes() -} - -// EncodeCertificatePEM encodes a single x509 certficates to PEM -func EncodeCertificatePEM(cert *x509.Certificate) []byte { - return EncodeCertificatesPEM([]*x509.Certificate{cert}) -} - -// ParseCertificatesPEM parses a sequence of PEM-encoded certificate and returns them, -// can handle PEM encoded PKCS #7 structures. -func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) { - var certs []*x509.Certificate - var err error - certsPEM = bytes.TrimSpace(certsPEM) - for len(certsPEM) > 0 { - var cert []*x509.Certificate - cert, certsPEM, err = ParseOneCertificateFromPEM(certsPEM) - if err != nil { - - return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed) - } else if cert == nil { - break - } - - certs = append(certs, cert...) - } - if len(certsPEM) > 0 { - return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed) - } - return certs, nil -} - -// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key, -// either PKCS #7, PKCS #12, or raw x509. -func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) { - certsDER = bytes.TrimSpace(certsDER) - pkcs7data, err := pkcs7.ParsePKCS7(certsDER) - if err != nil { - var pkcs12data interface{} - certs = make([]*x509.Certificate, 1) - pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password) - if err != nil { - certs, err = x509.ParseCertificates(certsDER) - if err != nil { - return nil, nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed) - } - } else { - key = pkcs12data.(crypto.Signer) - } - } else { - if pkcs7data.ContentInfo != "SignedData" { - return nil, nil, cferr.Wrap(cferr.CertificateError, cferr.DecodeFailed, errors.New("can only extract certificates from signed data content info")) - } - certs = pkcs7data.Content.SignedData.Certificates - } - if certs == nil { - return nil, key, cferr.New(cferr.CertificateError, cferr.DecodeFailed) - } - return certs, key, nil -} - -// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and check if it is self-signed. -func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) { - cert, err := ParseCertificatePEM(certPEM) - if err != nil { - return nil, err - } - - if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.VerifyFailed, err) - } - return cert, nil -} - -// ParseCertificatePEM parses and returns a PEM-encoded certificate, -// can handle PEM encoded PKCS #7 structures. 
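A short sketch combining two of the helpers defined above, ParseCertificatesPEM and ExpiryTime; the bundle file name is hypothetical.

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"

        "github.com/cloudflare/cfssl/helpers"
    )

    func main() {
        pemBytes, err := ioutil.ReadFile("chain.pem") // assumed PEM bundle
        if err != nil {
            log.Fatal(err)
        }
        certs, err := helpers.ParseCertificatesPEM(pemBytes)
        if err != nil {
            log.Fatal(err)
        }
        for _, c := range certs {
            fmt.Println(c.Subject.CommonName)
        }
        // ExpiryTime reports the earliest NotAfter across the chain.
        fmt.Println("chain expires:", helpers.ExpiryTime(certs))
    }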
-func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) { - certPEM = bytes.TrimSpace(certPEM) - cert, rest, err := ParseOneCertificateFromPEM(certPEM) - if err != nil { - // Log the actual parsing error but throw a default parse error message. - log.Debugf("Certificate parsing error: %v", err) - return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed) - } else if cert == nil { - return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed) - } else if len(rest) > 0 { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PEM file should contain only one object")) - } else if len(cert) > 1 { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PKCS7 object in the PEM file should contain only one certificate")) - } - return cert[0], nil -} - -// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object, -// either a raw x509 certificate or a PKCS #7 structure possibly containing -// multiple certificates, from the top of certsPEM, which itself may -// contain multiple PEM encoded certificate objects. -func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) { - - block, rest := pem.Decode(certsPEM) - if block == nil { - return nil, rest, nil - } - - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes) - if err != nil { - return nil, rest, err - } - if pkcs7data.ContentInfo != "SignedData" { - return nil, rest, errors.New("only PKCS #7 Signed Data Content Info supported for certificate parsing") - } - certs := pkcs7data.Content.SignedData.Certificates - if certs == nil { - return nil, rest, errors.New("PKCS #7 structure contains no certificates") - } - return certs, rest, nil - } - var certs = []*x509.Certificate{cert} - return certs, rest, nil -} - -// LoadPEMCertPool loads a pool of PEM certificates from file. -func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) { - pemCerts, err := ioutil.ReadFile(certsFile) - if err != nil { - return nil, err - } - - certPool := x509.NewCertPool() - if !certPool.AppendCertsFromPEM(pemCerts) { - return nil, errors.New("failed to load cert pool") - } - - return certPool, nil -} - -// ParsePrivateKeyPEM parses and returns a PEM-encoded private -// key. The private key may be either an unencrypted PKCS#8, PKCS#1, -// or elliptic private key. -func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) { - return ParsePrivateKeyPEMWithPassword(keyPEM, nil) -} - -// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private -// key. The private key may be a potentially encrypted PKCS#8, PKCS#1, -// or elliptic private key. -func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) { - keyDER, err := GetKeyDERFromPEM(keyPEM, password) - if err != nil { - return nil, err - } - - return derhelpers.ParsePrivateKeyDER(keyDER) -} - -// GetKeyDERFromPEM parses a PEM-encoded private key and returns DER-format key bytes. 
-func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) { - keyDER, _ := pem.Decode(in) - if keyDER != nil { - if procType, ok := keyDER.Headers["Proc-Type"]; ok { - if strings.Contains(procType, "ENCRYPTED") { - if password != nil { - return x509.DecryptPEMBlock(keyDER, password) - } - return nil, cferr.New(cferr.PrivateKeyError, cferr.Encrypted) - } - } - return keyDER.Bytes, nil - } - - return nil, cferr.New(cferr.PrivateKeyError, cferr.DecodeFailed) -} - -// CheckSignature verifies a signature made by the key on a CSR, such -// as on the CSR itself. -func CheckSignature(csr *x509.CertificateRequest, algo x509.SignatureAlgorithm, signed, signature []byte) error { - var hashType crypto.Hash - - switch algo { - case x509.SHA1WithRSA, x509.ECDSAWithSHA1: - hashType = crypto.SHA1 - case x509.SHA256WithRSA, x509.ECDSAWithSHA256: - hashType = crypto.SHA256 - case x509.SHA384WithRSA, x509.ECDSAWithSHA384: - hashType = crypto.SHA384 - case x509.SHA512WithRSA, x509.ECDSAWithSHA512: - hashType = crypto.SHA512 - default: - return x509.ErrUnsupportedAlgorithm - } - - if !hashType.Available() { - return x509.ErrUnsupportedAlgorithm - } - h := hashType.New() - - h.Write(signed) - digest := h.Sum(nil) - - switch pub := csr.PublicKey.(type) { - case *rsa.PublicKey: - return rsa.VerifyPKCS1v15(pub, hashType, digest, signature) - case *ecdsa.PublicKey: - ecdsaSig := new(struct{ R, S *big.Int }) - if _, err := asn1.Unmarshal(signature, ecdsaSig); err != nil { - return err - } - if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { - return errors.New("x509: ECDSA signature contained zero or negative values") - } - if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) { - return errors.New("x509: ECDSA verification failure") - } - return nil - } - return x509.ErrUnsupportedAlgorithm -} - -// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request. -func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) { - in = bytes.TrimSpace(in) - p, rest := pem.Decode(in) - if p != nil { - if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" { - return nil, rest, cferr.New(cferr.CSRError, cferr.BadRequest) - } - - csr, err = x509.ParseCertificateRequest(p.Bytes) - } else { - csr, err = x509.ParseCertificateRequest(in) - } - - if err != nil { - return nil, rest, err - } - - err = CheckSignature(csr, csr.SignatureAlgorithm, csr.RawTBSCertificateRequest, csr.Signature) - if err != nil { - return nil, rest, err - } - - return csr, rest, nil -} - -// ParseCSRPEM parses a PEM-encoded certificiate signing request. -// It does not check the signature. This is useful for dumping data from a CSR -// locally. -func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) { - block, _ := pem.Decode([]byte(csrPEM)) - der := block.Bytes - csrObject, err := x509.ParseCertificateRequest(der) - - if err != nil { - return nil, err - } - - return csrObject, nil -} - -// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer. 
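A sketch of CSR parsing with the helpers above; note that ParseCSR verifies the request's own signature via CheckSignature before returning. The input file name is illustrative.

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"

        "github.com/cloudflare/cfssl/helpers"
    )

    func main() {
        in, err := ioutil.ReadFile("request.csr") // assumed PEM or DER CSR
        if err != nil {
            log.Fatal(err)
        }
        req, rest, err := helpers.ParseCSR(in)
        if err != nil {
            log.Fatal(err) // covers decode failures and a failed signature self-check
        }
        if len(rest) > 0 {
            log.Printf("ignoring %d trailing bytes", len(rest))
        }
        fmt.Println("CSR subject CN:", req.Subject.CommonName)
    }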
-func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm { - switch pub := priv.Public().(type) { - case *rsa.PublicKey: - bitLength := pub.N.BitLen() - switch { - case bitLength >= 4096: - return x509.SHA512WithRSA - case bitLength >= 3072: - return x509.SHA384WithRSA - case bitLength >= 2048: - return x509.SHA256WithRSA - default: - return x509.SHA1WithRSA - } - case *ecdsa.PublicKey: - switch pub.Curve { - case elliptic.P521(): - return x509.ECDSAWithSHA512 - case elliptic.P384(): - return x509.ECDSAWithSHA384 - case elliptic.P256(): - return x509.ECDSAWithSHA256 - default: - return x509.ECDSAWithSHA1 - } - default: - return x509.UnknownSignatureAlgorithm - } -} diff --git a/vendor/src/github.com/cloudflare/cfssl/info/info.go b/vendor/src/github.com/cloudflare/cfssl/info/info.go deleted file mode 100644 index 926a411ff..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/info/info.go +++ /dev/null @@ -1,15 +0,0 @@ -// Package info contains the definitions for the info endpoint -package info - -// Req is the request struct for an info API request. -type Req struct { - Label string `json:"label"` - Profile string `json:"profile"` -} - -// Resp is the response for an Info API request. -type Resp struct { - Certificate string `json:"certificate"` - Usage []string `json:"usages"` - ExpiryString string `json:"expiry"` -} diff --git a/vendor/src/github.com/cloudflare/cfssl/initca/initca.go b/vendor/src/github.com/cloudflare/cfssl/initca/initca.go deleted file mode 100644 index 320ffb70b..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/initca/initca.go +++ /dev/null @@ -1,223 +0,0 @@ -// Package initca contains code to initialise a certificate authority, -// generating a new root key and certificate. -package initca - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "errors" - "io/ioutil" - "time" - - "github.com/cloudflare/cfssl/config" - "github.com/cloudflare/cfssl/csr" - cferr "github.com/cloudflare/cfssl/errors" - "github.com/cloudflare/cfssl/helpers" - "github.com/cloudflare/cfssl/log" - "github.com/cloudflare/cfssl/signer" - "github.com/cloudflare/cfssl/signer/local" -) - -// validator contains the default validation logic for certificate -// authority certificates. The only requirement here is that the -// certificate have a non-empty subject field. -func validator(req *csr.CertificateRequest) error { - if req.CN != "" { - return nil - } - - if len(req.Names) == 0 { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information")) - } - - for i := range req.Names { - if csr.IsNameEmpty(req.Names[i]) { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information")) - } - } - - return nil -} - -// New creates a new root certificate from the certificate request. 
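A hedged sketch of driving initca.New, which is declared just below; the subject values and expiry are invented, and the expiry string mirrors the 43800h default in CAPolicy further down.

    package main

    import (
        "fmt"
        "log"

        "github.com/cloudflare/cfssl/csr"
        "github.com/cloudflare/cfssl/initca"
    )

    func main() {
        req := &csr.CertificateRequest{
            CN:         "Example Root CA",
            Names:      []csr.Name{{C: "US", O: "Example Org"}},
            KeyRequest: csr.NewBasicKeyRequest(),
            CA:         &csr.CAConfig{Expiry: "43800h"},
        }
        cert, csrPEM, key, err := initca.New(req)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("cert %d bytes, csr %d bytes, key %d bytes\n",
            len(cert), len(csrPEM), len(key))
    }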
-func New(req *csr.CertificateRequest) (cert, csrPEM, key []byte, err error) {
-	policy := CAPolicy()
-	if req.CA != nil {
-		if req.CA.Expiry != "" {
-			policy.Default.ExpiryString = req.CA.Expiry
-			policy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
-			if err != nil {
-				return
-			}
-		}
-
-		signer.MaxPathLen = req.CA.PathLength
-		if req.CA.PathLength != 0 && req.CA.PathLenZero {
-			log.Infof("ignoring invalid 'pathlenzero' value")
-		} else {
-			signer.MaxPathLenZero = req.CA.PathLenZero
-		}
-	}
-
-	g := &csr.Generator{Validator: validator}
-	csrPEM, key, err = g.ProcessRequest(req)
-	if err != nil {
-		log.Errorf("failed to process request: %v", err)
-		key = nil
-		return
-	}
-
-	priv, err := helpers.ParsePrivateKeyPEM(key)
-	if err != nil {
-		log.Errorf("failed to parse private key: %v", err)
-		return
-	}
-
-	s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), nil)
-	if err != nil {
-		log.Errorf("failed to create signer: %v", err)
-		return
-	}
-	s.SetPolicy(policy)
-
-	signReq := signer.SignRequest{Hosts: req.Hosts, Request: string(csrPEM)}
-	cert, err = s.Sign(signReq)
-
-	return
-}
-
-// NewFromPEM creates a new root certificate from the key file passed in.
-func NewFromPEM(req *csr.CertificateRequest, keyFile string) (cert, csrPEM []byte, err error) {
-	privData, err := ioutil.ReadFile(keyFile)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	priv, err := helpers.ParsePrivateKeyPEM(privData)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return NewFromSigner(req, priv)
-}
-
-// RenewFromPEM re-creates a root certificate from the CA cert and key
-// files. The resulting root certificate will have the input CA certificate
-// as the template and have the same expiry length. E.g. if the existing CA
-// is valid for a year from Jan 01 2015 to Jan 01 2016, the renewed certificate
-// will be valid from now and expire in one year as well.
-func RenewFromPEM(caFile, keyFile string) ([]byte, error) {
-	caBytes, err := ioutil.ReadFile(caFile)
-	if err != nil {
-		return nil, err
-	}
-
-	ca, err := helpers.ParseCertificatePEM(caBytes)
-	if err != nil {
-		return nil, err
-	}
-
-	keyBytes, err := ioutil.ReadFile(keyFile)
-	if err != nil {
-		return nil, err
-	}
-
-	key, err := helpers.ParsePrivateKeyPEM(keyBytes)
-	if err != nil {
-		return nil, err
-	}
-
-	return RenewFromSigner(ca, key)
-}
-
-// NewFromSigner creates a new root certificate from a crypto.Signer.
-func NewFromSigner(req *csr.CertificateRequest, priv crypto.Signer) (cert, csrPEM []byte, err error) {
-	policy := CAPolicy()
-	if req.CA != nil {
-		if req.CA.Expiry != "" {
-			policy.Default.ExpiryString = req.CA.Expiry
-			policy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
-			if err != nil {
-				return nil, nil, err
-			}
-		}
-
-		signer.MaxPathLen = req.CA.PathLength
-		if req.CA.PathLength != 0 && req.CA.PathLenZero {
-			log.Infof("ignoring invalid 'pathlenzero' value")
-		} else {
-			signer.MaxPathLenZero = req.CA.PathLenZero
-		}
-	}
-
-	csrPEM, err = csr.Generate(priv, req)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), nil)
-	if err != nil {
-		log.Errorf("failed to create signer: %v", err)
-		return
-	}
-	s.SetPolicy(policy)
-
-	signReq := signer.SignRequest{Request: string(csrPEM)}
-	cert, err = s.Sign(signReq)
-	return
-}
-
-// RenewFromSigner re-creates a root certificate from the CA cert and crypto.Signer.
-// The resulting root certificate will have the CA certificate
-// as the template and have the same expiry length. E.g. if the existing CA
-// is valid for a year from Jan 01 2015 to Jan 01 2016, the renewed certificate
-// will be valid from now and expire in one year as well.
-func RenewFromSigner(ca *x509.Certificate, priv crypto.Signer) ([]byte, error) {
-	if !ca.IsCA {
-		return nil, errors.New("input certificate is not a CA cert")
-	}
-
-	// match the certificate's public key against the private key
-	switch {
-	case ca.PublicKeyAlgorithm == x509.RSA:
-		var rsaPublicKey *rsa.PublicKey
-		var ok bool
-		if rsaPublicKey, ok = priv.Public().(*rsa.PublicKey); !ok {
-			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
-		}
-		if ca.PublicKey.(*rsa.PublicKey).N.Cmp(rsaPublicKey.N) != 0 {
-			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
-		}
-	case ca.PublicKeyAlgorithm == x509.ECDSA:
-		var ecdsaPublicKey *ecdsa.PublicKey
-		var ok bool
-		if ecdsaPublicKey, ok = priv.Public().(*ecdsa.PublicKey); !ok {
-			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
-		}
-		if ca.PublicKey.(*ecdsa.PublicKey).X.Cmp(ecdsaPublicKey.X) != 0 {
-			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
-		}
-	default:
-		return nil, cferr.New(cferr.PrivateKeyError, cferr.NotRSAOrECC)
-	}
-
-	req := csr.ExtractCertificateRequest(ca)
-
-	cert, _, err := NewFromSigner(req, priv)
-	return cert, err
-}
-
-// CAPolicy contains the CA issuing policy as the default policy.
-var CAPolicy = func() *config.Signing {
-	return &config.Signing{
-		Default: &config.SigningProfile{
-			Usage:        []string{"cert sign", "crl sign"},
-			ExpiryString: "43800h",
-			Expiry:       5 * helpers.OneYear,
-			CA:           true,
-		},
-	}
-}
diff --git a/vendor/src/github.com/cloudflare/cfssl/log/log.go b/vendor/src/github.com/cloudflare/cfssl/log/log.go
deleted file mode 100644
index 4ceacc9e2..000000000
--- a/vendor/src/github.com/cloudflare/cfssl/log/log.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Package log implements a wrapper around the Go standard library's
-// logging package. Clients should set the current log level; only
-// messages at or above that level will actually be logged. For example, if
-// Level is set to LevelWarning, only log messages at the Warning,
-// Error, and Critical levels will be logged.
-package log
-
-import (
-	"flag"
-	"fmt"
-	"log"
-	"os"
-)
-
-// The following constants represent logging levels in increasing order of severity.
-const (
-	// LevelDebug is the log level for Debug statements.
-	LevelDebug = iota
-	// LevelInfo is the log level for Info statements.
-	LevelInfo
-	// LevelWarning is the log level for Warning statements.
-	LevelWarning
-	// LevelError is the log level for Error statements.
-	LevelError
-	// LevelCritical is the log level for Critical statements.
-	LevelCritical
-	// LevelFatal is the log level for Fatal statements.
-	LevelFatal
-)
-
-var levelPrefix = [...]string{
-	LevelDebug:    "DEBUG",
-	LevelInfo:     "INFO",
-	LevelWarning:  "WARNING",
-	LevelError:    "ERROR",
-	LevelCritical: "CRITICAL",
-	LevelFatal:    "FATAL",
-}
-
-// Level stores the current logging level.
-var Level = LevelInfo
-
-// SyslogWriter specifies the necessary methods for an alternate output
-// destination passed in via SetLogger.
-//
-// SyslogWriter is satisfied by *syslog.Writer.
-type SyslogWriter interface {
-	Debug(string)
-	Info(string)
-	Warning(string)
-	Err(string)
-	Crit(string)
-	Emerg(string)
-}
-
-// syslogWriter stores the SetLogger() parameter.
-var syslogWriter SyslogWriter
-
-// SetLogger sets the logger used for output by this package.
-// A *syslog.Writer is a good choice for the logger parameter.
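For context, a caller of the initca API above would look roughly like the sketch below. The `csr.CAConfig` type and its field names are assumptions about cfssl's csr package of this vintage; `KeyRequest` is omitted on the assumption that the generator falls back to a default key type, and the file handling is deliberately naive:

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/csr"
	"github.com/cloudflare/cfssl/initca"
)

func main() {
	// CN and CA mirror the fields New consults above; the expiry string
	// is whatever time.ParseDuration accepts.
	req := &csr.CertificateRequest{
		CN: "Example Root CA",
		CA: &csr.CAConfig{Expiry: "43800h", PathLength: 1}, // type name assumed
	}

	cert, csrPEM, key, err := initca.New(req)
	if err != nil {
		log.Fatal(err)
	}

	// Persist the artifacts; real key handling would be more careful.
	_ = ioutil.WriteFile("ca.pem", cert, 0644)
	_ = ioutil.WriteFile("ca.csr", csrPEM, 0644)
	_ = ioutil.WriteFile("ca-key.pem", key, 0600)
}
```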
-// Call with a nil parameter to revert to default behavior. -func SetLogger(logger SyslogWriter) { - syslogWriter = logger -} - -func init() { - // Only define loglevel flag once. - if flag.Lookup("loglevel") == nil { - flag.IntVar(&Level, "loglevel", LevelInfo, "Log level (0 = DEBUG, 5 = FATAL)") - } -} - -func print(l int, msg string) { - if l >= Level { - if syslogWriter != nil { - switch l { - case LevelDebug: - syslogWriter.Debug(msg) - case LevelInfo: - syslogWriter.Info(msg) - case LevelWarning: - syslogWriter.Warning(msg) - case LevelError: - syslogWriter.Err(msg) - case LevelCritical: - syslogWriter.Crit(msg) - case LevelFatal: - syslogWriter.Emerg(msg) - } - } else { - log.Printf("[%s] %s", levelPrefix[l], msg) - } - } -} - -func outputf(l int, format string, v []interface{}) { - print(l, fmt.Sprintf(format, v...)) -} - -func output(l int, v []interface{}) { - print(l, fmt.Sprint(v...)) -} - -// Fatalf logs a formatted message at the "fatal" level and then exits. The -// arguments are handled in the same manner as fmt.Printf. -func Fatalf(format string, v ...interface{}) { - outputf(LevelFatal, format, v) - os.Exit(1) -} - -// Fatal logs its arguments at the "fatal" level and then exits. -func Fatal(v ...interface{}) { - output(LevelFatal, v) - os.Exit(1) -} - -// Criticalf logs a formatted message at the "critical" level. The -// arguments are handled in the same manner as fmt.Printf. -func Criticalf(format string, v ...interface{}) { - outputf(LevelCritical, format, v) -} - -// Critical logs its arguments at the "critical" level. -func Critical(v ...interface{}) { - output(LevelCritical, v) -} - -// Errorf logs a formatted message at the "error" level. The arguments -// are handled in the same manner as fmt.Printf. -func Errorf(format string, v ...interface{}) { - outputf(LevelError, format, v) -} - -// Error logs its arguments at the "error" level. -func Error(v ...interface{}) { - output(LevelError, v) -} - -// Warningf logs a formatted message at the "warning" level. The -// arguments are handled in the same manner as fmt.Printf. -func Warningf(format string, v ...interface{}) { - outputf(LevelWarning, format, v) -} - -// Warning logs its arguments at the "warning" level. -func Warning(v ...interface{}) { - output(LevelWarning, v) -} - -// Infof logs a formatted message at the "info" level. The arguments -// are handled in the same manner as fmt.Printf. -func Infof(format string, v ...interface{}) { - outputf(LevelInfo, format, v) -} - -// Info logs its arguments at the "info" level. -func Info(v ...interface{}) { - output(LevelInfo, v) -} - -// Debugf logs a formatted message at the "debug" level. The arguments -// are handled in the same manner as fmt.Printf. -func Debugf(format string, v ...interface{}) { - outputf(LevelDebug, format, v) -} - -// Debug logs its arguments at the "debug" level. -func Debug(v ...interface{}) { - output(LevelDebug, v) -} diff --git a/vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go b/vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go deleted file mode 100644 index a19b113d4..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go +++ /dev/null @@ -1,13 +0,0 @@ -// Package config in the ocsp directory provides configuration data for an OCSP -// signer. -package config - -import "time" - -// Config contains configuration information required to set up an OCSP signer. 
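A short usage sketch of the leveled logger defined above, grounded in the exported names shown (`Level`, `LevelDebug`, and the printf-style helpers):

```go
package main

import "github.com/cloudflare/cfssl/log"

func main() {
	// Log everything from DEBUG up; the package default is LevelInfo.
	log.Level = log.LevelDebug

	log.Debugf("parsed %d intermediates", 3) // printed, since Level == LevelDebug
	log.Info("signer ready")                 // printed at any level <= LevelInfo
	log.Warningf("profile %q not found, using default", "www")
}
```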
-type Config struct { - CACertFile string - ResponderCertFile string - KeyFile string - Interval time.Duration -} diff --git a/vendor/src/github.com/cloudflare/cfssl/signer/local/local.go b/vendor/src/github.com/cloudflare/cfssl/signer/local/local.go deleted file mode 100644 index d565bc4f4..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/signer/local/local.go +++ /dev/null @@ -1,469 +0,0 @@ -// Package local implements certificate signature functionality for CFSSL. -package local - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/binary" - "encoding/hex" - "encoding/pem" - "errors" - "io" - "io/ioutil" - "math/big" - "net" - "net/mail" - "os" - - "github.com/cloudflare/cfssl/certdb" - "github.com/cloudflare/cfssl/config" - cferr "github.com/cloudflare/cfssl/errors" - "github.com/cloudflare/cfssl/helpers" - "github.com/cloudflare/cfssl/info" - "github.com/cloudflare/cfssl/log" - "github.com/cloudflare/cfssl/signer" - "github.com/google/certificate-transparency/go" - "github.com/google/certificate-transparency/go/client" -) - -// Signer contains a signer that uses the standard library to -// support both ECDSA and RSA CA keys. -type Signer struct { - ca *x509.Certificate - priv crypto.Signer - policy *config.Signing - sigAlgo x509.SignatureAlgorithm - dbAccessor certdb.Accessor -} - -// NewSigner creates a new Signer directly from a -// private key and certificate, with optional policy. -func NewSigner(priv crypto.Signer, cert *x509.Certificate, sigAlgo x509.SignatureAlgorithm, policy *config.Signing) (*Signer, error) { - if policy == nil { - policy = &config.Signing{ - Profiles: map[string]*config.SigningProfile{}, - Default: config.DefaultConfig()} - } - - if !policy.Valid() { - return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy) - } - - return &Signer{ - ca: cert, - priv: priv, - sigAlgo: sigAlgo, - policy: policy, - }, nil -} - -// NewSignerFromFile generates a new local signer from a caFile -// and a caKey file, both PEM encoded. 
-func NewSignerFromFile(caFile, caKeyFile string, policy *config.Signing) (*Signer, error) {
-	log.Debug("Loading CA: ", caFile)
-	ca, err := ioutil.ReadFile(caFile)
-	if err != nil {
-		return nil, err
-	}
-	log.Debug("Loading CA key: ", caKeyFile)
-	cakey, err := ioutil.ReadFile(caKeyFile)
-	if err != nil {
-		return nil, cferr.Wrap(cferr.CertificateError, cferr.ReadFailed, err)
-	}
-
-	parsedCa, err := helpers.ParseCertificatePEM(ca)
-	if err != nil {
-		return nil, err
-	}
-
-	strPassword := os.Getenv("CFSSL_CA_PK_PASSWORD")
-	password := []byte(strPassword)
-	if strPassword == "" {
-		password = nil
-	}
-
-	priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password)
-	if err != nil {
-		log.Debugf("malformed private key: %v", err)
-		return nil, err
-	}
-
-	return NewSigner(priv, parsedCa, signer.DefaultSigAlgo(priv), policy)
-}
-
-func (s *Signer) sign(template *x509.Certificate, profile *config.SigningProfile) (cert []byte, err error) {
-	// FillTemplate may overwrite the CRL distribution points, so preserve
-	// any that were already set on the template.
-	var distPoints = template.CRLDistributionPoints
-	err = signer.FillTemplate(template, s.policy.Default, profile)
-	if err != nil {
-		return
-	}
-	if len(distPoints) > 0 {
-		template.CRLDistributionPoints = distPoints
-	}
-
-	var initRoot bool
-	if s.ca == nil {
-		if !template.IsCA {
-			err = cferr.New(cferr.PolicyError, cferr.InvalidRequest)
-			return
-		}
-		template.DNSNames = nil
-		template.EmailAddresses = nil
-		s.ca = template
-		initRoot = true
-	} else if template.IsCA {
-		template.DNSNames = nil
-		template.EmailAddresses = nil
-	}
-
-	derBytes, err := x509.CreateCertificate(rand.Reader, template, s.ca, template.PublicKey, s.priv)
-	if err != nil {
-		return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err)
-	}
-	if initRoot {
-		s.ca, err = x509.ParseCertificate(derBytes)
-		if err != nil {
-			return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
-		}
-	}
-
-	cert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
-	log.Infof("signed certificate with serial number %d", template.SerialNumber)
-	return
-}
-
-// replaceSliceIfEmpty replaces the contents of replaced with newContents if
-// the slice referenced by replaced is empty.
-func replaceSliceIfEmpty(replaced, newContents *[]string) {
-	if len(*replaced) == 0 {
-		*replaced = *newContents
-	}
-}
-
-// PopulateSubjectFromCSR has functionality similar to Name, except
-// it fills the fields of the resulting pkix.Name with req's if the
-// subject's corresponding fields are empty.
-func PopulateSubjectFromCSR(s *signer.Subject, req pkix.Name) pkix.Name {
-	// if no subject, use req
-	if s == nil {
-		return req
-	}
-
-	name := s.Name()
-
-	if name.CommonName == "" {
-		name.CommonName = req.CommonName
-	}
-
-	replaceSliceIfEmpty(&name.Country, &req.Country)
-	replaceSliceIfEmpty(&name.Province, &req.Province)
-	replaceSliceIfEmpty(&name.Locality, &req.Locality)
-	replaceSliceIfEmpty(&name.Organization, &req.Organization)
-	replaceSliceIfEmpty(&name.OrganizationalUnit, &req.OrganizationalUnit)
-	if name.SerialNumber == "" {
-		name.SerialNumber = req.SerialNumber
-	}
-	return name
-}
-
-// OverrideHosts fills template's IPAddresses, EmailAddresses, and DNSNames with the
-// content of hosts, if it is not nil.
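Putting NewSignerFromFile and the sign path together, a minimal end-to-end sketch; file names and hosts are illustrative, and an encrypted CA key would additionally require CFSSL_CA_PK_PASSWORD, per the code above:

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/signer"
	"github.com/cloudflare/cfssl/signer/local"
)

func main() {
	// A nil policy falls back to config.DefaultConfig(), as NewSigner shows.
	s, err := local.NewSignerFromFile("ca.pem", "ca-key.pem", nil)
	if err != nil {
		log.Fatal(err)
	}

	csrPEM, err := ioutil.ReadFile("server.csr")
	if err != nil {
		log.Fatal(err)
	}

	// Hosts replace the CSR's SANs via OverrideHosts; an empty Profile
	// selects the policy's default signing profile.
	cert, err := s.Sign(signer.SignRequest{
		Request: string(csrPEM),
		Hosts:   []string{"example.test", "192.0.2.10"},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = ioutil.WriteFile("server.pem", cert, 0644)
}
```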
-func OverrideHosts(template *x509.Certificate, hosts []string) { - if hosts != nil { - template.IPAddresses = []net.IP{} - template.EmailAddresses = []string{} - template.DNSNames = []string{} - } - - for i := range hosts { - if ip := net.ParseIP(hosts[i]); ip != nil { - template.IPAddresses = append(template.IPAddresses, ip) - } else if email, err := mail.ParseAddress(hosts[i]); err == nil && email != nil { - template.EmailAddresses = append(template.EmailAddresses, email.Address) - } else { - template.DNSNames = append(template.DNSNames, hosts[i]) - } - } - -} - -// Sign signs a new certificate based on the PEM-encoded client -// certificate or certificate request with the signing profile, -// specified by profileName. -func (s *Signer) Sign(req signer.SignRequest) (cert []byte, err error) { - profile, err := signer.Profile(s, req.Profile) - if err != nil { - return - } - - block, _ := pem.Decode([]byte(req.Request)) - if block == nil { - return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed) - } - - if block.Type != "NEW CERTIFICATE REQUEST" && block.Type != "CERTIFICATE REQUEST" { - return nil, cferr.Wrap(cferr.CSRError, - cferr.BadRequest, errors.New("not a certificate or csr")) - } - - csrTemplate, err := signer.ParseCertificateRequest(s, block.Bytes) - if err != nil { - return nil, err - } - - // Copy out only the fields from the CSR authorized by policy. - safeTemplate := x509.Certificate{} - // If the profile contains no explicit whitelist, assume that all fields - // should be copied from the CSR. - if profile.CSRWhitelist == nil { - safeTemplate = *csrTemplate - } else { - if profile.CSRWhitelist.Subject { - safeTemplate.Subject = csrTemplate.Subject - } - if profile.CSRWhitelist.PublicKeyAlgorithm { - safeTemplate.PublicKeyAlgorithm = csrTemplate.PublicKeyAlgorithm - } - if profile.CSRWhitelist.PublicKey { - safeTemplate.PublicKey = csrTemplate.PublicKey - } - if profile.CSRWhitelist.SignatureAlgorithm { - safeTemplate.SignatureAlgorithm = csrTemplate.SignatureAlgorithm - } - if profile.CSRWhitelist.DNSNames { - safeTemplate.DNSNames = csrTemplate.DNSNames - } - if profile.CSRWhitelist.IPAddresses { - safeTemplate.IPAddresses = csrTemplate.IPAddresses - } - if profile.CSRWhitelist.EmailAddresses { - safeTemplate.EmailAddresses = csrTemplate.EmailAddresses - } - } - - if req.CRLOverride != "" { - safeTemplate.CRLDistributionPoints = []string{req.CRLOverride} - } - - if safeTemplate.IsCA { - if !profile.CA { - return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest) - } - - if s.ca != nil && s.ca.MaxPathLen > 0 { - if safeTemplate.MaxPathLen >= s.ca.MaxPathLen { - // do not sign a cert with pathlen > current - return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest) - } - } else if s.ca != nil && s.ca.MaxPathLen == 0 && s.ca.MaxPathLenZero { - // signer has pathlen of 0, do not sign more intermediate CAs - return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest) - } - } - - OverrideHosts(&safeTemplate, req.Hosts) - safeTemplate.Subject = PopulateSubjectFromCSR(req.Subject, safeTemplate.Subject) - - // If there is a whitelist, ensure that both the Common Name and SAN DNSNames match - if profile.NameWhitelist != nil { - if safeTemplate.Subject.CommonName != "" { - if profile.NameWhitelist.Find([]byte(safeTemplate.Subject.CommonName)) == nil { - return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy) - } - } - for _, name := range safeTemplate.DNSNames { - if profile.NameWhitelist.Find([]byte(name)) == nil { - return nil, 
cferr.New(cferr.PolicyError, cferr.InvalidPolicy) - } - } - for _, name := range safeTemplate.EmailAddresses { - if profile.NameWhitelist.Find([]byte(name)) == nil { - return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy) - } - } - } - - if profile.ClientProvidesSerialNumbers { - if req.Serial == nil { - return nil, cferr.New(cferr.CertificateError, cferr.MissingSerial) - } - safeTemplate.SerialNumber = req.Serial - } else { - // RFC 5280 4.1.2.2: - // Certificate users MUST be able to handle serialNumber - // values up to 20 octets. Conforming CAs MUST NOT use - // serialNumber values longer than 20 octets. - // - // If CFSSL is providing the serial numbers, it makes - // sense to use the max supported size. - serialNumber := make([]byte, 20) - _, err = io.ReadFull(rand.Reader, serialNumber) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err) - } - - // SetBytes interprets buf as the bytes of a big-endian - // unsigned integer. The leading byte should be masked - // off to ensure it isn't negative. - serialNumber[0] &= 0x7F - - safeTemplate.SerialNumber = new(big.Int).SetBytes(serialNumber) - } - - if len(req.Extensions) > 0 { - for _, ext := range req.Extensions { - oid := asn1.ObjectIdentifier(ext.ID) - if !profile.ExtensionWhitelist[oid.String()] { - return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest) - } - - rawValue, err := hex.DecodeString(ext.Value) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.InvalidRequest, err) - } - - safeTemplate.ExtraExtensions = append(safeTemplate.ExtraExtensions, pkix.Extension{ - Id: oid, - Critical: ext.Critical, - Value: rawValue, - }) - } - } - - var certTBS = safeTemplate - - if len(profile.CTLogServers) > 0 { - // Add a poison extension which prevents validation - var poisonExtension = pkix.Extension{Id: signer.CTPoisonOID, Critical: true, Value: []byte{0x05, 0x00}} - var poisonedPreCert = certTBS - poisonedPreCert.ExtraExtensions = append(safeTemplate.ExtraExtensions, poisonExtension) - cert, err = s.sign(&poisonedPreCert, profile) - if err != nil { - return - } - - derCert, _ := pem.Decode(cert) - prechain := []ct.ASN1Cert{derCert.Bytes, s.ca.Raw} - var sctList []ct.SignedCertificateTimestamp - - for _, server := range profile.CTLogServers { - log.Infof("submitting poisoned precertificate to %s", server) - var ctclient = client.New(server) - var resp *ct.SignedCertificateTimestamp - resp, err = ctclient.AddPreChain(prechain) - if err != nil { - return nil, cferr.Wrap(cferr.CTError, cferr.PrecertSubmissionFailed, err) - } - sctList = append(sctList, *resp) - } - - var serializedSCTList []byte - serializedSCTList, err = serializeSCTList(sctList) - if err != nil { - return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err) - } - - // Serialize again as an octet string before embedding - serializedSCTList, err = asn1.Marshal(serializedSCTList) - if err != nil { - return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err) - } - - var SCTListExtension = pkix.Extension{Id: signer.SCTListOID, Critical: false, Value: serializedSCTList} - certTBS.ExtraExtensions = append(certTBS.ExtraExtensions, SCTListExtension) - } - var signedCert []byte - signedCert, err = s.sign(&certTBS, profile) - if err != nil { - return nil, err - } - - if s.dbAccessor != nil { - var certRecord = certdb.CertificateRecord{ - Serial: certTBS.SerialNumber.String(), - // this relies on the specific behavior of x509.CreateCertificate - // which updates certTBS AuthorityKeyId from the signer's 
SubjectKeyId - AKI: hex.EncodeToString(certTBS.AuthorityKeyId), - CALabel: req.Label, - Status: "good", - Expiry: certTBS.NotAfter, - PEM: string(signedCert), - } - - err = s.dbAccessor.InsertCertificate(certRecord) - if err != nil { - return nil, err - } - log.Debug("saved certificate with serial number ", certTBS.SerialNumber) - } - - return signedCert, nil -} - -func serializeSCTList(sctList []ct.SignedCertificateTimestamp) ([]byte, error) { - var buf bytes.Buffer - for _, sct := range sctList { - sct, err := ct.SerializeSCT(sct) - if err != nil { - return nil, err - } - binary.Write(&buf, binary.BigEndian, uint16(len(sct))) - buf.Write(sct) - } - - var sctListLengthField = make([]byte, 2) - binary.BigEndian.PutUint16(sctListLengthField, uint16(buf.Len())) - return bytes.Join([][]byte{sctListLengthField, buf.Bytes()}, nil), nil -} - -// Info return a populated info.Resp struct or an error. -func (s *Signer) Info(req info.Req) (resp *info.Resp, err error) { - cert, err := s.Certificate(req.Label, req.Profile) - if err != nil { - return - } - - profile, err := signer.Profile(s, req.Profile) - if err != nil { - return - } - - resp = new(info.Resp) - if cert.Raw != nil { - resp.Certificate = string(bytes.TrimSpace(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}))) - } - resp.Usage = profile.Usage - resp.ExpiryString = profile.ExpiryString - - return -} - -// SigAlgo returns the RSA signer's signature algorithm. -func (s *Signer) SigAlgo() x509.SignatureAlgorithm { - return s.sigAlgo -} - -// Certificate returns the signer's certificate. -func (s *Signer) Certificate(label, profile string) (*x509.Certificate, error) { - cert := *s.ca - return &cert, nil -} - -// SetPolicy sets the signer's signature policy. -func (s *Signer) SetPolicy(policy *config.Signing) { - s.policy = policy -} - -// SetDBAccessor sets the signers' cert db accessor -func (s *Signer) SetDBAccessor(dba certdb.Accessor) { - s.dbAccessor = dba -} - -// Policy returns the signer's policy. -func (s *Signer) Policy() *config.Signing { - return s.policy -} diff --git a/vendor/src/github.com/cloudflare/cfssl/signer/signer.go b/vendor/src/github.com/cloudflare/cfssl/signer/signer.go deleted file mode 100644 index 6b0c3ea6f..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/signer/signer.go +++ /dev/null @@ -1,410 +0,0 @@ -// Package signer implements certificate signature functionality for CFSSL. -package signer - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "math/big" - "strings" - "time" - - "github.com/cloudflare/cfssl/certdb" - "github.com/cloudflare/cfssl/config" - "github.com/cloudflare/cfssl/csr" - cferr "github.com/cloudflare/cfssl/errors" - "github.com/cloudflare/cfssl/helpers" - "github.com/cloudflare/cfssl/info" -) - -// MaxPathLen is the default path length for a new CA certificate. -var MaxPathLen = 2 - -// MaxPathLenZero indicates whether a new CA certificate has pathlen=0 -var MaxPathLenZero = false - -// Subject contains the information that should be used to override the -// subject information when signing a certificate. -type Subject struct { - CN string - Names []csr.Name `json:"names"` - SerialNumber string -} - -// Extension represents a raw extension to be included in the certificate. The -// "value" field must be hex encoded. 
-type Extension struct {
-	ID       config.OID `json:"id"`
-	Critical bool       `json:"critical"`
-	Value    string     `json:"value"`
-}
-
-// SignRequest stores a signature request, which contains the hostname,
-// the CSR, optional subject information, and the signature profile.
-//
-// Extensions provided in the signRequest are copied into the certificate, as
-// long as they are in the ExtensionWhitelist for the signer's policy.
-// Extensions requested in the CSR are ignored, except for those processed by
-// ParseCertificateRequest (mainly subjectAltName).
-type SignRequest struct {
-	Hosts       []string    `json:"hosts"`
-	Request     string      `json:"certificate_request"`
-	Subject     *Subject    `json:"subject,omitempty"`
-	Profile     string      `json:"profile"`
-	CRLOverride string      `json:"crl_override"`
-	Label       string      `json:"label"`
-	Serial      *big.Int    `json:"serial,omitempty"`
-	Extensions  []Extension `json:"extensions,omitempty"`
-}
-
-// appendIf appends s to a if s is not an empty string.
-func appendIf(s string, a *[]string) {
-	if s != "" {
-		*a = append(*a, s)
-	}
-}
-
-// Name returns the PKIX name for the subject.
-func (s *Subject) Name() pkix.Name {
-	var name pkix.Name
-	name.CommonName = s.CN
-
-	for _, n := range s.Names {
-		appendIf(n.C, &name.Country)
-		appendIf(n.ST, &name.Province)
-		appendIf(n.L, &name.Locality)
-		appendIf(n.O, &name.Organization)
-		appendIf(n.OU, &name.OrganizationalUnit)
-	}
-	name.SerialNumber = s.SerialNumber
-	return name
-}
-
-// SplitHosts takes a comma-separated list of hosts and returns a slice
-// of the individual hosts.
-func SplitHosts(hostList string) []string {
-	if hostList == "" {
-		return nil
-	}
-
-	return strings.Split(hostList, ",")
-}
-
-// A Signer contains a CA's certificate and private key for signing
-// certificates, a Signing policy to refer to and a SignatureAlgorithm.
-type Signer interface {
-	Info(info.Req) (*info.Resp, error)
-	Policy() *config.Signing
-	SetDBAccessor(certdb.Accessor)
-	SetPolicy(*config.Signing)
-	SigAlgo() x509.SignatureAlgorithm
-	Sign(req SignRequest) (cert []byte, err error)
-}
-
-// Profile gets the specific profile from the signer.
-func Profile(s Signer, profile string) (*config.SigningProfile, error) {
-	var p *config.SigningProfile
-	policy := s.Policy()
-	if policy != nil && policy.Profiles != nil && profile != "" {
-		p = policy.Profiles[profile]
-	}
-
-	if p == nil && policy != nil {
-		p = policy.Default
-	}
-
-	if p == nil {
-		return nil, cferr.Wrap(cferr.APIClientError, cferr.ClientHTTPError, errors.New("profile must not be nil"))
-	}
-	return p, nil
-}
-
-// DefaultSigAlgo returns an appropriate X.509 signature algorithm given
-// the CA's private key.
-func DefaultSigAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
-	pub := priv.Public()
-	switch pub := pub.(type) {
-	case *rsa.PublicKey:
-		keySize := pub.N.BitLen()
-		switch {
-		case keySize >= 4096:
-			return x509.SHA512WithRSA
-		case keySize >= 3072:
-			return x509.SHA384WithRSA
-		case keySize >= 2048:
-			return x509.SHA256WithRSA
-		default:
-			return x509.SHA1WithRSA
-		}
-	case *ecdsa.PublicKey:
-		switch pub.Curve {
-		case elliptic.P256():
-			return x509.ECDSAWithSHA256
-		case elliptic.P384():
-			return x509.ECDSAWithSHA384
-		case elliptic.P521():
-			return x509.ECDSAWithSHA512
-		default:
-			return x509.ECDSAWithSHA1
-		}
-	default:
-		return x509.UnknownSignatureAlgorithm
-	}
-}
-
-// ParseCertificateRequest takes an incoming certificate request and
-// builds a certificate template from it.
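The request-side types above compose as in this self-contained sketch; the values are illustrative and the "server" profile is hypothetical:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/csr"
	"github.com/cloudflare/cfssl/signer"
)

func main() {
	csrPEM, err := ioutil.ReadFile("server.csr")
	if err != nil {
		log.Fatal(err)
	}

	// SplitHosts turns a comma-separated host list into a slice.
	hosts := signer.SplitHosts("www.example.test,api.example.test,192.0.2.7")

	req := signer.SignRequest{
		Request: string(csrPEM),
		Hosts:   hosts,
		Subject: &signer.Subject{
			CN:    "www.example.test",
			Names: []csr.Name{{C: "US", O: "Example Co"}},
		},
		Profile: "server", // must exist in the signing policy, else the default is used
	}
	fmt.Printf("%d hosts, profile %q\n", len(req.Hosts), req.Profile)
}
```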
-func ParseCertificateRequest(s Signer, csrBytes []byte) (template *x509.Certificate, err error) { - csrv, err := x509.ParseCertificateRequest(csrBytes) - if err != nil { - err = cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err) - return - } - - err = helpers.CheckSignature(csrv, csrv.SignatureAlgorithm, csrv.RawTBSCertificateRequest, csrv.Signature) - if err != nil { - err = cferr.Wrap(cferr.CSRError, cferr.KeyMismatch, err) - return - } - - template = &x509.Certificate{ - Subject: csrv.Subject, - PublicKeyAlgorithm: csrv.PublicKeyAlgorithm, - PublicKey: csrv.PublicKey, - SignatureAlgorithm: s.SigAlgo(), - DNSNames: csrv.DNSNames, - IPAddresses: csrv.IPAddresses, - EmailAddresses: csrv.EmailAddresses, - } - - for _, val := range csrv.Extensions { - // Check the CSR for the X.509 BasicConstraints (RFC 5280, 4.2.1.9) - // extension and append to template if necessary - if val.Id.Equal(asn1.ObjectIdentifier{2, 5, 29, 19}) { - var constraints csr.BasicConstraints - var rest []byte - - if rest, err = asn1.Unmarshal(val.Value, &constraints); err != nil { - return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err) - } else if len(rest) != 0 { - return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, errors.New("x509: trailing data after X.509 BasicConstraints")) - } - - template.BasicConstraintsValid = true - template.IsCA = constraints.IsCA - template.MaxPathLen = constraints.MaxPathLen - template.MaxPathLenZero = template.MaxPathLen == 0 - } - } - - return -} - -type subjectPublicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - SubjectPublicKey asn1.BitString -} - -// ComputeSKI derives an SKI from the certificate's public key in a -// standard manner. This is done by computing the SHA-1 digest of the -// SubjectPublicKeyInfo component of the certificate. -func ComputeSKI(template *x509.Certificate) ([]byte, error) { - pub := template.PublicKey - encodedPub, err := x509.MarshalPKIXPublicKey(pub) - if err != nil { - return nil, err - } - - var subPKI subjectPublicKeyInfo - _, err = asn1.Unmarshal(encodedPub, &subPKI) - if err != nil { - return nil, err - } - - pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes) - return pubHash[:], nil -} - -// FillTemplate is a utility function that tries to load as much of -// the certificate template as possible from the profiles and current -// template. It fills in the key uses, expiration, revocation URLs -// and SKI. -func FillTemplate(template *x509.Certificate, defaultProfile, profile *config.SigningProfile) error { - ski, err := ComputeSKI(template) - - var ( - eku []x509.ExtKeyUsage - ku x509.KeyUsage - backdate time.Duration - expiry time.Duration - notBefore time.Time - notAfter time.Time - crlURL, ocspURL string - issuerURL = profile.IssuerURL - ) - - // The third value returned from Usages is a list of unknown key usages. - // This should be used when validating the profile at load, and isn't used - // here. 
- ku, eku, _ = profile.Usages() - if profile.IssuerURL == nil { - issuerURL = defaultProfile.IssuerURL - } - - if ku == 0 && len(eku) == 0 { - return cferr.New(cferr.PolicyError, cferr.NoKeyUsages) - } - - if expiry = profile.Expiry; expiry == 0 { - expiry = defaultProfile.Expiry - } - - if crlURL = profile.CRL; crlURL == "" { - crlURL = defaultProfile.CRL - } - if ocspURL = profile.OCSP; ocspURL == "" { - ocspURL = defaultProfile.OCSP - } - if backdate = profile.Backdate; backdate == 0 { - backdate = -5 * time.Minute - } else { - backdate = -1 * profile.Backdate - } - - if !profile.NotBefore.IsZero() { - notBefore = profile.NotBefore.UTC() - } else { - notBefore = time.Now().Round(time.Minute).Add(backdate).UTC() - } - - if !profile.NotAfter.IsZero() { - notAfter = profile.NotAfter.UTC() - } else { - notAfter = notBefore.Add(expiry).UTC() - } - - template.NotBefore = notBefore - template.NotAfter = notAfter - template.KeyUsage = ku - template.ExtKeyUsage = eku - template.BasicConstraintsValid = true - template.IsCA = profile.CA - template.SubjectKeyId = ski - - if ocspURL != "" { - template.OCSPServer = []string{ocspURL} - } - if crlURL != "" { - template.CRLDistributionPoints = []string{crlURL} - } - - if len(issuerURL) != 0 { - template.IssuingCertificateURL = issuerURL - } - if len(profile.Policies) != 0 { - err = addPolicies(template, profile.Policies) - if err != nil { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err) - } - } - if profile.OCSPNoCheck { - ocspNoCheckExtension := pkix.Extension{ - Id: asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 5}, - Critical: false, - Value: []byte{0x05, 0x00}, - } - template.ExtraExtensions = append(template.ExtraExtensions, ocspNoCheckExtension) - } - - return nil -} - -type policyInformation struct { - PolicyIdentifier asn1.ObjectIdentifier - Qualifiers []interface{} `asn1:"tag:optional,omitempty"` -} - -type cpsPolicyQualifier struct { - PolicyQualifierID asn1.ObjectIdentifier - Qualifier string `asn1:"tag:optional,ia5"` -} - -type userNotice struct { - ExplicitText string `asn1:"tag:optional,utf8"` -} -type userNoticePolicyQualifier struct { - PolicyQualifierID asn1.ObjectIdentifier - Qualifier userNotice -} - -var ( - // Per https://tools.ietf.org/html/rfc3280.html#page-106, this represents: - // iso(1) identified-organization(3) dod(6) internet(1) security(5) - // mechanisms(5) pkix(7) id-qt(2) id-qt-cps(1) - iDQTCertificationPracticeStatement = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1} - // iso(1) identified-organization(3) dod(6) internet(1) security(5) - // mechanisms(5) pkix(7) id-qt(2) id-qt-unotice(2) - iDQTUserNotice = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2} - - // CTPoisonOID is the object ID of the critical poison extension for precertificates - // https://tools.ietf.org/html/rfc6962#page-9 - CTPoisonOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3} - - // SCTListOID is the object ID for the Signed Certificate Timestamp certificate extension - // https://tools.ietf.org/html/rfc6962#page-14 - SCTListOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2} -) - -// addPolicies adds Certificate Policies and optional Policy Qualifiers to a -// certificate, based on the input config. Go's x509 library allows setting -// Certificate Policies easily, but does not support nested Policy Qualifiers -// under those policies. So we need to construct the ASN.1 structure ourselves. 
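The backdate and expiry arithmetic in FillTemplate above works out as in the following small, self-contained sketch; the one-year expiry is an assumed profile value:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Default backdate when the profile sets none: five minutes, applied
	// negatively so that clock skew between the CA and clients is tolerated.
	backdate := -5 * time.Minute
	expiry := 8760 * time.Hour // an assumed one-year profile expiry

	notBefore := time.Now().Round(time.Minute).Add(backdate).UTC()
	notAfter := notBefore.Add(expiry).UTC()

	fmt.Println("NotBefore:", notBefore)
	fmt.Println("NotAfter: ", notAfter)
}
```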
-func addPolicies(template *x509.Certificate, policies []config.CertificatePolicy) error { - asn1PolicyList := []policyInformation{} - - for _, policy := range policies { - pi := policyInformation{ - // The PolicyIdentifier is an OID assigned to a given issuer. - PolicyIdentifier: asn1.ObjectIdentifier(policy.ID), - } - for _, qualifier := range policy.Qualifiers { - switch qualifier.Type { - case "id-qt-unotice": - pi.Qualifiers = append(pi.Qualifiers, - userNoticePolicyQualifier{ - PolicyQualifierID: iDQTUserNotice, - Qualifier: userNotice{ - ExplicitText: qualifier.Value, - }, - }) - case "id-qt-cps": - pi.Qualifiers = append(pi.Qualifiers, - cpsPolicyQualifier{ - PolicyQualifierID: iDQTCertificationPracticeStatement, - Qualifier: qualifier.Value, - }) - default: - return errors.New("Invalid qualifier type in Policies " + qualifier.Type) - } - } - asn1PolicyList = append(asn1PolicyList, pi) - } - - asn1Bytes, err := asn1.Marshal(asn1PolicyList) - if err != nil { - return err - } - - template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{ - Id: asn1.ObjectIdentifier{2, 5, 29, 32}, - Critical: false, - Value: asn1Bytes, - }) - return nil -} diff --git a/vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE b/vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE deleted file mode 100644 index 2387f3026..000000000 --- a/vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright (c) 2014 Kyle Isom - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/src/github.com/coreos/etcd/LICENSE b/vendor/src/github.com/coreos/etcd/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/src/github.com/coreos/etcd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/src/github.com/coreos/etcd/client/README.md b/vendor/src/github.com/coreos/etcd/client/README.md deleted file mode 100644 index e9e4be468..000000000 --- a/vendor/src/github.com/coreos/etcd/client/README.md +++ /dev/null @@ -1,110 +0,0 @@ -# etcd/client - -etcd/client is the Go client library for etcd. 
-
-[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client)
-
-## Install
-
-```bash
-go get github.com/coreos/etcd/client
-```
-
-## Usage
-
-```go
-package main
-
-import (
-	"log"
-	"time"
-
-	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
-	"github.com/coreos/etcd/client"
-)
-
-func main() {
-	cfg := client.Config{
-		Endpoints: []string{"http://127.0.0.1:2379"},
-		Transport: client.DefaultTransport,
-		// set timeout per request to fail fast when the target endpoint is unavailable
-		HeaderTimeoutPerRequest: time.Second,
-	}
-	c, err := client.New(cfg)
-	if err != nil {
-		log.Fatal(err)
-	}
-	kapi := client.NewKeysAPI(c)
-	// set "/foo" key with "bar" value
-	log.Print("Setting '/foo' key with 'bar' value")
-	resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
-	if err != nil {
-		log.Fatal(err)
-	} else {
-		// print common key info
-		log.Printf("Set is done. Metadata is %q\n", resp)
-	}
-	// get "/foo" key's value
-	log.Print("Getting '/foo' key value")
-	resp, err = kapi.Get(context.Background(), "/foo", nil)
-	if err != nil {
-		log.Fatal(err)
-	} else {
-		// print common key info
-		log.Printf("Get is done. Metadata is %q\n", resp)
-		// print value
-		log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
-	}
-}
-```
-
-## Error Handling
-
-The etcd client may return three types of errors.
-
-- context error
-
-Each API call takes a `context` as its first parameter. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned no matter what internal errors the API call has already encountered.
-
-- cluster error
-
-Each API call tries to send a request to the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or connection issues, the error is added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
-
-- response error
-
-If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
-
-Here is example code for handling client errors:
-
-```go
-cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
-c, err := client.New(cfg)
-if err != nil {
-	log.Fatal(err)
-}
-
-kapi := client.NewKeysAPI(c)
-resp, err := kapi.Set(ctx, "test", "bar", nil)
-if err != nil {
-	if err == context.Canceled {
-		// ctx is canceled by another routine
-	} else if err == context.DeadlineExceeded {
-		// ctx is attached with a deadline and it exceeded
-	} else if cerr, ok := err.(*client.ClusterError); ok {
-		// process (cerr.Errors)
-	} else {
-		// bad cluster endpoints, which are not etcd servers
-	}
-}
-```
-
-## Caveats
-
-1. etcd/client prefers to use the same endpoint as long as that endpoint continues to work well. This saves socket resources and improves efficiency for both the client and the server. This preference doesn't remove consistency from the data consumed by the client, because data replicated to each etcd member has already passed through the consensus process.
-
-2. etcd/client does round-robin rotation over the other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all of the errors it encountered.
-
-3. By default, etcd/client cannot handle the case where the remote server is SIGSTOPed. The TCP keepalive mechanism doesn't help in this scenario, because the operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority, because a real-life case in which a server is stopped but the connection is kept alive hasn't been brought to our attention.
-
-4. etcd/client cannot detect whether the member in use is healthy when doing read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. As a workaround, users could monitor the experimental /health endpoint for member health information. We are improving it at [#3265](https://github.com/coreos/etcd/issues/3265).
diff --git a/vendor/src/github.com/coreos/etcd/client/auth_role.go b/vendor/src/github.com/coreos/etcd/client/auth_role.go
deleted file mode 100644
index 0f6748bdf..000000000
--- a/vendor/src/github.com/coreos/etcd/client/auth_role.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
-	"bytes"
-	"encoding/json"
-	"net/http"
-	"net/url"
-
-	"golang.org/x/net/context"
-)
-
-type Role struct {
-	Role        string       `json:"role"`
-	Permissions Permissions  `json:"permissions"`
-	Grant       *Permissions `json:"grant,omitempty"`
-	Revoke      *Permissions `json:"revoke,omitempty"`
-}
-
-type Permissions struct {
-	KV rwPermission `json:"kv"`
-}
-
-type rwPermission struct {
-	Read  []string `json:"read"`
-	Write []string `json:"write"`
-}
-
-type PermissionType int
-
-const (
-	ReadPermission PermissionType = iota
-	WritePermission
-	ReadWritePermission
-)
-
-// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
-// interact with etcd's role creation and modification features.
-func NewAuthRoleAPI(c Client) AuthRoleAPI {
-	return &httpAuthRoleAPI{
-		client: c,
-	}
-}
-
-type AuthRoleAPI interface {
-	// AddRole adds a role.
-	AddRole(ctx context.Context, role string) error
-
-	// RemoveRole removes a role.
-	RemoveRole(ctx context.Context, role string) error
-
-	// GetRole retrieves role details.
-	GetRole(ctx context.Context, role string) (*Role, error)
-
-	// GrantRoleKV grants a role some permission prefixes for the KV store.
-	GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
-
-	// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
-	RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
-
-	// ListRoles lists roles.
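Expanding on the cluster-error case described in the README above, a sketch of unwrapping the per-endpoint failures; it relies on the `Errors` field that the README's comment references:

```go
package main

import (
	"log"

	"golang.org/x/net/context"
	"github.com/coreos/etcd/client"
)

func main() {
	c, err := client.New(client.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)

	if _, err := kapi.Get(context.Background(), "/foo", nil); err != nil {
		switch e := err.(type) {
		case *client.ClusterError:
			// One entry per endpoint that was tried and failed.
			for _, ee := range e.Errors {
				log.Printf("endpoint error: %v", ee)
			}
		default:
			log.Printf("other error: %v", err)
		}
	}
}
```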
- ListRoles(ctx context.Context) ([]string, error) -} - -type httpAuthRoleAPI struct { - client httpClient -} - -type authRoleAPIAction struct { - verb string - name string - role *Role -} - -type authRoleAPIList struct{} - -func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", l.name) - if l.role == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.role) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { - resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - var roleList struct { - Roles []Role `json:"roles"` - } - if err = json.Unmarshal(body, &roleList); err != nil { - return nil, err - } - ret := make([]string, 0, len(roleList.Roles)) - for _, r := range roleList.Roles { - ret = append(ret, r.Role) - } - return ret, nil -} - -func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { - role := &Role{ - Role: rolename, - } - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "DELETE", - name: rolename, - }) -} - -func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { - resp, body, err := r.client.Do(ctx, req) - if err != nil { - return err - } - if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err := json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) { - return r.modRole(ctx, &authRoleAPIAction{ - verb: "GET", - name: rolename, - }) -} - -func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { - var out rwPermission - switch permType { - case ReadPermission: - out.Read = prefixes - case WritePermission: - out.Write = prefixes - case ReadWritePermission: - out.Read = prefixes - out.Write = prefixes - } - return out -} - -func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - Role: rolename, - Grant: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - Role: rolename, - Revoke: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { - resp, body, err 
:= r.client.Do(ctx, req) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var role Role - if err = json.Unmarshal(body, &role); err != nil { - return nil, err - } - return &role, nil -} diff --git a/vendor/src/github.com/coreos/etcd/client/auth_user.go b/vendor/src/github.com/coreos/etcd/client/auth_user.go deleted file mode 100644 index 0b0e09155..000000000 --- a/vendor/src/github.com/coreos/etcd/client/auth_user.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - "path" - - "golang.org/x/net/context" -) - -var ( - defaultV2AuthPrefix = "/v2/auth" -) - -type User struct { - User string `json:"user"` - Password string `json:"password,omitempty"` - Roles []string `json:"roles"` - Grant []string `json:"grant,omitempty"` - Revoke []string `json:"revoke,omitempty"` -} - -// userListEntry is the user representation given by the server for ListUsers -type userListEntry struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -type UserRoles struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -type userName struct { - User string `json:"user"` -} - -func v2AuthURL(ep url.URL, action string, name string) *url.URL { - if name != "" { - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) - return &ep - } - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) - return &ep -} - -// NewAuthAPI constructs a new AuthAPI that uses HTTP to -// interact with etcd's general auth features. -func NewAuthAPI(c Client) AuthAPI { - return &httpAuthAPI{ - client: c, - } -} - -type AuthAPI interface { - // Enable auth. - Enable(ctx context.Context) error - - // Disable auth. 
- Disable(ctx context.Context) error -} - -type httpAuthAPI struct { - client httpClient -} - -func (s *httpAuthAPI) Enable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"PUT"}) -} - -func (s *httpAuthAPI) Disable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"DELETE"}) -} - -func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { - resp, body, err := s.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -type authAPIAction struct { - verb string -} - -func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "enable", "") - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req -} - -type authError struct { - Message string `json:"message"` - Code int `json:"-"` -} - -func (e authError) Error() string { - return e.Message -} - -// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to -// interact with etcd's user creation and modification features. -func NewAuthUserAPI(c Client) AuthUserAPI { - return &httpAuthUserAPI{ - client: c, - } -} - -type AuthUserAPI interface { - // AddUser adds a user. - AddUser(ctx context.Context, username string, password string) error - - // RemoveUser removes a user. - RemoveUser(ctx context.Context, username string) error - - // GetUser retrieves user details. - GetUser(ctx context.Context, username string) (*User, error) - - // GrantUser grants a user some permission roles. - GrantUser(ctx context.Context, username string, roles []string) (*User, error) - - // RevokeUser revokes some permission roles from a user. - RevokeUser(ctx context.Context, username string, roles []string) (*User, error) - - // ChangePassword changes the user's password. - ChangePassword(ctx context.Context, username string, password string) (*User, error) - - // ListUsers lists the users. 
- ListUsers(ctx context.Context) ([]string, error) -} - -type httpAuthUserAPI struct { - client httpClient -} - -type authUserAPIAction struct { - verb string - username string - user *User -} - -type authUserAPIList struct{} - -func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", l.username) - if l.user == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.user) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { - resp, body, err := u.client.Do(ctx, &authUserAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - - var userList struct { - Users []userListEntry `json:"users"` - } - - if err = json.Unmarshal(body, &userList); err != nil { - return nil, err - } - - ret := make([]string, 0, len(userList.Users)) - for _, u := range userList.Users { - ret = append(ret, u.User) - } - return ret, nil -} - -func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { - user := &User{ - User: username, - Password: password, - } - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error { - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "DELETE", - username: username, - }) -} - -func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { - return u.modUser(ctx, &authUserAPIAction{ - verb: "GET", - username: username, - }) -} - -func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Grant: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Revoke: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { - user := &User{ - User: username, - Password: password, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return nil, 
err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var user User - if err = json.Unmarshal(body, &user); err != nil { - var userR UserRoles - if urerr := json.Unmarshal(body, &userR); urerr != nil { - return nil, err - } - user.User = userR.User - for _, r := range userR.Roles { - user.Roles = append(user.Roles, r.Role) - } - } - return &user, nil -} diff --git a/vendor/src/github.com/coreos/etcd/client/cancelreq.go b/vendor/src/github.com/coreos/etcd/client/cancelreq.go deleted file mode 100644 index fefdb40e4..000000000 --- a/vendor/src/github.com/coreos/etcd/client/cancelreq.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// borrowed from golang/net/context/ctxhttp/cancelreq.go - -// +build go1.5 - -package client - -import "net/http" - -func requestCanceler(tr CancelableTransport, req *http.Request) func() { - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/vendor/src/github.com/coreos/etcd/client/cancelreq_go14.go b/vendor/src/github.com/coreos/etcd/client/cancelreq_go14.go deleted file mode 100644 index 2bed38a41..000000000 --- a/vendor/src/github.com/coreos/etcd/client/cancelreq_go14.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// borrowed from golang/net/context/ctxhttp/cancelreq_go14.go - -// +build !go1.5 - -package client - -import "net/http" - -func requestCanceler(tr CancelableTransport, req *http.Request) func() { - return func() { - tr.CancelRequest(req) - } -} diff --git a/vendor/src/github.com/coreos/etcd/client/client.go b/vendor/src/github.com/coreos/etcd/client/client.go deleted file mode 100644 index 2aaa112ed..000000000 --- a/vendor/src/github.com/coreos/etcd/client/client.go +++ /dev/null @@ -1,598 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
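For readers reviewing this removal without the full tree: the auth_user.go and auth_role.go files deleted above implemented etcd's v2 auth surface over plain HTTP. A minimal sketch of how a caller drove that API, assuming a reachable etcd at http://127.0.0.1:2379 and a pre-existing "guest" role (endpoint, user, and role names are illustrative assumptions, not part of this patch):

    package main

    import (
        "log"

        "github.com/coreos/etcd/client"
        "golang.org/x/net/context"
    )

    func main() {
        // Assumed endpoint; adjust for your cluster.
        c, err := client.New(client.Config{
            Endpoints: []string{"http://127.0.0.1:2379"},
        })
        if err != nil {
            log.Fatal(err)
        }
        ctx := context.Background()

        // Create a user, then grant it an existing role.
        users := client.NewAuthUserAPI(c)
        if err := users.AddUser(ctx, "alice", "s3cret"); err != nil {
            log.Fatal(err)
        }
        if _, err := users.GrantUser(ctx, "alice", []string{"guest"}); err != nil {
            log.Fatal(err)
        }

        // Flip global auth on once users and roles are in place.
        if err := client.NewAuthAPI(c).Enable(ctx); err != nil {
            log.Fatal(err)
        }
    }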
- -package client - -import ( - "errors" - "fmt" - "io/ioutil" - "math/rand" - "net" - "net/http" - "net/url" - "reflect" - "sort" - "strconv" - "sync" - "time" - - "golang.org/x/net/context" -) - -var ( - ErrNoEndpoints = errors.New("client: no endpoints available") - ErrTooManyRedirects = errors.New("client: too many redirects") - ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") - ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") - errTooManyRedirectChecks = errors.New("client: too many redirect checks") -) - -var DefaultRequestTimeout = 5 * time.Second - -var DefaultTransport CancelableTransport = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, -} - -type EndpointSelectionMode int - -const ( - // EndpointSelectionRandom is the default value of the 'SelectionMode'. - // As the name implies, the client object will pick a node from the members - // of the cluster in a random fashion. If the cluster has three members, A, B, - // and C, the client picks any node from its three members as its request - // destination. - EndpointSelectionRandom EndpointSelectionMode = iota - - // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader', - // requests are sent directly to the cluster leader. This reduces - // forwarding roundtrips compared to making requests to etcd followers - // who then forward them to the cluster leader. In the event of a leader - // failure, however, clients configured this way cannot prioritize among - // the remaining etcd followers. Therefore, when a client sets 'SelectionMode' - // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to - // maintain its knowledge of current cluster state. - // - // This mode should be used with Client.AutoSync(). - EndpointSelectionPrioritizeLeader -) - -type Config struct { - // Endpoints defines a set of URLs (schemes, hosts and ports only) - // that can be used to communicate with a logical etcd cluster. For - // example, a three-node cluster could be provided like so: - // - // Endpoints: []string{ - // "http://node1.example.com:2379", - // "http://node2.example.com:2379", - // "http://node3.example.com:2379", - // } - // - // If multiple endpoints are provided, the Client will attempt to - // use them all in the event that one or more of them are unusable. - // - // If Client.Sync is ever called, the Client may cache an alternate - // set of endpoints to continue operation. - Endpoints []string - - // Transport is used by the Client to drive HTTP requests. If not - // provided, DefaultTransport will be used. - Transport CancelableTransport - - // CheckRedirect specifies the policy for handling HTTP redirects. - // If CheckRedirect is not nil, the Client calls it before - // following an HTTP redirect. The sole argument is the number of - // requests that have already been made. If CheckRedirect returns - // an error, Client.Do will not make any further requests and return - // the error back to the caller. - // - // If CheckRedirect is nil, the Client uses its default policy, - // which is to stop after 10 consecutive requests. - CheckRedirect CheckRedirectFunc - - // Username specifies the user credential to add as an authorization header. - Username string - - // Password is the password for the specified user to add as an authorization header - // to the request.
- Password string - - // HeaderTimeoutPerRequest specifies the time limit to wait for response - // header in a single request made by the Client. The timeout includes - // connection time, any redirects, and header wait time. - // - // For non-watch GET requests, the server returns the response body immediately. - // For PUT/POST/DELETE requests, the server will attempt to commit the request - // before responding, which is expected to take `100ms + 2 * RTT`. - // For watch requests, the server returns the header immediately to signal that - // the watch has started. But if the server is behind some kind of proxy, the - // response header may be cached at the proxy, and the Client cannot rely on - // this behavior. - // - // In particular, wait requests ignore this timeout. - // - // One API call may send multiple requests to different etcd servers until it - // succeeds. Use the context of the API call to specify the overall timeout. - // - // A HeaderTimeoutPerRequest of zero means no timeout. - HeaderTimeoutPerRequest time.Duration - - // SelectionMode is an EndpointSelectionMode enum that specifies the - // policy for choosing the etcd cluster node to which requests are sent. - SelectionMode EndpointSelectionMode -} - -func (cfg *Config) transport() CancelableTransport { - if cfg.Transport == nil { - return DefaultTransport - } - return cfg.Transport -} - -func (cfg *Config) checkRedirect() CheckRedirectFunc { - if cfg.CheckRedirect == nil { - return DefaultCheckRedirect - } - return cfg.CheckRedirect -} - -// CancelableTransport mimics net/http.Transport, but requires that -// the object also support request cancellation. -type CancelableTransport interface { - http.RoundTripper - CancelRequest(req *http.Request) -} - -type CheckRedirectFunc func(via int) error - -// DefaultCheckRedirect follows up to 10 redirects, but no more. -var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { - if via > 10 { - return ErrTooManyRedirects - } - return nil -} - -type Client interface { - // Sync updates the internal cache of the etcd cluster's membership. - Sync(context.Context) error - - // AutoSync periodically calls Sync() every given interval. - // The recommended sync interval is 10 seconds to 1 minute: frequent enough - // for the client to catch up with cluster changes in time, without adding - // noticeable overhead on the server. - // - // Example usage: - // - // for { - // err := client.AutoSync(ctx, 10*time.Second) - // if err == context.DeadlineExceeded || err == context.Canceled { - // break - // } - // log.Print(err) - // } - AutoSync(context.Context, time.Duration) error - - // Endpoints returns a copy of the current set of API endpoints used - // by Client to resolve HTTP requests. If Sync has ever been called, - // this may differ from the initial Endpoints provided in the Config. - Endpoints() []string - - // SetEndpoints sets the set of API endpoints used by Client to resolve - // HTTP requests.
If the given endpoints are not valid, an error will be - // returned. - SetEndpoints(eps []string) error - - httpClient -} - -func New(cfg Config) (Client, error) { - c := &httpClusterClient{ - clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), - rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), - selectionMode: cfg.SelectionMode, - } - if cfg.Username != "" { - c.credentials = &credentials{ - username: cfg.Username, - password: cfg.Password, - } - } - if err := c.SetEndpoints(cfg.Endpoints); err != nil { - return nil, err - } - return c, nil -} - -type httpClient interface { - Do(context.Context, httpAction) (*http.Response, []byte, error) -} - -func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { - return func(ep url.URL) httpClient { - return &redirectFollowingHTTPClient{ - checkRedirect: cr, - client: &simpleHTTPClient{ - transport: tr, - endpoint: ep, - headerTimeout: headerTimeout, - }, - } - } -} - -type credentials struct { - username string - password string -} - -type httpClientFactory func(url.URL) httpClient - -type httpAction interface { - HTTPRequest(url.URL) *http.Request -} - -type httpClusterClient struct { - clientFactory httpClientFactory - endpoints []url.URL - pinned int - credentials *credentials - sync.RWMutex - rand *rand.Rand - selectionMode EndpointSelectionMode -} - -func (c *httpClusterClient) getLeaderEndpoint() (string, error) { - mAPI := NewMembersAPI(c) - leader, err := mAPI.Leader(context.Background()) - if err != nil { - return "", err - } - - return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? -} - -func (c *httpClusterClient) SetEndpoints(eps []string) error { - if len(eps) == 0 { - return ErrNoEndpoints - } - - neps := make([]url.URL, len(eps)) - for i, ep := range eps { - u, err := url.Parse(ep) - if err != nil { - return err - } - neps[i] = *u - } - - switch c.selectionMode { - case EndpointSelectionRandom: - c.endpoints = shuffleEndpoints(c.rand, neps) - c.pinned = 0 - case EndpointSelectionPrioritizeLeader: - c.endpoints = neps - lep, err := c.getLeaderEndpoint() - if err != nil { - return ErrNoLeaderEndpoint - } - - for i := range c.endpoints { - if c.endpoints[i].String() == lep { - c.pinned = i - break - } - } - // If the endpoints list doesn't contain the leader's URL, just keep c.pinned = 0. - // Requests will then be forwarded from a follower to the leader, which still works.
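The SelectionMode plumbing here is easy to misread in diff form, so here is a hedged sketch of the pairing the doc comments above call for: EndpointSelectionPrioritizeLeader combined with the AutoSync retry loop (the endpoint URLs are placeholders):

    package main

    import (
        "log"
        "time"

        "github.com/coreos/etcd/client"
        "golang.org/x/net/context"
    )

    func main() {
        cfg := client.Config{
            // Placeholder endpoints; any reachable members work.
            Endpoints: []string{
                "http://node1.example.com:2379",
                "http://node2.example.com:2379",
            },
            // Pin requests to the leader. Per the comments above, this
            // mode must be paired with AutoSync to survive leader changes.
            SelectionMode: client.EndpointSelectionPrioritizeLeader,
        }
        c, err := client.New(cfg)
        if err != nil {
            log.Fatal(err)
        }

        // The retry loop suggested by the AutoSync doc comment.
        for {
            err := c.AutoSync(context.Background(), 10*time.Second)
            if err == context.DeadlineExceeded || err == context.Canceled {
                break
            }
            log.Print(err)
        }
    }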
- default: - return errors.New(fmt.Sprintf("invalid endpoint selection mode: %d", c.selectionMode)) - } - - return nil -} - -func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - action := act - c.RLock() - leps := len(c.endpoints) - eps := make([]url.URL, leps) - n := copy(eps, c.endpoints) - pinned := c.pinned - - if c.credentials != nil { - action = &authedAction{ - act: act, - credentials: *c.credentials, - } - } - c.RUnlock() - - if leps == 0 { - return nil, nil, ErrNoEndpoints - } - - if leps != n { - return nil, nil, errors.New("unable to pick endpoint: copy failed") - } - - var resp *http.Response - var body []byte - var err error - cerr := &ClusterError{} - - for i := pinned; i < leps+pinned; i++ { - k := i % leps - hc := c.clientFactory(eps[k]) - resp, body, err = hc.Do(ctx, action) - if err != nil { - cerr.Errors = append(cerr.Errors, err) - if err == ctx.Err() { - return nil, nil, ctx.Err() - } - if err == context.Canceled || err == context.DeadlineExceeded { - return nil, nil, err - } - continue - } - if resp.StatusCode/100 == 5 { - switch resp.StatusCode { - case http.StatusInternalServerError, http.StatusServiceUnavailable: - // TODO: make sure this is a no leader response - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) - default: - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) - } - continue - } - if k != pinned { - c.Lock() - c.pinned = k - c.Unlock() - } - return resp, body, nil - } - - return nil, nil, cerr -} - -func (c *httpClusterClient) Endpoints() []string { - c.RLock() - defer c.RUnlock() - - eps := make([]string, len(c.endpoints)) - for i, ep := range c.endpoints { - eps[i] = ep.String() - } - - return eps -} - -func (c *httpClusterClient) Sync(ctx context.Context) error { - mAPI := NewMembersAPI(c) - ms, err := mAPI.List(ctx) - if err != nil { - return err - } - - c.Lock() - defer c.Unlock() - - eps := make([]string, 0) - for _, m := range ms { - eps = append(eps, m.ClientURLs...) 
- } - sort.Sort(sort.StringSlice(eps)) - - ceps := make([]string, len(c.endpoints)) - for i, cep := range c.endpoints { - ceps[i] = cep.String() - } - sort.Sort(sort.StringSlice(ceps)) - // fast path if no change happens - // this helps client to pin the endpoint when no cluster change - if reflect.DeepEqual(eps, ceps) { - return nil - } - - return c.SetEndpoints(eps) -} - -func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - err := c.Sync(ctx) - if err != nil { - return err - } - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - } - } -} - -type roundTripResponse struct { - resp *http.Response - err error -} - -type simpleHTTPClient struct { - transport CancelableTransport - endpoint url.URL - headerTimeout time.Duration -} - -func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - req := act.HTTPRequest(c.endpoint) - - if err := printcURL(req); err != nil { - return nil, nil, err - } - - isWait := false - if req != nil && req.URL != nil { - ws := req.URL.Query().Get("wait") - if len(ws) != 0 { - var err error - isWait, err = strconv.ParseBool(ws) - if err != nil { - return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) - } - } - } - - var hctx context.Context - var hcancel context.CancelFunc - if !isWait && c.headerTimeout > 0 { - hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) - } else { - hctx, hcancel = context.WithCancel(ctx) - } - defer hcancel() - - reqcancel := requestCanceler(c.transport, req) - - rtchan := make(chan roundTripResponse, 1) - go func() { - resp, err := c.transport.RoundTrip(req) - rtchan <- roundTripResponse{resp: resp, err: err} - close(rtchan) - }() - - var resp *http.Response - var err error - - select { - case rtresp := <-rtchan: - resp, err = rtresp.resp, rtresp.err - case <-hctx.Done(): - // cancel and wait for request to actually exit before continuing - reqcancel() - rtresp := <-rtchan - resp = rtresp.resp - switch { - case ctx.Err() != nil: - err = ctx.Err() - case hctx.Err() != nil: - err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) - default: - panic("failed to get error from context") - } - } - - // always check for resp nil-ness to deal with possible - // race conditions between channels above - defer func() { - if resp != nil { - resp.Body.Close() - } - }() - - if err != nil { - return nil, nil, err - } - - var body []byte - done := make(chan struct{}) - go func() { - body, err = ioutil.ReadAll(resp.Body) - done <- struct{}{} - }() - - select { - case <-ctx.Done(): - resp.Body.Close() - <-done - return nil, nil, ctx.Err() - case <-done: - } - - return resp, body, err -} - -type authedAction struct { - act httpAction - credentials credentials -} - -func (a *authedAction) HTTPRequest(url url.URL) *http.Request { - r := a.act.HTTPRequest(url) - r.SetBasicAuth(a.credentials.username, a.credentials.password) - return r -} - -type redirectFollowingHTTPClient struct { - client httpClient - checkRedirect CheckRedirectFunc -} - -func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - next := act - for i := 0; i < 100; i++ { - if i > 0 { - if err := r.checkRedirect(i); err != nil { - return nil, nil, err - } - } - resp, body, err := r.client.Do(ctx, next) - if err != nil { - return nil, nil, err - } - if resp.StatusCode/100 == 3 { - hdr := 
resp.Header.Get("Location") - if hdr == "" { - return nil, nil, fmt.Errorf("Location header not set") - } - loc, err := url.Parse(hdr) - if err != nil { - return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) - } - next = &redirectedHTTPAction{ - action: act, - location: *loc, - } - continue - } - return resp, body, nil - } - - return nil, nil, errTooManyRedirectChecks -} - -type redirectedHTTPAction struct { - action httpAction - location url.URL -} - -func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { - orig := r.action.HTTPRequest(ep) - orig.URL = &r.location - return orig -} - -func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { - p := r.Perm(len(eps)) - neps := make([]url.URL, len(eps)) - for i, k := range p { - neps[i] = eps[k] - } - return neps -} diff --git a/vendor/src/github.com/coreos/etcd/client/cluster_error.go b/vendor/src/github.com/coreos/etcd/client/cluster_error.go deleted file mode 100644 index 957ed4624..000000000 --- a/vendor/src/github.com/coreos/etcd/client/cluster_error.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import "fmt" - -type ClusterError struct { - Errors []error -} - -func (ce *ClusterError) Error() string { - return ErrClusterUnavailable.Error() -} - -func (ce *ClusterError) Detail() string { - s := "" - for i, e := range ce.Errors { - s += fmt.Sprintf("error #%d: %s\n", i, e) - } - return s -} diff --git a/vendor/src/github.com/coreos/etcd/client/curl.go b/vendor/src/github.com/coreos/etcd/client/curl.go deleted file mode 100644 index 5a5a69a94..000000000 --- a/vendor/src/github.com/coreos/etcd/client/curl.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "os" -) - -var ( - cURLDebug = false -) - -func EnablecURLDebug() { - cURLDebug = true -} - -func DisablecURLDebug() { - cURLDebug = false -} - -// printcURL prints the cURL equivalent request to stderr. -// It returns an error if the body of the request cannot -// be read. -// The caller MUST cancel the request if there is an error. 
-func printcURL(req *http.Request) error { - if !cURLDebug { - return nil - } - var ( - command string - b []byte - err error - ) - - if req.URL != nil { - command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) - } - - if req.Body != nil { - b, err = ioutil.ReadAll(req.Body) - if err != nil { - return err - } - command += fmt.Sprintf(" -d %q", string(b)) - } - - fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) - - // reset body - body := bytes.NewBuffer(b) - req.Body = ioutil.NopCloser(body) - - return nil -} diff --git a/vendor/src/github.com/coreos/etcd/client/discover.go b/vendor/src/github.com/coreos/etcd/client/discover.go deleted file mode 100644 index ae88659f4..000000000 --- a/vendor/src/github.com/coreos/etcd/client/discover.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -// Discoverer is an interface that wraps the Discover method. -type Discoverer interface { - // Discover looks up the etcd servers for the domain. - Discover(domain string) ([]string, error) -} diff --git a/vendor/src/github.com/coreos/etcd/client/doc.go b/vendor/src/github.com/coreos/etcd/client/doc.go deleted file mode 100644 index 70111cace..000000000 --- a/vendor/src/github.com/coreos/etcd/client/doc.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package client provides bindings for the etcd APIs. 
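As an aside on the curl.go helper removed above: its package-level toggle echoed every outgoing request to stderr as a curl command line, which was handy when debugging against a live cluster. A small sketch of how it was switched on (the endpoint and key are assumptions for illustration):

    package main

    import (
        "log"

        "github.com/coreos/etcd/client"
        "golang.org/x/net/context"
    )

    func main() {
        // Echo each request as "cURL Command: curl -X ..." on stderr.
        client.EnablecURLDebug()
        defer client.DisablecURLDebug()

        c, err := client.New(client.Config{
            Endpoints: []string{"http://127.0.0.1:2379"}, // assumed endpoint
        })
        if err != nil {
            log.Fatal(err)
        }
        kapi := client.NewKeysAPI(c)
        if _, err := kapi.Get(context.Background(), "/foo", nil); err != nil {
            log.Print(err)
        }
    }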
- -Create a Config and exchange it for a Client: - - import ( - "net/http" - - "github.com/coreos/etcd/client" - "golang.org/x/net/context" - ) - - cfg := client.Config{ - Endpoints: []string{"http://127.0.0.1:2379"}, - Transport: DefaultTransport, - } - - c, err := client.New(cfg) - if err != nil { - // handle error - } - -Create a KeysAPI using the Client, then use it to interact with etcd: - - kAPI := client.NewKeysAPI(c) - - // create a new key /foo with the value "bar" - _, err = kAPI.Create(context.Background(), "/foo", "bar") - if err != nil { - // handle error - } - - // delete the newly created key only if the value is still "bar" - _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) - if err != nil { - // handle error - } - -Use a custom context to set timeouts on your operations: - - import "time" - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - // set a new key, ignoring its previous state - _, err := kAPI.Set(ctx, "/ping", "pong", nil) - if err != nil { - if err == context.DeadlineExceeded { - // request took longer than 5s - } else { - // handle error - } - } - -*/ -package client diff --git a/vendor/src/github.com/coreos/etcd/client/keys.generated.go b/vendor/src/github.com/coreos/etcd/client/keys.generated.go deleted file mode 100644 index 748283aa9..000000000 --- a/vendor/src/github.com/coreos/etcd/client/keys.generated.go +++ /dev/null @@ -1,1000 +0,0 @@ -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package client - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81819 = 1 - codecSelferC_RAW1819 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1819 = 10 - codecSelferValueTypeMap1819 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1819 = 2 - codecSelfer_containerMapValue1819 = 3 - codecSelfer_containerMapEnd1819 = 4 - codecSelfer_containerArrayElem1819 = 6 - codecSelfer_containerArrayEnd1819 = 7 -) - -var ( - codecSelferBitsize1819 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1819 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1819 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v.
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 time.Time - _ = v0 - } -} - -func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("action")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("node")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.PrevNode == nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("prevNode")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.PrevNode == nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) - } - } - } -} - -func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct9 := r.ContainerType() - if yyct9 == codecSelferValueTypeMap1819 { - yyl9 := r.ReadMapStart() - if yyl9 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) - } else { - x.codecDecodeSelfFromMap(yyl9, d) - } - } else if yyct9 == codecSelferValueTypeArray1819 { - yyl9 := r.ReadArrayStart() - if yyl9 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - x.codecDecodeSelfFromArray(yyl9, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) - } - } -} - -func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys10Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys10Slc - var yyhl10 bool = l >= 0 - for yyj10 := 0; ; yyj10++ { - if yyhl10 { - if yyj10 >= l { - break - } - } else { - if r.CheckBreak() { - break - } 
- } - z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys10Slc = r.DecodeBytes(yys10Slc, true, true) - yys10 := string(yys10Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) - switch yys10 { - case "action": - if r.TryDecodeAsNil() { - x.Action = "" - } else { - x.Action = string(r.DecodeString()) - } - case "node": - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - case "prevNode": - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - x.PrevNode.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys10) - } // end switch yys10 - } // end for yyj10 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) -} - -func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Action = "" - } else { - x.Action = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - x.PrevNode.CodecDecodeSelf(d) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep19 := !z.EncBinary() - yy2arr19 := z.EncBasicHandle().StructToArray - var yyq19 [8]bool - _, _, _ = yysep19, yyq19, yy2arr19 - const yyr19 bool = false - yyq19[1] = x.Dir != false - yyq19[6] = x.Expiration != nil - yyq19[7] = x.TTL != 0 - var yynn19 int - if yyr19 || yy2arr19 { - r.EncodeArrayStart(8) - } else { - yynn19 = 5 - for _, b := range yyq19 { - if b { - yynn19++ - } - } - r.EncodeMapStart(yynn19) - yynn19 = 0 - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("key")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq19[1] { - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq19[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("dir")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("nodes")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("createdIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym34 := z.EncBinary() - _ = yym34 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("modifiedIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq19[6] { - if x.Expiration == nil { - r.EncodeNil() - } else { - yym37 := z.EncBinary() - _ = yym37 - if false { - } else if yym38 := z.TimeRtidIfBinc(); yym38 != 0 { - r.EncodeBuiltin(yym38, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym37 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym37 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq19[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("expiration")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Expiration == nil { - r.EncodeNil() - } else { - yym39 := z.EncBinary() - _ = yym39 - if false 
{ - } else if yym40 := z.TimeRtidIfBinc(); yym40 != 0 { - r.EncodeBuiltin(yym40, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym39 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym39 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq19[7] { - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeInt(int64(x.TTL)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq19[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("ttl")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym43 := z.EncBinary() - _ = yym43 - if false { - } else { - r.EncodeInt(int64(x.TTL)) - } - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) - } - } - } -} - -func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym44 := z.DecBinary() - _ = yym44 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct45 := r.ContainerType() - if yyct45 == codecSelferValueTypeMap1819 { - yyl45 := r.ReadMapStart() - if yyl45 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) - } else { - x.codecDecodeSelfFromMap(yyl45, d) - } - } else if yyct45 == codecSelferValueTypeArray1819 { - yyl45 := r.ReadArrayStart() - if yyl45 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - x.codecDecodeSelfFromArray(yyl45, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) - } - } -} - -func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys46Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys46Slc - var yyhl46 bool = l >= 0 - for yyj46 := 0; ; yyj46++ { - if yyhl46 { - if yyj46 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys46Slc = r.DecodeBytes(yys46Slc, true, true) - yys46 := string(yys46Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) - switch yys46 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "dir": - if r.TryDecodeAsNil() { - x.Dir = false - } else { - x.Dir = bool(r.DecodeBool()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - case "nodes": - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv50 := &x.Nodes - yyv50.CodecDecodeSelf(d) - } - case "createdIndex": - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - x.CreatedIndex = uint64(r.DecodeUint(64)) - } - case "modifiedIndex": - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - x.ModifiedIndex = uint64(r.DecodeUint(64)) - } - case "expiration": - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym54 := z.DecBinary() - _ = yym54 - if false { - } else if yym55 := z.TimeRtidIfBinc(); yym55 != 0 { - r.DecodeBuiltin(yym55, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym54 { - z.DecBinaryUnmarshal(x.Expiration) - } 
else if !yym54 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - case "ttl": - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - x.TTL = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys46) - } // end switch yys46 - } // end for yyj46 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) -} - -func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj57 int - var yyb57 bool - var yyhl57 bool = l >= 0 - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Dir = false - } else { - x.Dir = bool(r.DecodeBool()) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv61 := &x.Nodes - yyv61.CodecDecodeSelf(d) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - x.CreatedIndex = uint64(r.DecodeUint(64)) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - x.ModifiedIndex = uint64(r.DecodeUint(64)) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym65 := z.DecBinary() - _ = yym65 - if false { - } else if yym66 := z.TimeRtidIfBinc(); yym66 != 0 { - r.DecodeBuiltin(yym66, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym65 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym65 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - x.TTL = int64(r.DecodeInt(64)) - } - for { - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - z.DecStructFieldNotFound(yyj57-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym68 := z.EncBinary() - _ = yym68 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encNodes((Nodes)(x), e) - } - } -} - -func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym69 := z.DecBinary() - _ = yym69 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decNodes((*Nodes)(x), d) - } -} - -func (x codecSelfer1819) encNodes(v Nodes, e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv70 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyv70 == nil { - r.EncodeNil() - } else { - yyv70.CodecEncodeSelf(e) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv71 := *v - yyh71, yyl71 := z.DecSliceHelperStart() - var yyc71 bool - if yyl71 == 0 { - if yyv71 == nil { - yyv71 = []*Node{} - yyc71 = true - } else if len(yyv71) != 0 { - yyv71 = yyv71[:0] - yyc71 = true - } - } else if yyl71 > 0 { - var yyrr71, yyrl71 int - var yyrt71 bool - if yyl71 > cap(yyv71) { - - yyrg71 := len(yyv71) > 0 - yyv271 := yyv71 - yyrl71, yyrt71 = z.DecInferLen(yyl71, z.DecBasicHandle().MaxInitLen, 8) - if yyrt71 { - if yyrl71 <= cap(yyv71) { - yyv71 = yyv71[:yyrl71] - } else { - yyv71 = make([]*Node, yyrl71) - } - } else { - yyv71 = make([]*Node, yyrl71) - } - yyc71 = true - yyrr71 = len(yyv71) - if yyrg71 { - copy(yyv71, yyv271) - } - } else if yyl71 != len(yyv71) { - yyv71 = yyv71[:yyl71] - yyc71 = true - } - yyj71 := 0 - for ; yyj71 < yyrr71; yyj71++ { - yyh71.ElemContainerState(yyj71) - if r.TryDecodeAsNil() { - if yyv71[yyj71] != nil { - *yyv71[yyj71] = Node{} - } - } else { - if yyv71[yyj71] == nil { - yyv71[yyj71] = new(Node) - } - yyw72 := yyv71[yyj71] - yyw72.CodecDecodeSelf(d) - } - - } - if yyrt71 { - for ; yyj71 < yyl71; yyj71++ { - yyv71 = append(yyv71, nil) - yyh71.ElemContainerState(yyj71) - if r.TryDecodeAsNil() { - if yyv71[yyj71] != nil { - *yyv71[yyj71] = Node{} - } - } else { - if yyv71[yyj71] == nil { - yyv71[yyj71] = new(Node) - } - yyw73 := yyv71[yyj71] - yyw73.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj71 := 0 - for ; !r.CheckBreak(); yyj71++ { - - if yyj71 >= len(yyv71) { - yyv71 = append(yyv71, nil) // var yyz71 *Node - yyc71 = true - } - yyh71.ElemContainerState(yyj71) - if yyj71 < len(yyv71) { - if r.TryDecodeAsNil() { - if yyv71[yyj71] != nil { - *yyv71[yyj71] = Node{} - } - } else { - if yyv71[yyj71] == nil { - yyv71[yyj71] = new(Node) - } - yyw74 := yyv71[yyj71] - yyw74.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj71 < len(yyv71) { - yyv71 = yyv71[:yyj71] - yyc71 = true - } else if yyj71 == 0 && yyv71 == 
nil { - yyv71 = []*Node{} - yyc71 = true - } - } - yyh71.End() - if yyc71 { - *v = yyv71 - } -} diff --git a/vendor/src/github.com/coreos/etcd/client/keys.go b/vendor/src/github.com/coreos/etcd/client/keys.go deleted file mode 100644 index 69dd83c66..000000000 --- a/vendor/src/github.com/coreos/etcd/client/keys.go +++ /dev/null @@ -1,663 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/ugorji/go/codec" - "golang.org/x/net/context" - "github.com/coreos/etcd/pkg/pathutil" -) - -const ( - ErrorCodeKeyNotFound = 100 - ErrorCodeTestFailed = 101 - ErrorCodeNotFile = 102 - ErrorCodeNotDir = 104 - ErrorCodeNodeExist = 105 - ErrorCodeRootROnly = 107 - ErrorCodeDirNotEmpty = 108 - ErrorCodeUnauthorized = 110 - - ErrorCodePrevValueRequired = 201 - ErrorCodeTTLNaN = 202 - ErrorCodeIndexNaN = 203 - ErrorCodeInvalidField = 209 - ErrorCodeInvalidForm = 210 - - ErrorCodeRaftInternal = 300 - ErrorCodeLeaderElect = 301 - - ErrorCodeWatcherCleared = 400 - ErrorCodeEventIndexCleared = 401 -) - -type Error struct { - Code int `json:"errorCode"` - Message string `json:"message"` - Cause string `json:"cause"` - Index uint64 `json:"index"` -} - -func (e Error) Error() string { - return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) -} - -var ( - ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") - ErrEmptyBody = errors.New("client: response body is empty") -) - -// PrevExistType is used to define an existence condition when setting -// or deleting Nodes. -type PrevExistType string - -const ( - PrevIgnore = PrevExistType("") - PrevExist = PrevExistType("true") - PrevNoExist = PrevExistType("false") -) - -var ( - defaultV2KeysPrefix = "/v2/keys" -) - -// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value -// API over HTTP. -func NewKeysAPI(c Client) KeysAPI { - return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) -} - -// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller -// to provide a custom base URL path. This should only be used in -// very rare cases. -func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { - return &httpKeysAPI{ - client: c, - prefix: p, - } -} - -type KeysAPI interface { - // Get retrieves a set of Nodes from etcd - Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) - - // Set assigns a new value to a Node identified by a given key. The caller - // may define a set of conditions in the SetOptions. If SetOptions.Dir=true - // then value is ignored. - Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) - - // Delete removes a Node identified by the given key, optionally destroying - // all of its children as well. 
The caller may define a set of required - // conditions in an DeleteOptions object. - Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) - - // Create is an alias for Set w/ PrevExist=false - Create(ctx context.Context, key, value string) (*Response, error) - - // CreateInOrder is used to atomically create in-order keys within the given directory. - CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) - - // Update is an alias for Set w/ PrevExist=true - Update(ctx context.Context, key, value string) (*Response, error) - - // Watcher builds a new Watcher targeted at a specific Node identified - // by the given key. The Watcher may be configured at creation time - // through a WatcherOptions object. The returned Watcher is designed - // to emit events that happen to a Node, and optionally to its children. - Watcher(key string, opts *WatcherOptions) Watcher -} - -type WatcherOptions struct { - // AfterIndex defines the index after-which the Watcher should - // start emitting events. For example, if a value of 5 is - // provided, the first event will have an index >= 6. - // - // Setting AfterIndex to 0 (default) means that the Watcher - // should start watching for events starting at the current - // index, whatever that may be. - AfterIndex uint64 - - // Recursive specifies whether or not the Watcher should emit - // events that occur in children of the given keyspace. If set - // to false (default), events will be limited to those that - // occur for the exact key. - Recursive bool -} - -type CreateInOrderOptions struct { - // TTL defines a period of time after-which the Node should - // expire and no longer exist. Values <= 0 are ignored. Given - // that the zero-value is ignored, TTL cannot be used to set - // a TTL of 0. - TTL time.Duration -} - -type SetOptions struct { - // PrevValue specifies what the current value of the Node must - // be in order for the Set operation to succeed. - // - // Leaving this field empty means that the caller wishes to - // ignore the current value of the Node. This cannot be used - // to compare the Node's current value to an empty string. - // - // PrevValue is ignored if Dir=true - PrevValue string - - // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Set operation to succeed. - // - // If PrevIndex is set to 0 (default), no comparison is made. - PrevIndex uint64 - - // PrevExist specifies whether the Node must currently exist - // (PrevExist) or not (PrevNoExist). If the caller does not - // care about existence, set PrevExist to PrevIgnore, or simply - // leave it unset. - PrevExist PrevExistType - - // TTL defines a period of time after-which the Node should - // expire and no longer exist. Values <= 0 are ignored. Given - // that the zero-value is ignored, TTL cannot be used to set - // a TTL of 0. - TTL time.Duration - - // Refresh set to true means a TTL value can be updated - // without firing a watch or changing the node value. A - // value must not be provided when refreshing a key. - Refresh bool - - // Dir specifies whether or not this Node should be created as a directory. - Dir bool -} - -type GetOptions struct { - // Recursive defines whether or not all children of the Node - // should be returned. - Recursive bool - - // Sort instructs the server whether or not to sort the Nodes. - // If true, the Nodes are sorted alphabetically by key in - // ascending order (A to z). 
If false (default), the Nodes will - // not be sorted and the ordering used should not be considered - // predictable. - Sort bool - - // Quorum specifies whether it gets the latest committed value that - // has been applied in quorum of members, which ensures external - // consistency (or linearizability). - Quorum bool -} - -type DeleteOptions struct { - // PrevValue specifies what the current value of the Node must - // be in order for the Delete operation to succeed. - // - // Leaving this field empty means that the caller wishes to - // ignore the current value of the Node. This cannot be used - // to compare the Node's current value to an empty string. - PrevValue string - - // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Delete operation to succeed. - // - // If PrevIndex is set to 0 (default), no comparison is made. - PrevIndex uint64 - - // Recursive defines whether or not all children of the Node - // should be deleted. If set to true, all children of the Node - // identified by the given key will be deleted. If left unset - // or explicitly set to false, only a single Node will be - // deleted. - Recursive bool - - // Dir specifies whether or not this Node should be removed as a directory. - Dir bool -} - -type Watcher interface { - // Next blocks until an etcd event occurs, then returns a Response - // representing that event. The behavior of Next depends on the - // WatcherOptions used to construct the Watcher. Next is designed to - // be called repeatedly, each time blocking until a subsequent event - // is available. - // - // If the provided context is cancelled, Next will return a non-nil - // error. Any other failures encountered while waiting for the next - // event (connection issues, deserialization failures, etc) will - // also result in a non-nil error. - Next(context.Context) (*Response, error) -} - -type Response struct { - // Action is the name of the operation that occurred. Possible values - // include get, set, delete, update, create, compareAndSwap, - // compareAndDelete and expire. - Action string `json:"action"` - - // Node represents the state of the relevant etcd Node. - Node *Node `json:"node"` - - // PrevNode represents the previous state of the Node. PrevNode is non-nil - // only if the Node existed before the action occurred and the action - // caused a change to the Node. - PrevNode *Node `json:"prevNode"` - - // Index holds the cluster-level index at the time the Response was generated. - // This index is not tied to the Node(s) contained in this Response. - Index uint64 `json:"-"` -} - -type Node struct { - // Key represents the unique location of this Node (e.g. "/foo/bar"). - Key string `json:"key"` - - // Dir reports whether node describes a directory. - Dir bool `json:"dir,omitempty"` - - // Value is the current data stored on this Node. If this Node - // is a directory, Value will be empty. - Value string `json:"value"` - - // Nodes holds the children of this Node, only if this Node is a directory. - // This slice of will be arbitrarily deep (children, grandchildren, great- - // grandchildren, etc.) if a recursive Get or Watch request were made. - Nodes Nodes `json:"nodes"` - - // CreatedIndex is the etcd index at-which this Node was created. - CreatedIndex uint64 `json:"createdIndex"` - - // ModifiedIndex is the etcd index at-which this Node was last modified. - ModifiedIndex uint64 `json:"modifiedIndex"` - - // Expiration is the server side expiration time of the key. 
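Since this hunk removes the entire public surface of the v2 keys API, a minimal usage sketch for reference. It is a sketch only, assuming the vendored import path above, the client.New/client.Config constructor defined elsewhere in this package (not shown in this hunk), and a placeholder endpoint:

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/coreos/etcd/client"
        "golang.org/x/net/context"
    )

    func main() {
        // client.New/client.Config come from the package's client.go,
        // not from this hunk; the endpoint is a placeholder.
        c, err := client.New(client.Config{
            Endpoints: []string{"http://127.0.0.1:2379"},
        })
        if err != nil {
            log.Fatal(err)
        }
        kapi := client.NewKeysAPI(c)
        ctx := context.Background()

        // Create-if-absent with a 10s TTL (SetOptions.PrevExist/TTL above).
        if _, err := kapi.Set(ctx, "/foo", "bar", &client.SetOptions{
            PrevExist: client.PrevNoExist,
            TTL:       10 * time.Second,
        }); err != nil {
            log.Fatal(err)
        }

        // Linearizable read via GetOptions.Quorum.
        resp, err := kapi.Get(ctx, "/foo", &client.GetOptions{Quorum: true})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(resp.Node.Value) // "bar"
    }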
- Expiration *time.Time `json:"expiration,omitempty"` - - // TTL is the time to live of the key in second. - TTL int64 `json:"ttl,omitempty"` -} - -func (n *Node) String() string { - return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) -} - -// TTLDuration returns the Node's TTL as a time.Duration object -func (n *Node) TTLDuration() time.Duration { - return time.Duration(n.TTL) * time.Second -} - -type Nodes []*Node - -// interfaces for sorting - -func (ns Nodes) Len() int { return len(ns) } -func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key } -func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } - -type httpKeysAPI struct { - client httpClient - prefix string -} - -func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { - act := &setAction{ - Prefix: k.prefix, - Key: key, - Value: val, - } - - if opts != nil { - act.PrevValue = opts.PrevValue - act.PrevIndex = opts.PrevIndex - act.PrevExist = opts.PrevExist - act.TTL = opts.TTL - act.Refresh = opts.Refresh - act.Dir = opts.Dir - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { - return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) -} - -func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { - act := &createInOrderAction{ - Prefix: k.prefix, - Dir: dir, - Value: val, - } - - if opts != nil { - act.TTL = opts.TTL - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { - return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) -} - -func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { - act := &deleteAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.PrevValue = opts.PrevValue - act.PrevIndex = opts.PrevIndex - act.Dir = opts.Dir - act.Recursive = opts.Recursive - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { - act := &getAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.Recursive = opts.Recursive - act.Sorted = opts.Sort - act.Quorum = opts.Quorum - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { - act := waitAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.Recursive = opts.Recursive - if opts.AfterIndex > 0 { - act.WaitIndex = opts.AfterIndex + 1 - } - } - - return &httpWatcher{ - client: k.client, - nextWait: act, - } -} - -type httpWatcher struct { - client httpClient - nextWait waitAction -} - -func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { - for { - httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) - if err != nil { - return nil, err - } - - resp, err := 
unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) - if err != nil { - if err == ErrEmptyBody { - continue - } - return nil, err - } - - hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 - return resp, nil - } -} - -// v2KeysURL forms a URL representing the location of a key. -// The endpoint argument represents the base URL of an etcd -// server. The prefix is the path needed to route from the -// provided endpoint's path to the root of the keys API -// (typically "/v2/keys"). -func v2KeysURL(ep url.URL, prefix, key string) *url.URL { - // We concatenate all parts together manually. We cannot use - // path.Join because it does not reserve trailing slash. - // We call CanonicalURLPath to further cleanup the path. - if prefix != "" && prefix[0] != '/' { - prefix = "/" + prefix - } - if key != "" && key[0] != '/' { - key = "/" + key - } - ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) - return &ep -} - -type getAction struct { - Prefix string - Key string - Recursive bool - Sorted bool - Quorum bool -} - -func (g *getAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, g.Prefix, g.Key) - - params := u.Query() - params.Set("recursive", strconv.FormatBool(g.Recursive)) - params.Set("sorted", strconv.FormatBool(g.Sorted)) - params.Set("quorum", strconv.FormatBool(g.Quorum)) - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type waitAction struct { - Prefix string - Key string - WaitIndex uint64 - Recursive bool -} - -func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, w.Prefix, w.Key) - - params := u.Query() - params.Set("wait", "true") - params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) - params.Set("recursive", strconv.FormatBool(w.Recursive)) - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type setAction struct { - Prefix string - Key string - Value string - PrevValue string - PrevIndex uint64 - PrevExist PrevExistType - TTL time.Duration - Refresh bool - Dir bool -} - -func (a *setAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Key) - - params := u.Query() - form := url.Values{} - - // we're either creating a directory or setting a key - if a.Dir { - params.Set("dir", strconv.FormatBool(a.Dir)) - } else { - // These options are only valid for setting a key - if a.PrevValue != "" { - params.Set("prevValue", a.PrevValue) - } - form.Add("value", a.Value) - } - - // Options which apply to both setting a key and creating a dir - if a.PrevIndex != 0 { - params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) - } - if a.PrevExist != PrevIgnore { - params.Set("prevExist", string(a.PrevExist)) - } - if a.TTL > 0 { - form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) - } - - if a.Refresh { - form.Add("refresh", "true") - } - - u.RawQuery = params.Encode() - body := strings.NewReader(form.Encode()) - - req, _ := http.NewRequest("PUT", u.String(), body) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - return req -} - -type deleteAction struct { - Prefix string - Key string - PrevValue string - PrevIndex uint64 - Dir bool - Recursive bool -} - -func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Key) - - params := u.Query() - if a.PrevValue != "" { - params.Set("prevValue", a.PrevValue) - } - if a.PrevIndex != 0 { - params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) - } - if a.Dir 
{ - params.Set("dir", "true") - } - if a.Recursive { - params.Set("recursive", "true") - } - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("DELETE", u.String(), nil) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - return req -} - -type createInOrderAction struct { - Prefix string - Dir string - Value string - TTL time.Duration -} - -func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Dir) - - form := url.Values{} - form.Add("value", a.Value) - if a.TTL > 0 { - form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) - } - body := strings.NewReader(form.Encode()) - - req, _ := http.NewRequest("POST", u.String(), body) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - return req -} - -func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { - switch code { - case http.StatusOK, http.StatusCreated: - if len(body) == 0 { - return nil, ErrEmptyBody - } - res, err = unmarshalSuccessfulKeysResponse(header, body) - default: - err = unmarshalFailedKeysResponse(body) - } - - return -} - -func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { - var res Response - err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) - if err != nil { - return nil, ErrInvalidJSON - } - if header.Get("X-Etcd-Index") != "" { - res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) - if err != nil { - return nil, err - } - } - return &res, nil -} - -func unmarshalFailedKeysResponse(body []byte) error { - var etcdErr Error - if err := json.Unmarshal(body, &etcdErr); err != nil { - return ErrInvalidJSON - } - return etcdErr -} diff --git a/vendor/src/github.com/coreos/etcd/client/members.go b/vendor/src/github.com/coreos/etcd/client/members.go deleted file mode 100644 index 8b602db54..000000000 --- a/vendor/src/github.com/coreos/etcd/client/members.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "net/url" - "path" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - defaultV2MembersPrefix = "/v2/members" - defaultLeaderSuffix = "/leader" -) - -type Member struct { - // ID is the unique identifier of this Member. - ID string `json:"id"` - - // Name is a human-readable, non-unique identifier of this Member. - Name string `json:"name"` - - // PeerURLs represents the HTTP(S) endpoints this Member uses to - // participate in etcd's consensus protocol. - PeerURLs []string `json:"peerURLs"` - - // ClientURLs represents the HTTP(S) endpoints on which this Member - // serves it's client-facing APIs. 
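For reference, a failed keys response decodes into the Error type declared at the top of keys.go (via unmarshalFailedKeysResponse in this hunk), so callers branched on its error codes. A sketch; the helper name and default-value behavior are illustrative only:

    package example

    import (
        "github.com/coreos/etcd/client"
        "golang.org/x/net/context"
    )

    // getOrDefault treats "key not found" (error code 100 above) as a
    // soft miss and any other failure as a real error. Illustrative only.
    func getOrDefault(kapi client.KeysAPI, key, def string) (string, error) {
        resp, err := kapi.Get(context.Background(), key, nil)
        if err != nil {
            if eErr, ok := err.(client.Error); ok && eErr.Code == client.ErrorCodeKeyNotFound {
                return def, nil
            }
            return "", err
        }
        return resp.Node.Value, nil
    }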
- ClientURLs []string `json:"clientURLs"` -} - -type memberCollection []Member - -func (c *memberCollection) UnmarshalJSON(data []byte) error { - d := struct { - Members []Member - }{} - - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - if d.Members == nil { - *c = make([]Member, 0) - return nil - } - - *c = d.Members - return nil -} - -type memberCreateOrUpdateRequest struct { - PeerURLs types.URLs -} - -func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { - s := struct { - PeerURLs []string `json:"peerURLs"` - }{ - PeerURLs: make([]string, len(m.PeerURLs)), - } - - for i, u := range m.PeerURLs { - s.PeerURLs[i] = u.String() - } - - return json.Marshal(&s) -} - -// NewMembersAPI constructs a new MembersAPI that uses HTTP to -// interact with etcd's membership API. -func NewMembersAPI(c Client) MembersAPI { - return &httpMembersAPI{ - client: c, - } -} - -type MembersAPI interface { - // List enumerates the current cluster membership. - List(ctx context.Context) ([]Member, error) - - // Add instructs etcd to accept a new Member into the cluster. - Add(ctx context.Context, peerURL string) (*Member, error) - - // Remove demotes an existing Member out of the cluster. - Remove(ctx context.Context, mID string) error - - // Update instructs etcd to update an existing Member in the cluster. - Update(ctx context.Context, mID string, peerURLs []string) error - - // Leader gets current leader of the cluster - Leader(ctx context.Context) (*Member, error) -} - -type httpMembersAPI struct { - client httpClient -} - -func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { - req := &membersAPIActionList{} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - - var mCollection memberCollection - if err := json.Unmarshal(body, &mCollection); err != nil { - return nil, err - } - - return []Member(mCollection), nil -} - -func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { - urls, err := types.NewURLs([]string{peerURL}) - if err != nil { - return nil, err - } - - req := &membersAPIActionAdd{peerURLs: urls} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusCreated { - var merr membersError - if err := json.Unmarshal(body, &merr); err != nil { - return nil, err - } - return nil, merr - } - - var memb Member - if err := json.Unmarshal(body, &memb); err != nil { - return nil, err - } - - return &memb, nil -} - -func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error { - urls, err := types.NewURLs(peerURLs) - if err != nil { - return err - } - - req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { - return err - } - - if resp.StatusCode != http.StatusNoContent { - var merr membersError - if err := json.Unmarshal(body, &merr); err != nil { - return err - } - return merr - } - - return nil -} - -func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error { - req := &membersAPIActionRemove{memberID: memberID} - resp, _, err := m.client.Do(ctx, 
req) - if err != nil { - return err - } - - return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone) -} - -func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) { - req := &membersAPIActionLeader{} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - - var leader Member - if err := json.Unmarshal(body, &leader); err != nil { - return nil, err - } - - return &leader, nil -} - -type membersAPIActionList struct{} - -func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type membersAPIActionRemove struct { - memberID string -} - -func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - u.Path = path.Join(u.Path, d.memberID) - req, _ := http.NewRequest("DELETE", u.String(), nil) - return req -} - -type membersAPIActionAdd struct { - peerURLs types.URLs -} - -func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} - b, _ := json.Marshal(&m) - req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b)) - req.Header.Set("Content-Type", "application/json") - return req -} - -type membersAPIActionUpdate struct { - memberID string - peerURLs types.URLs -} - -func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} - u.Path = path.Join(u.Path, a.memberID) - b, _ := json.Marshal(&m) - req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b)) - req.Header.Set("Content-Type", "application/json") - return req -} - -func assertStatusCode(got int, want ...int) (err error) { - for _, w := range want { - if w == got { - return nil - } - } - return fmt.Errorf("unexpected status code %d", got) -} - -type membersAPIActionLeader struct{} - -func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - u.Path = path.Join(u.Path, defaultLeaderSuffix) - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -// v2MembersURL add the necessary path to the provided endpoint -// to route requests to the default v2 members API. -func v2MembersURL(ep url.URL) *url.URL { - ep.Path = path.Join(ep.Path, defaultV2MembersPrefix) - return &ep -} - -type membersError struct { - Message string `json:"message"` - Code int `json:"-"` -} - -func (e membersError) Error() string { - return e.Message -} diff --git a/vendor/src/github.com/coreos/etcd/client/srv.go b/vendor/src/github.com/coreos/etcd/client/srv.go deleted file mode 100644 index 06197967c..000000000 --- a/vendor/src/github.com/coreos/etcd/client/srv.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
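A reference sketch of the members API removed in this hunk, under the same assumptions as the earlier keys sketch (vendored import path, client.New from the package's client.go, placeholder endpoint):

    package main

    import (
        "fmt"
        "log"

        "github.com/coreos/etcd/client"
        "golang.org/x/net/context"
    )

    func main() {
        c, err := client.New(client.Config{
            Endpoints: []string{"http://127.0.0.1:2379"}, // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
        mapi := client.NewMembersAPI(c)

        members, err := mapi.List(context.Background())
        if err != nil {
            log.Fatal(err)
        }
        for _, m := range members {
            fmt.Printf("%s\t%s\t%v\n", m.ID, m.Name, m.ClientURLs)
        }

        // Leader hits the /v2/members/leader suffix shown below.
        if leader, err := mapi.Leader(context.Background()); err == nil {
            fmt.Println("leader:", leader.Name)
        }
    }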
- -package client - -import ( - "fmt" - "net" - "net/url" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV -) - -type srvDiscover struct{} - -// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. -func NewSRVDiscover() Discoverer { - return &srvDiscover{} -} - -// Discover looks up the etcd servers for the domain. -func (d *srvDiscover) Discover(domain string) ([]string, error) { - var urls []*url.URL - - updateURLs := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", domain) - if err != nil { - return err - } - for _, srv := range addrs { - urls = append(urls, &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), - }) - } - return nil - } - - errHTTPS := updateURLs("etcd-client-ssl", "https") - errHTTP := updateURLs("etcd-client", "http") - - if errHTTPS != nil && errHTTP != nil { - return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) - } - - endpoints := make([]string, len(urls)) - for i := range urls { - endpoints[i] = urls[i].String() - } - return endpoints, nil -} diff --git a/vendor/src/github.com/coreos/etcd/client/util.go b/vendor/src/github.com/coreos/etcd/client/util.go deleted file mode 100644 index fc0800b3d..000000000 --- a/vendor/src/github.com/coreos/etcd/client/util.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. -func IsKeyNotFound(err error) bool { - if cErr, ok := err.(Error); ok { - return cErr.Code == ErrorCodeKeyNotFound - } - return false -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/crc/crc.go b/vendor/src/github.com/coreos/etcd/pkg/crc/crc.go deleted file mode 100644 index 4b998a484..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/crc/crc.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package crc provides utility function for cyclic redundancy check -// algorithms. -package crc - -import ( - "hash" - "hash/crc32" -) - -// The size of a CRC-32 checksum in bytes. -const Size = 4 - -type digest struct { - crc uint32 - tab *crc32.Table -} - -// New creates a new hash.Hash32 computing the CRC-32 checksum -// using the polynomial represented by the Table. -// Modified by xiangli to take a prevcrc. 
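The srv.go hunk above resolved client endpoints from DNS SRV records, and util.go's IsKeyNotFound wraps the error-code check shown earlier. A discovery sketch with a placeholder domain:

    package main

    import (
        "fmt"
        "log"

        "github.com/coreos/etcd/client"
    )

    func main() {
        // Resolves _etcd-client-ssl._tcp.<domain> and _etcd-client._tcp.<domain>,
        // per srv.go above; the domain is a placeholder.
        d := client.NewSRVDiscover()
        endpoints, err := d.Discover("example.com")
        if err != nil {
            log.Fatal(err) // fails only when both lookups fail
        }
        fmt.Println(endpoints) // e.g. [https://etcd0.example.com:2379 ...]
    }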
-func New(prev uint32, tab *crc32.Table) hash.Hash32 { return &digest{prev, tab} } - -func (d *digest) Size() int { return Size } - -func (d *digest) BlockSize() int { return 1 } - -func (d *digest) Reset() { d.crc = 0 } - -func (d *digest) Write(p []byte) (n int, err error) { - d.crc = crc32.Update(d.crc, d.tab, p) - return len(p), nil -} - -func (d *digest) Sum32() uint32 { return d.crc } - -func (d *digest) Sum(in []byte) []byte { - s := d.Sum32() - return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go deleted file mode 100644 index 145886a1a..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package fileutil implements utility functions related to files and paths. -package fileutil - -import ( - "io/ioutil" - "os" - "path" - "sort" - - "github.com/coreos/pkg/capnslog" -) - -const ( - privateFileMode = 0600 - // owner can make/remove files inside the directory - privateDirMode = 0700 -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "fileutil") -) - -// IsDirWriteable checks if dir is writable by writing and removing a file -// to dir. It returns nil if dir is writable. -func IsDirWriteable(dir string) error { - f := path.Join(dir, ".touch") - if err := ioutil.WriteFile(f, []byte(""), privateFileMode); err != nil { - return err - } - return os.Remove(f) -} - -// ReadDir returns the filenames in the given directory in sorted order. -func ReadDir(dirpath string) ([]string, error) { - dir, err := os.Open(dirpath) - if err != nil { - return nil, err - } - defer dir.Close() - names, err := dir.Readdirnames(-1) - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory -// does not exists. TouchDirAll also ensures the given directory is writable. -func TouchDirAll(dir string) error { - err := os.MkdirAll(dir, privateDirMode) - if err != nil && err != os.ErrExist { - return err - } - return IsDirWriteable(dir) -} - -func Exist(name string) bool { - _, err := os.Stat(name) - return err == nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go deleted file mode 100644 index bf411d3a1..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
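For reference, a sketch of the fileutil helpers removed here; the directory path is a placeholder:

    package main

    import (
        "fmt"
        "log"

        "github.com/coreos/etcd/pkg/fileutil"
    )

    func main() {
        dir := "/tmp/etcd-demo" // placeholder path

        // Creates the directory with 0700 if needed and verifies it is
        // writable by touching and removing a probe file, per TouchDirAll
        // and IsDirWriteable above.
        if err := fileutil.TouchDirAll(dir); err != nil {
            log.Fatal(err)
        }

        names, err := fileutil.ReadDir(dir) // sorted filenames
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(names, fileutil.Exist(dir)) // [] true (on first run)
    }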
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -type Lock interface { - // Name returns the name of the file. - Name() string - // TryLock acquires exclusivity on the lock without blocking. - TryLock() error - // Lock acquires exclusivity on the lock. - Lock() error - // Unlock unlocks the lock. - Unlock() error - // Destroy should be called after Unlock to clean up - // the resources. - Destroy() error -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go deleted file mode 100644 index bd2bc8676..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -import ( - "errors" - "os" - "syscall" - "time" -) - -var ( - ErrLocked = errors.New("file already locked") -) - -type lock struct { - fname string - file *os.File -} - -func (l *lock) Name() string { - return l.fname -} - -func (l *lock) TryLock() error { - err := os.Chmod(l.fname, syscall.DMEXCL|0600) - if err != nil { - return err - } - - f, err := os.Open(l.fname) - if err != nil { - return ErrLocked - } - - l.file = f - return nil -} - -func (l *lock) Lock() error { - err := os.Chmod(l.fname, syscall.DMEXCL|0600) - if err != nil { - return err - } - - for { - f, err := os.Open(l.fname) - if err == nil { - l.file = f - return nil - } - time.Sleep(10 * time.Millisecond) - } -} - -func (l *lock) Unlock() error { - return l.file.Close() -} - -func (l *lock) Destroy() error { - return nil -} - -func NewLock(file string) (Lock, error) { - l := &lock{fname: file} - return l, nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go deleted file mode 100644 index e3b0a0176..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build solaris - -package fileutil - -import ( - "errors" - "os" - "syscall" -) - -var ( - ErrLocked = errors.New("file already locked") -) - -type lock struct { - fd int - file *os.File -} - -func (l *lock) Name() string { - return l.file.Name() -} - -func (l *lock) TryLock() error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Pid = 0 - lock.Type = syscall.F_WRLCK - lock.Whence = 0 - lock.Pid = 0 - err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock) - if err != nil && err == syscall.EAGAIN { - return ErrLocked - } - return err -} - -func (l *lock) Lock() error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_WRLCK - lock.Whence = 0 - lock.Pid = 0 - return syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock) -} - -func (l *lock) Unlock() error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_UNLCK - lock.Whence = 0 - err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock) - if err != nil && err == syscall.EAGAIN { - return ErrLocked - } - return err -} - -func (l *lock) Destroy() error { - return l.file.Close() -} - -func NewLock(file string) (Lock, error) { - f, err := os.OpenFile(file, os.O_WRONLY, 0600) - if err != nil { - return nil, err - } - l := &lock{int(f.Fd()), f} - return l, nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go deleted file mode 100644 index 4f90e42ac..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows,!plan9,!solaris - -package fileutil - -import ( - "errors" - "os" - "syscall" -) - -var ( - ErrLocked = errors.New("file already locked") -) - -type lock struct { - fd int - file *os.File -} - -func (l *lock) Name() string { - return l.file.Name() -} - -func (l *lock) TryLock() error { - err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB) - if err != nil && err == syscall.EWOULDBLOCK { - return ErrLocked - } - return err -} - -func (l *lock) Lock() error { - return syscall.Flock(l.fd, syscall.LOCK_EX) -} - -func (l *lock) Unlock() error { - return syscall.Flock(l.fd, syscall.LOCK_UN) -} - -func (l *lock) Destroy() error { - return l.file.Close() -} - -func NewLock(file string) (Lock, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - l := &lock{int(f.Fd()), f} - return l, nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go deleted file mode 100644 index ddca9a669..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
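A usage sketch for the Lock interface, assuming the flock-based Unix build above; that build's NewLock opens an existing file, so the sketch creates it first. The path is a placeholder:

    package main

    import (
        "log"
        "os"

        "github.com/coreos/etcd/pkg/fileutil"
    )

    func main() {
        path := "/tmp/etcd-demo.lock" // placeholder
        f, err := os.Create(path)     // the Unix NewLock opens an existing file
        if err != nil {
            log.Fatal(err)
        }
        f.Close()

        l, err := fileutil.NewLock(path)
        if err != nil {
            log.Fatal(err)
        }

        // TryLock maps EWOULDBLOCK to ErrLocked on the Unix build.
        if err := l.TryLock(); err != nil {
            if err == fileutil.ErrLocked {
                log.Fatal("another process holds the lock")
            }
            log.Fatal(err)
        }

        // ... critical section ...

        l.Unlock()
        l.Destroy() // per the interface comment, called after Unlock
    }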
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build windows - -package fileutil - -import ( - "errors" - "os" -) - -var ( - ErrLocked = errors.New("file already locked") -) - -type lock struct { - fd int - file *os.File -} - -func (l *lock) Name() string { - return l.file.Name() -} - -func (l *lock) TryLock() error { - return nil -} - -func (l *lock) Lock() error { - return nil -} - -func (l *lock) Unlock() error { - return nil -} - -func (l *lock) Destroy() error { - return l.file.Close() -} - -func NewLock(file string) (Lock, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - l := &lock{int(f.Fd()), f} - return l, nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go deleted file mode 100644 index c1a952bb7..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !linux - -package fileutil - -import "os" - -// Preallocate tries to allocate the space for given -// file. This operation is only supported on linux by a -// few filesystems (btrfs, ext4, etc.). -// If the operation is unsupported, no error will be returned. -// Otherwise, the error encountered will be returned. -func Preallocate(f *os.File, sizeInBytes int) error { - return nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go deleted file mode 100644 index c4bd4f4c8..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package fileutil - -import ( - "os" - "syscall" -) - -// Preallocate tries to allocate the space for given -// file. This operation is only supported on linux by a -// few filesystems (btrfs, ext4, etc.). -// If the operation is unsupported, no error will be returned. 
-// Otherwise, the error encountered will be returned. -func Preallocate(f *os.File, sizeInBytes int) error { - // use mode = 1 to keep size - // see FALLOC_FL_KEEP_SIZE - err := syscall.Fallocate(int(f.Fd()), 1, 0, int64(sizeInBytes)) - if err != nil { - errno, ok := err.(syscall.Errno) - // treat not support as nil error - if ok && errno == syscall.ENOTSUP { - return nil - } - return err - } - return nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go deleted file mode 100644 index 375aa9719..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -import ( - "os" - "path" - "sort" - "strings" - "time" -) - -func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { - errC := make(chan error, 1) - go func() { - for { - fnames, err := ReadDir(dirname) - if err != nil { - errC <- err - return - } - newfnames := make([]string, 0) - for _, fname := range fnames { - if strings.HasSuffix(fname, suffix) { - newfnames = append(newfnames, fname) - } - } - sort.Strings(newfnames) - for len(newfnames) > int(max) { - f := path.Join(dirname, newfnames[0]) - l, err := NewLock(f) - if err != nil { - errC <- err - return - } - err = l.TryLock() - if err != nil { - break - } - err = os.Remove(f) - if err != nil { - errC <- err - return - } - err = l.Unlock() - if err != nil { - plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) - errC <- err - return - } - err = l.Destroy() - if err != nil { - plog.Errorf("error destroying lock %s when purging file (%v)", l.Name(), err) - errC <- err - return - } - plog.Infof("purged file %s successfully", f) - newfnames = newfnames[1:] - } - select { - case <-time.After(interval): - case <-stop: - return - } - } - }() - return errC -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go deleted file mode 100644 index cd7fff08f..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
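The preallocation and purge helpers above were used together for WAL-style segment files. A combined sketch with placeholder paths and retention values:

    package main

    import (
        "log"
        "os"
        "path/filepath"
        "time"

        "github.com/coreos/etcd/pkg/fileutil"
    )

    func main() {
        dir := "/tmp/etcd-demo" // placeholder
        if err := os.MkdirAll(dir, 0700); err != nil {
            log.Fatal(err)
        }

        f, err := os.Create(filepath.Join(dir, "0.wal"))
        if err != nil {
            log.Fatal(err)
        }
        // Reserve 64 MiB up front; FALLOC_FL_KEEP_SIZE on Linux keeps the
        // visible size unchanged, and unsupported platforms return nil.
        if err := fileutil.Preallocate(f, 64*1024*1024); err != nil {
            log.Fatal(err)
        }
        f.Close()

        // Retain at most 5 files ending in "wal", re-checking every 30s
        // until stop is closed; the channel reports the first fatal error.
        stop := make(chan struct{})
        defer close(stop)
        errc := fileutil.PurgeFile(dir, "wal", 5, 30*time.Second, stop)

        select {
        case err := <-errc:
            log.Fatal(err)
        case <-time.After(time.Minute): // demo: let one purge cycle run
        }
    }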
- -// +build !linux - -package fileutil - -import "os" - -// Fdatasync is similar to fsync(), but does not flush modified metadata -// unless that metadata is needed in order to allow a subsequent data retrieval -// to be correctly handled. -func Fdatasync(f *os.File) error { - return f.Sync() -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go deleted file mode 100644 index 14c4b4808..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package fileutil - -import ( - "os" - "syscall" -) - -// Fdatasync is similar to fsync(), but does not flush modified metadata -// unless that metadata is needed in order to allow a subsequent data retrieval -// to be correctly handled. -func Fdatasync(f *os.File) error { - return syscall.Fdatasync(int(f.Fd())) -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/src/github.com/coreos/etcd/pkg/idutil/id.go deleted file mode 100644 index 6f1d37911..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/idutil/id.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package idutil implements utility functions for generating unique, -// randomized ids. -package idutil - -import ( - "math" - "sync" - "time" -) - -const ( - tsLen = 5 * 8 - cntLen = 8 - suffixLen = tsLen + cntLen -) - -// Generator generates unique identifiers based on counters, timestamps, and -// a node member ID. -// -// The initial id is in this format: -// High order byte is memberID, next 5 bytes are from timestamp, -// and low order 2 bytes are 0s. -// | prefix | suffix | -// | 2 bytes | 5 bytes | 1 byte | -// | memberID | timestamp | cnt | -// -// The timestamp 5 bytes is different when the machine is restart -// after 1 ms and before 35 years. -// -// It increases suffix to generate the next id. -// The count field may overflow to timestamp field, which is intentional. -// It helps to extend the event window to 2^56. This doesn't break that -// id generated after restart is unique because etcd throughput is << -// 256req/ms(250k reqs/second). 
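Per the layout in the comment above, the member prefix occupies the high 16 bits and survives counter overflow, because Next masks the suffix to the low 48 bits. A short sketch; the member ID is a placeholder:

    package main

    import (
        "fmt"
        "time"

        "github.com/coreos/etcd/pkg/idutil"
    )

    func main() {
        // Layout: | 2 bytes memberID | 5 bytes timestamp (ms) | 1 byte counter |
        gen := idutil.NewGenerator(0x1234, time.Now())

        id := gen.Next()
        fmt.Printf("id=%016x memberID=%04x\n", id, uint16(id>>48))

        // Successive calls increment the low 48 bits; the prefix is intact.
        fmt.Printf("next=%016x\n", gen.Next())
    }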
-type Generator struct { - mu sync.Mutex - // high order 2 bytes - prefix uint64 - // low order 6 bytes - suffix uint64 -} - -func NewGenerator(memberID uint16, now time.Time) *Generator { - prefix := uint64(memberID) << suffixLen - unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond) - suffix := lowbit(unixMilli, tsLen) << cntLen - return &Generator{ - prefix: prefix, - suffix: suffix, - } -} - -// Next generates a id that is unique. -func (g *Generator) Next() uint64 { - g.mu.Lock() - defer g.mu.Unlock() - g.suffix++ - id := g.prefix | lowbit(g.suffix, suffixLen) - return id -} - -func lowbit(x uint64, n uint) uint64 { - return x & (math.MaxUint64 >> (64 - n)) -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/src/github.com/coreos/etcd/pkg/pathutil/path.go deleted file mode 100644 index f26254ba9..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/pathutil/path.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pathutil implements utility functions for handling slash-separated -// paths. -package pathutil - -import "path" - -// CanonicalURLPath returns the canonical url path for p, which follows the rules: -// 1. the path always starts with "/" -// 2. replace multiple slashes with a single slash -// 3. replace each '.' '..' path name element with equivalent one -// 4. keep the trailing slash -// The function is borrowed from stdlib http.cleanPath in server.go. -func CanonicalURLPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root, - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go b/vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go deleted file mode 100644 index 9d640a8af..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil defines interfaces for handling Protocol Buffer objects. 
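The rules in CanonicalURLPath's comment are easiest to see on concrete inputs; a sketch with expected outputs noted inline:

    package main

    import (
        "fmt"

        "github.com/coreos/etcd/pkg/pathutil"
    )

    func main() {
        fmt.Println(pathutil.CanonicalURLPath(""))           // "/"
        fmt.Println(pathutil.CanonicalURLPath("a//b/./c"))   // "/a/b/c"
        fmt.Println(pathutil.CanonicalURLPath("/a/b/../c/")) // "/a/c/" (trailing slash kept)
    }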
-package pbutil - -import "github.com/coreos/pkg/capnslog" - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "flags") -) - -type Marshaler interface { - Marshal() (data []byte, err error) -} - -type Unmarshaler interface { - Unmarshal(data []byte) error -} - -func MustMarshal(m Marshaler) []byte { - d, err := m.Marshal() - if err != nil { - plog.Panicf("marshal should never fail (%v)", err) - } - return d -} - -func MustUnmarshal(um Unmarshaler, data []byte) { - if err := um.Unmarshal(data); err != nil { - plog.Panicf("unmarshal should never fail (%v)", err) - } -} - -func MaybeUnmarshal(um Unmarshaler, data []byte) bool { - if err := um.Unmarshal(data); err != nil { - return false - } - return true -} - -func GetBool(v *bool) (vv bool, set bool) { - if v == nil { - return false, false - } - return *v, true -} - -func Boolp(b bool) *bool { return &b } diff --git a/vendor/src/github.com/coreos/etcd/pkg/types/doc.go b/vendor/src/github.com/coreos/etcd/pkg/types/doc.go deleted file mode 100644 index 04b4c38d1..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/types/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package types declares various data types and implements type-checking -// functions. -package types diff --git a/vendor/src/github.com/coreos/etcd/pkg/types/id.go b/vendor/src/github.com/coreos/etcd/pkg/types/id.go deleted file mode 100644 index 88cb9e634..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/types/id.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "strconv" -) - -// ID represents a generic identifier which is canonically -// stored as a uint64 but is typically represented as a -// base-16 string for input/output -type ID uint64 - -func (i ID) String() string { - return strconv.FormatUint(uint64(i), 16) -} - -// IDFromString attempts to create an ID from a base-16 string. 
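A round-trip sketch for the base-16 ID type above; the literal value is arbitrary:

    package main

    import (
        "fmt"
        "log"

        "github.com/coreos/etcd/pkg/types"
    )

    func main() {
        id := types.ID(0x1234abcd)
        s := id.String() // base-16, no "0x" prefix: "1234abcd"

        back, err := types.IDFromString(s)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(s, back == id) // 1234abcd true
    }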
-func IDFromString(s string) (ID, error) { - i, err := strconv.ParseUint(s, 16, 64) - return ID(i), err -} - -// IDSlice implements the sort interface -type IDSlice []ID - -func (p IDSlice) Len() int { return len(p) } -func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } -func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/src/github.com/coreos/etcd/pkg/types/set.go b/vendor/src/github.com/coreos/etcd/pkg/types/set.go deleted file mode 100644 index bb997174c..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/types/set.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "sync" -) - -type Set interface { - Add(string) - Remove(string) - Contains(string) bool - Equals(Set) bool - Length() int - Values() []string - Copy() Set - Sub(Set) Set -} - -func NewUnsafeSet(values ...string) *unsafeSet { - set := &unsafeSet{make(map[string]struct{})} - for _, v := range values { - set.Add(v) - } - return set -} - -func NewThreadsafeSet(values ...string) *tsafeSet { - us := NewUnsafeSet(values...) - return &tsafeSet{us, sync.RWMutex{}} -} - -type unsafeSet struct { - d map[string]struct{} -} - -// Add adds a new value to the set (no-op if the value is already present) -func (us *unsafeSet) Add(value string) { - us.d[value] = struct{}{} -} - -// Remove removes the given value from the set -func (us *unsafeSet) Remove(value string) { - delete(us.d, value) -} - -// Contains returns whether the set contains the given value -func (us *unsafeSet) Contains(value string) (exists bool) { - _, exists = us.d[value] - return -} - -// ContainsAll returns whether the set contains all given values -func (us *unsafeSet) ContainsAll(values []string) bool { - for _, s := range values { - if !us.Contains(s) { - return false - } - } - return true -} - -// Equals returns whether the contents of two sets are identical -func (us *unsafeSet) Equals(other Set) bool { - v1 := sort.StringSlice(us.Values()) - v2 := sort.StringSlice(other.Values()) - v1.Sort() - v2.Sort() - return reflect.DeepEqual(v1, v2) -} - -// Length returns the number of elements in the set -func (us *unsafeSet) Length() int { - return len(us.d) -} - -// Values returns the values of the Set in an unspecified order. 
-func (us *unsafeSet) Values() (values []string) { - values = make([]string, 0) - for val := range us.d { - values = append(values, val) - } - return -} - -// Copy creates a new Set containing the values of the first -func (us *unsafeSet) Copy() Set { - cp := NewUnsafeSet() - for val := range us.d { - cp.Add(val) - } - - return cp -} - -// Sub removes all elements in other from the set -func (us *unsafeSet) Sub(other Set) Set { - oValues := other.Values() - result := us.Copy().(*unsafeSet) - - for _, val := range oValues { - if _, ok := result.d[val]; !ok { - continue - } - delete(result.d, val) - } - - return result -} - -type tsafeSet struct { - us *unsafeSet - m sync.RWMutex -} - -func (ts *tsafeSet) Add(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Add(value) -} - -func (ts *tsafeSet) Remove(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Remove(value) -} - -func (ts *tsafeSet) Contains(value string) (exists bool) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Contains(value) -} - -func (ts *tsafeSet) Equals(other Set) bool { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Equals(other) -} - -func (ts *tsafeSet) Length() int { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Length() -} - -func (ts *tsafeSet) Values() (values []string) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Values() -} - -func (ts *tsafeSet) Copy() Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Copy().(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} - -func (ts *tsafeSet) Sub(other Set) Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Sub(other).(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/types/slice.go b/vendor/src/github.com/coreos/etcd/pkg/types/slice.go deleted file mode 100644 index 0327950f7..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/types/slice.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -// Uint64Slice implements sort interface -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/src/github.com/coreos/etcd/pkg/types/urls.go b/vendor/src/github.com/coreos/etcd/pkg/types/urls.go deleted file mode 100644 index ce2483ffa..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/types/urls.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
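For context on the set types being dropped: the thread-safe variant is just the unsafe map wrapped in an RWMutex. A minimal stand-alone stand-in with the same Add/Contains/Sub semantics (hypothetical names, and it assumes Sub is never called with the receiver as its own argument):

```
package main

import (
	"fmt"
	"sync"
)

// stringSet is a toy stand-in for the deleted tsafeSet: a string set
// guarded by an RWMutex.
type stringSet struct {
	mu sync.RWMutex
	d  map[string]struct{}
}

func newStringSet(values ...string) *stringSet {
	s := &stringSet{d: make(map[string]struct{})}
	for _, v := range values {
		s.d[v] = struct{}{}
	}
	return s
}

func (s *stringSet) Add(v string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.d[v] = struct{}{}
}

func (s *stringSet) Contains(v string) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	_, ok := s.d[v]
	return ok
}

// Sub returns the members of s not present in other, matching the
// semantics of the deleted Set.Sub.
func (s *stringSet) Sub(other *stringSet) *stringSet {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := newStringSet()
	for v := range s.d {
		if !other.Contains(v) {
			out.d[v] = struct{}{}
		}
	}
	return out
}

func main() {
	a := newStringSet("n1", "n2", "n3")
	b := newStringSet("n2")
	fmt.Println(a.Contains("n2"))        // true
	fmt.Println(a.Sub(b).Contains("n2")) // false
}
```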
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "errors" - "fmt" - "net" - "net/url" - "sort" - "strings" -) - -type URLs []url.URL - -func NewURLs(strs []string) (URLs, error) { - all := make([]url.URL, len(strs)) - if len(all) == 0 { - return nil, errors.New("no valid URLs given") - } - for i, in := range strs { - in = strings.TrimSpace(in) - u, err := url.Parse(in) - if err != nil { - return nil, err - } - if u.Scheme != "http" && u.Scheme != "https" { - return nil, fmt.Errorf("URL scheme must be http or https: %s", in) - } - if _, _, err := net.SplitHostPort(u.Host); err != nil { - return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) - } - if u.Path != "" { - return nil, fmt.Errorf("URL must not contain a path: %s", in) - } - all[i] = *u - } - us := URLs(all) - us.Sort() - - return us, nil -} - -func (us URLs) String() string { - return strings.Join(us.StringSlice(), ",") -} - -func (us *URLs) Sort() { - sort.Sort(us) -} -func (us URLs) Len() int { return len(us) } -func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } -func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } - -func (us URLs) StringSlice() []string { - out := make([]string, len(us)) - for i := range us { - out[i] = us[i].String() - } - - return out -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/src/github.com/coreos/etcd/pkg/types/urlsmap.go deleted file mode 100644 index 4fe9218c7..000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/types/urlsmap.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "sort" - "strings" -) - -// URLsMap is a map from a name to its URLs. -type URLsMap map[string]URLs - -// NewURLsMap returns a URLsMap instantiated from the given string, -// which consists of discovery-formatted names-to-URLs, like: -// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 -func NewURLsMap(s string) (URLsMap, error) { - m := parse(s) - - cl := URLsMap{} - for name, urls := range m { - us, err := NewURLs(urls) - if err != nil { - return nil, err - } - cl[name] = us - } - return cl, nil -} - -// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. -func (c URLsMap) String() string { - var pairs []string - for name, urls := range c { - for _, url := range urls { - pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) - } - } - sort.Strings(pairs) - return strings.Join(pairs, ",") -} - -// URLs returns a list of all URLs. 
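The validation rules in NewURLs above are worth keeping in mind for anyone re-vendoring this code: scheme, host:port form, and an empty path are all enforced. A self-contained sketch of the same checks (validatePeerURL is an illustrative name):

```
package main

import (
	"fmt"
	"net"
	"net/url"
	"strings"
)

// validatePeerURL applies the same checks as the deleted types.NewURLs:
// http/https scheme, a host:port address, and no path component.
func validatePeerURL(in string) (*url.URL, error) {
	u, err := url.Parse(strings.TrimSpace(in))
	if err != nil {
		return nil, err
	}
	if u.Scheme != "http" && u.Scheme != "https" {
		return nil, fmt.Errorf("URL scheme must be http or https: %s", in)
	}
	if _, _, err := net.SplitHostPort(u.Host); err != nil {
		return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
	}
	if u.Path != "" {
		return nil, fmt.Errorf("URL must not contain a path: %s", in)
	}
	return u, nil
}

func main() {
	for _, s := range []string{
		"http://10.0.0.1:2380",   // ok
		"http://10.0.0.1",        // missing port
		"ftp://10.0.0.1:2380",    // bad scheme
		"http://10.0.0.1:2380/x", // has a path
	} {
		_, err := validatePeerURL(s)
		fmt.Printf("%-26s err=%v\n", s, err)
	}
}
```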
-// The returned list is sorted in ascending lexicographical order. -func (c URLsMap) URLs() []string { - var urls []string - for _, us := range c { - for _, u := range us { - urls = append(urls, u.String()) - } - } - sort.Strings(urls) - return urls -} - -// Len returns the size of URLsMap. -func (c URLsMap) Len() int { - return len(c) -} - -// parse parses the given string and returns a map listing the values specified for each key. -func parse(s string) map[string][]string { - m := make(map[string][]string) - for s != "" { - key := s - if i := strings.IndexAny(key, ","); i >= 0 { - key, s = key[:i], key[i+1:] - } else { - s = "" - } - if key == "" { - continue - } - value := "" - if i := strings.Index(key, "="); i >= 0 { - key, value = key[:i], key[i+1:] - } - m[key] = append(m[key], value) - } - return m -} diff --git a/vendor/src/github.com/coreos/etcd/raft/design.md b/vendor/src/github.com/coreos/etcd/raft/design.md deleted file mode 100644 index 7bc0531dc..000000000 --- a/vendor/src/github.com/coreos/etcd/raft/design.md +++ /dev/null @@ -1,57 +0,0 @@ -## Progress - -Progress represents a follower’s progress in the view of the leader. Leader maintains progresses of all followers, and sends `replication message` to the follower based on its progress. - -`replication message` is a `msgApp` with log entries. - -A progress has two attribute: `match` and `next`. `match` is the index of the highest known matched entry. If leader knows nothing about follower’s replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. Leader puts entries from `next` to its latest one in next `replication message`. - -A progress is in one of the three state: `probe`, `replicate`, `snapshot`. - -``` - +--------------------------------------------------------+ - | send snapshot | - | | - +---------+----------+ +----------v---------+ - +---> probe | | snapshot | - | | max inflight = 1 <----------------------------------+ max inflight = 0 | - | +---------+----------+ +--------------------+ - | | 1. snapshot success - | | (next=snapshot.index + 1) - | | 2. snapshot failure - | | (no change) - | | 3. receives msgAppResp(rej=false&&index>lastsnap.index) - | | (match=m.index,next=match+1) -receives msgAppResp(rej=true) -(next=match+1)| | - | | - | | - | | receives msgAppResp(rej=false&&index>match) - | | (match=m.index,next=match+1) - | | - | | - | | - | +---------v----------+ - | | replicate | - +---+ max inflight = n | - +--------------------+ -``` - -When the progress of a follower is in `probe` state, leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message` slowly and probing the actual progress of the follower. A `msgHeartbeatResp` or a `msgAppResp` with reject might trigger the sending of the next `replication message`. - -When the progress of a follower is in `replicate` state, leader sends `replication message`, then optimistically increases `next` to the latest entry sent. This is an optimized state for fast replicating log entries to the follower. - -When the progress of a follower is in `snapshot` state, leader stops sending any `replication message`. - -A newly elected leader sets the progresses of all the followers to `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends `replication message` to the follower and probes its progress. 
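The state diagram above reduces to a small transition table. Here is a toy model of just the probe/replicate transitions, ignoring inflight limits and the snapshot path; all names are illustrative:

```
package main

import "fmt"

type progressState int

const (
	stateProbe progressState = iota
	stateReplicate
	stateSnapshot
)

// progress is a toy model of the leader-side follower progress described
// above; it tracks only the transitions, not flow control.
type progress struct {
	state       progressState
	match, next uint64
}

// onAppResp models receiving msgAppResp(rej=false, index=idx).
func (p *progress) onAppResp(idx uint64) {
	if idx > p.match {
		p.match, p.next = idx, idx+1
		p.state = stateReplicate // start streaming entries
	}
}

// onReject models receiving msgAppResp(rej=true): fall back to probing
// and aggressively reset next to match+1.
func (p *progress) onReject() {
	p.state = stateProbe
	p.next = p.match + 1
}

func main() {
	p := &progress{state: stateProbe, match: 0, next: 42} // newly elected leader
	p.onAppResp(41)
	fmt.Println(p.state == stateReplicate, p.match, p.next) // true 41 42
	p.onReject()
	fmt.Println(p.state == stateProbe, p.next) // true 42
}
```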
- -A progress changes to `replicate` when the follower replies with a non-rejection `msgAppResp`, which implies that it has matched the index sent. At this point, leader starts to stream log entries to the follower fast. The progress will fall back to `probe` when the follower replies a rejection `msgAppResp` or the link layer reports the follower is unreachable. We aggressively reset `next` to `match`+1 since if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in `msgAppResp`. (We might end up with sending some duplicate entries when aggressively reset `next` too low. see open question) - -A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits until the success, failure or abortion of the previous snapshot sent. The progress will go back to `probe` after the sending result is applied. - -### Flow Control - -1. limit the max size of message sent per message. Max should be configurable. -Lower the cost at probing state as we limit the size per message; lower the penalty when aggressively decreased to a too low `next` - -2. limit the # of in flight messages < N when in `replicate` state. N should be configurable. Most implementation will have a sending buffer on top of its actual network transport layer (not blocking raft node). We want to make sure raft does not overflow that buffer, which can cause message dropping and triggering a bunch of unnecessary resending repeatedly. diff --git a/vendor/src/github.com/coreos/etcd/raft/doc.go b/vendor/src/github.com/coreos/etcd/raft/doc.go deleted file mode 100644 index 6ed0bcfad..000000000 --- a/vendor/src/github.com/coreos/etcd/raft/doc.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package raft sends and receives messages in the Protocol Buffer format -defined in the raftpb package. - -Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. -The state machine is kept in sync through the use of a replicated log. -For more details on Raft, see "In Search of an Understandable Consensus Algorithm" -(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout. - -A simple example application, _raftexample_, is also available to help illustrate -how to use this package in practice: -https://github.com/coreos/etcd/tree/master/contrib/raftexample - -Usage - -The primary object in raft is a Node. You either start a Node from scratch -using raft.StartNode or start a Node from some initial state using raft.RestartNode. 
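Point 2 of the flow-control notes above amounts to a fixed-size sliding window over last-entry indexes. The vendored implementation (the inflights type referenced by Progress later in this patch) is more elaborate, but a toy version looks like this:

```
package main

import "fmt"

// window is a toy version of the in-flight message window from point 2:
// at most size indexes may be outstanding at once.
type window struct {
	buf  []uint64
	size int
}

func newWindow(n int) *window { return &window{buf: make([]uint64, 0, n), size: n} }

func (w *window) full() bool { return len(w.buf) == w.size }

// add records the last entry index of a message just sent; indexes must
// be added in increasing order.
func (w *window) add(index uint64) {
	if w.full() {
		panic("cannot add into a full window")
	}
	w.buf = append(w.buf, index)
}

// freeTo releases every in-flight message whose last index is <= index,
// e.g. after the follower acknowledged up to that point.
func (w *window) freeTo(index uint64) {
	i := 0
	for i < len(w.buf) && w.buf[i] <= index {
		i++
	}
	w.buf = w.buf[i:]
}

func main() {
	w := newWindow(2)
	w.add(10)
	w.add(20)
	fmt.Println(w.full()) // true: stop sending until an ack frees space
	w.freeTo(10)
	fmt.Println(w.full()) // false
}
```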
- -To start a node from scratch: - - storage := raft.NewMemoryStorage() - c := &Config{ - ID: 0x01, - ElectionTick: 10, - HeartbeatTick: 1, - Storage: storage, - MaxSizePerMsg: 4096, - MaxInflightMsgs: 256, - } - n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) - -To restart a node from previous state: - - storage := raft.NewMemoryStorage() - - // recover the in-memory storage from persistent - // snapshot, state and entries. - storage.ApplySnapshot(snapshot) - storage.SetHardState(state) - storage.Append(entries) - - c := &Config{ - ID: 0x01, - ElectionTick: 10, - HeartbeatTick: 1, - Storage: storage, - MaxSizePerMsg: 4096, - MaxInflightMsgs: 256, - } - - // restart raft without peer information. - // peer information is already included in the storage. - n := raft.RestartNode(c) - -Now that you are holding onto a Node you have a few responsibilities: - -First, you must read from the Node.Ready() channel and process the updates -it contains. These steps may be performed in parallel, except as noted in step -2. - -1. Write HardState, Entries, and Snapshot to persistent storage if they are -not empty. Note that when writing an Entry with Index i, any -previously-persisted entries with Index >= i must be discarded. - -2. Send all Messages to the nodes named in the To field. It is important that -no messages be sent until after the latest HardState has been persisted to disk, -and all Entries written by any previous Ready batch (Messages may be sent while -entries from the same batch are being persisted). To reduce the I/O latency, an -optimization can be applied to make leader write to disk in parallel with its -followers (as explained at section 10.2.1 in Raft thesis). If any Message has type -MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be -large). - -Note: Marshalling messages is not thread-safe; it is important that you -make sure that no new entries are persisted while marshalling. -The easiest way to achieve this is to serialise the messages directly inside -your main raft loop. - -3. Apply Snapshot (if any) and CommittedEntries to the state machine. -If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() -to apply it to the node. The configuration change may be cancelled at this point -by setting the NodeID field to zero before calling ApplyConfChange -(but ApplyConfChange must be called one way or the other, and the decision to cancel -must be based solely on the state machine and not external information such as -the observed health of the node). - -4. Call Node.Advance() to signal readiness for the next batch of updates. -This may be done at any time after step 1, although all updates must be processed -in the order they were returned by Ready. - -Second, all persisted log entries must be made available via an -implementation of the Storage interface. The provided MemoryStorage -type can be used for this (if you repopulate its state upon a -restart), or you can supply your own disk-backed implementation. - -Third, when you receive a message from another node, pass it to Node.Step: - - func recvRaftRPC(ctx context.Context, m raftpb.Message) { - n.Step(ctx, m) - } - -Finally, you need to call Node.Tick() at regular intervals (probably -via a time.Ticker). Raft has two important timeouts: heartbeat and the -election timeout. However, internally to the raft package time is -represented by an abstract "tick". 
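The discard rule in step 1 above is easy to get wrong. A self-contained sketch of appending with truncation at the first incoming index (entry and persist are illustrative stand-ins, not the vendored API):

```
package main

import "fmt"

type entry struct{ Index, Term uint64 }

// persist applies the rule from step 1: writing an entry with index i
// discards any previously-persisted entries with index >= i.
// Both slices are assumed sorted by Index.
func persist(log, incoming []entry) []entry {
	if len(incoming) == 0 {
		return log
	}
	first := incoming[0].Index
	keep := log[:0:0] // fresh slice, avoid aliasing the old backing array
	for _, e := range log {
		if e.Index < first {
			keep = append(keep, e)
		}
	}
	return append(keep, incoming...)
}

func main() {
	log := []entry{{1, 1}, {2, 1}, {3, 1}}
	// A new leader overwrites index 3 with a term-2 entry.
	log = persist(log, []entry{{3, 2}, {4, 2}})
	fmt.Println(log) // [{1 1} {2 1} {3 2} {4 2}]
}
```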
- -The total state machine handling loop will look something like this: - - for { - select { - case <-s.Ticker: - n.Tick() - case rd := <-s.Node.Ready(): - saveToStorage(rd.State, rd.Entries, rd.Snapshot) - send(rd.Messages) - if !raft.IsEmptySnap(rd.Snapshot) { - processSnapshot(rd.Snapshot) - } - for _, entry := range rd.CommittedEntries { - process(entry) - if entry.Type == raftpb.EntryConfChange { - var cc raftpb.ConfChange - cc.Unmarshal(entry.Data) - s.Node.ApplyConfChange(cc) - } - s.Node.Advance() - case <-s.done: - return - } - } - -To propose changes to the state machine from your node take your application -data, serialize it into a byte slice and call: - - n.Propose(ctx, data) - -If the proposal is committed, data will appear in committed entries with type -raftpb.EntryNormal. There is no guarantee that a proposed command will be -committed; you may have to re-propose after a timeout. - -To add or remove node in a cluster, build ConfChange struct 'cc' and call: - - n.ProposeConfChange(ctx, cc) - -After config change is committed, some committed entry with type -raftpb.EntryConfChange will be returned. You must apply it to node through: - - var cc raftpb.ConfChange - cc.Unmarshal(data) - n.ApplyConfChange(cc) - -Note: An ID represents a unique node in a cluster for all time. A -given ID MUST be used only once even if the old node has been removed. -This means that for example IP addresses make poor node IDs since they -may be reused. Node IDs must be non-zero. - -Implementation notes - -This implementation is up to date with the final Raft thesis -(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our -implementation of the membership change protocol differs somewhat from -that described in chapter 4. The key invariant that membership changes -happen one node at a time is preserved, but in our implementation the -membership change takes effect when its entry is applied, not when it -is added to the log (so the entry is committed under the old -membership instead of the new). This is equivalent in terms of safety, -since the old and new configurations are guaranteed to overlap. - -To ensure that we do not attempt to commit two membership changes at -once by matching log positions (which would be unsafe since they -should have different quorum requirements), we simply disallow any -proposed membership change while any uncommitted change appears in -the leader's log. - -This approach introduces a problem when you try to remove a member -from a two-member cluster: If one of the members dies before the -other one receives the commit of the confchange entry, then the member -cannot be removed any more since the cluster cannot make progress. -For this reason it is highly recommended to use three or more nodes in -every cluster. - -MessageType - -Package raft sends and receives message in Protocol Buffer format (defined -in raftpb package). Each state (follower, candidate, leader) implements its -own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when -advancing with the given raftpb.Message. Each step is determined by its -raftpb.MessageType. Note that every step is checked by one common method -'Step' that safety-checks the terms of node and incoming message to prevent -stale log entries: - - 'MsgHup' is used for election. If a node is a follower or candidate, the - 'tick' function in 'raft' struct is set as 'tickElection'. 
If a follower or - candidate has not received any heartbeat before the election timeout, it - passes 'MsgHup' to its Step method and becomes (or remains) a candidate to - start a new election. - - 'MsgBeat' is an internal type that signals leaders to send a heartbeat of - the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in - the 'raft' struct is set as 'tickHeartbeat', and sends periodic heartbeat - messages of the 'MsgBeat' type to its followers. - - 'MsgProp' proposes to append data to its log entries. This is a special - type to redirect proposals to leader. Therefore, send method overwrites - raftpb.Message's term with its HardState's term to avoid attaching its - local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step' - method, the leader first calls the 'appendEntry' method to append entries - to its log, and then calls 'bcastAppend' method to send those entries to - its peers. When passed to candidate, 'MsgProp' is dropped. When passed to - follower, 'MsgProp' is stored in follower's mailbox(msgs) by the send - method. It is stored with sender's ID and later forwarded to leader by - rafthttp package. - - 'MsgApp' contains log entries to replicate. A leader calls bcastAppend, - which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp' - type. When 'MsgApp' is passed to candidate's Step method, candidate reverts - back to follower, because it indicates that there is a valid leader sending - 'MsgApp' messages. Candidate and follower respond to this message in - 'MsgAppResp' type. - - 'MsgAppResp' is response to log replication request('MsgApp'). When - 'MsgApp' is passed to candidate or follower's Step method, it responds by - calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft - mailbox. - - 'MsgVote' requests votes for election. When a node is a follower or - candidate and 'MsgHup' is passed to its Step method, then the node calls - 'campaign' method to campaign itself to become a leader. Once 'campaign' - method is called, the node becomes candidate and sends 'MsgVote' to peers - in cluster to request votes. When passed to leader or candidate's Step - method and the message's Term is lower than leader's or candidate's, - 'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true). - If leader or candidate receives 'MsgVote' with higher term, it will revert - back to follower. When 'MsgVote' is passed to follower, it votes for the - sender only when sender's last term is greater than MsgVote's term or - sender's last term is equal to MsgVote's term but sender's last committed - index is greater than or equal to follower's. - - 'MsgVoteResp' contains responses from voting request. When 'MsgVoteResp' is - passed to candidate, the candidate calculates how many votes it has won. If - it's more than majority (quorum), it becomes leader and calls 'bcastAppend'. - If candidate receives majority of votes of denials, it reverts back to - follower. - - 'MsgSnap' requests to install a snapshot message. When a node has just - become a leader or the leader receives 'MsgProp' message, it calls - 'bcastAppend' method, which then calls 'sendAppend' method to each - follower. In 'sendAppend', if a leader fails to get term or entries, - the leader requests snapshot by sending 'MsgSnap' type message. - - 'MsgSnapStatus' tells the result of snapshot install message. 
When a - follower rejected 'MsgSnap', it indicates the snapshot request with - 'MsgSnap' had failed from network issues which causes the network layer - to fail to send out snapshots to its followers. Then leader considers - follower's progress as probe. When 'MsgSnap' were not rejected, it - indicates that the snapshot succeeded and the leader sets follower's - progress to probe and resumes its log replication. - - 'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed - to candidate and message's term is higher than candidate's, the candidate - reverts back to follower and updates its committed index from the one in - this heartbeat. And it sends the message to its mailbox. When - 'MsgHeartbeat' is passed to follower's Step method and message's term is - higher than follower's, the follower updates its leaderID with the ID - from the message. - - 'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp' - is passed to leader's Step method, the leader knows which follower - responded. And only when the leader's last committed index is greater than - follower's Match index, the leader runs 'sendAppend` method. - - 'MsgUnreachable' tells that request(message) wasn't delivered. When - 'MsgUnreachable' is passed to leader's Step method, the leader discovers - that the follower that sent this 'MsgUnreachable' is not reachable, often - indicating 'MsgApp' is lost. When follower's progress state is replicate, - the leader sets it back to probe. - -*/ -package raft diff --git a/vendor/src/github.com/coreos/etcd/raft/log.go b/vendor/src/github.com/coreos/etcd/raft/log.go deleted file mode 100644 index 99cd1b31e..000000000 --- a/vendor/src/github.com/coreos/etcd/raft/log.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "fmt" - "log" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -type raftLog struct { - // storage contains all stable entries since the last snapshot. - storage Storage - - // unstable contains all unstable entries and snapshot. - // they will be saved into storage. - unstable unstable - - // committed is the highest log position that is known to be in - // stable storage on a quorum of nodes. - committed uint64 - // applied is the highest log position that the application has - // been instructed to apply to its state machine. - // Invariant: applied <= committed - applied uint64 - - logger Logger -} - -// newLog returns log using the given storage. It recovers the log to the state -// that it just commits and applies the latest snapshot. 
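The per-state 'step' dispatch described in the MessageType section above can be sketched with a function-valued field. This is a schematic stand-in, not the removed implementation:

```
package main

import "fmt"

type stateType int

const (
	follower stateType = iota
	candidate
	leader
)

type message struct {
	Type string
	Term uint64
}

type raftNode struct {
	state stateType
	term  uint64
	step  func(*raftNode, message) // stepFollower, stepCandidate, or stepLeader
}

// Step is the common entry point: it term-checks the message before
// handing it to the per-state step function, as described above.
func (r *raftNode) Step(m message) {
	switch {
	case m.Term > r.term:
		// Newer term: revert to follower before handling.
		r.term = m.Term
		r.becomeFollower()
	case m.Term < r.term:
		// Stale message: drop it (the real code may reply with a rejection).
		return
	}
	r.step(r, m)
}

func (r *raftNode) becomeFollower() {
	r.state = follower
	r.step = func(r *raftNode, m message) { fmt.Println("follower got", m.Type) }
}

func main() {
	r := &raftNode{}
	r.becomeFollower()
	r.Step(message{Type: "MsgApp", Term: 1}) // follower got MsgApp
}
```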
-func newLog(storage Storage, logger Logger) *raftLog { - if storage == nil { - log.Panic("storage must not be nil") - } - log := &raftLog{ - storage: storage, - logger: logger, - } - firstIndex, err := storage.FirstIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - lastIndex, err := storage.LastIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - log.unstable.offset = lastIndex + 1 - log.unstable.logger = logger - // Initialize our committed and applied pointers to the time of the last compaction. - log.committed = firstIndex - 1 - log.applied = firstIndex - 1 - - return log -} - -func (l *raftLog) String() string { - return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries)) -} - -// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise, -// it returns (last index of new entries, true). -func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) { - lastnewi = index + uint64(len(ents)) - if l.matchTerm(index, logTerm) { - ci := l.findConflict(ents) - switch { - case ci == 0: - case ci <= l.committed: - l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed) - default: - offset := index + 1 - l.append(ents[ci-offset:]...) - } - l.commitTo(min(committed, lastnewi)) - return lastnewi, true - } - return 0, false -} - -func (l *raftLog) append(ents ...pb.Entry) uint64 { - if len(ents) == 0 { - return l.lastIndex() - } - if after := ents[0].Index - 1; after < l.committed { - l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed) - } - l.unstable.truncateAndAppend(ents) - return l.lastIndex() -} - -// findConflict finds the index of the conflict. -// It returns the first pair of conflicting entries between the existing -// entries and the given entries, if there are any. -// If there is no conflicting entries, and the existing entries contains -// all the given entries, zero will be returned. -// If there is no conflicting entries, but the given entries contains new -// entries, the index of the first new entry will be returned. -// An entry is considered to be conflicting if it has the same index but -// a different term. -// The first entry MUST have an index equal to the argument 'from'. -// The index of the given entries MUST be continuously increasing. -func (l *raftLog) findConflict(ents []pb.Entry) uint64 { - for _, ne := range ents { - if !l.matchTerm(ne.Index, ne.Term) { - if ne.Index <= l.lastIndex() { - l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]", - ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term) - } - return ne.Index - } - } - return 0 -} - -func (l *raftLog) unstableEntries() []pb.Entry { - if len(l.unstable.entries) == 0 { - return nil - } - return l.unstable.entries -} - -// nextEnts returns all the available entries for execution. -// If applied is smaller than the index of snapshot, it returns all committed -// entries after the index of snapshot. -func (l *raftLog) nextEnts() (ents []pb.Entry) { - off := max(l.applied+1, l.firstIndex()) - if l.committed+1 > off { - ents, err := l.slice(off, l.committed+1, noLimit) - if err != nil { - l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err) - } - return ents - } - return nil -} - -// hasNextEnts returns if there is any available entries for execution. 
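findConflict above is the heart of log repair: an entry conflicts when the log already holds its index under a different term, and 0 means nothing new and nothing conflicting. The same rule over plain slices, as a self-contained illustration (entry is a stand-in for raftpb.Entry):

```
package main

import "fmt"

type entry struct{ Index, Term uint64 }

// findConflict mirrors the deleted raftLog.findConflict over plain
// slices: it returns the index of the first entry that is either new
// or holds a different term than the log, and 0 otherwise.
func findConflict(log, ents []entry) uint64 {
	termAt := make(map[uint64]uint64, len(log))
	for _, e := range log {
		termAt[e.Index] = e.Term
	}
	for _, ne := range ents {
		if t, ok := termAt[ne.Index]; !ok || t != ne.Term {
			return ne.Index // first new or conflicting entry
		}
	}
	return 0
}

func main() {
	log := []entry{{1, 1}, {2, 1}, {3, 2}}
	fmt.Println(findConflict(log, []entry{{2, 1}, {3, 2}})) // 0: already present
	fmt.Println(findConflict(log, []entry{{3, 2}, {4, 2}})) // 4: new entry
	fmt.Println(findConflict(log, []entry{{3, 3}}))         // 3: same index, new term
}
```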
This -// is a fast check without heavy raftLog.slice() in raftLog.nextEnts(). -func (l *raftLog) hasNextEnts() bool { - off := max(l.applied+1, l.firstIndex()) - if l.committed+1 > off { - return true - } - return false -} - -func (l *raftLog) snapshot() (pb.Snapshot, error) { - if l.unstable.snapshot != nil { - return *l.unstable.snapshot, nil - } - return l.storage.Snapshot() -} - -func (l *raftLog) firstIndex() uint64 { - if i, ok := l.unstable.maybeFirstIndex(); ok { - return i - } - index, err := l.storage.FirstIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - return index -} - -func (l *raftLog) lastIndex() uint64 { - if i, ok := l.unstable.maybeLastIndex(); ok { - return i - } - i, err := l.storage.LastIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - return i -} - -func (l *raftLog) commitTo(tocommit uint64) { - // never decrease commit - if l.committed < tocommit { - if l.lastIndex() < tocommit { - l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex()) - } - l.committed = tocommit - } -} - -func (l *raftLog) appliedTo(i uint64) { - if i == 0 { - return - } - if l.committed < i || i < l.applied { - l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed) - } - l.applied = i -} - -func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) } - -func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) } - -func (l *raftLog) lastTerm() uint64 { - t, err := l.term(l.lastIndex()) - if err != nil { - l.logger.Panicf("unexpected error when getting the last term (%v)", err) - } - return t -} - -func (l *raftLog) term(i uint64) (uint64, error) { - // the valid term range is [index of dummy entry, last index] - dummyIndex := l.firstIndex() - 1 - if i < dummyIndex || i > l.lastIndex() { - // TODO: return an error instead? - return 0, nil - } - - if t, ok := l.unstable.maybeTerm(i); ok { - return t, nil - } - - t, err := l.storage.Term(i) - if err == nil { - return t, nil - } - if err == ErrCompacted { - return 0, err - } - panic(err) // TODO(bdarnell) -} - -func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) { - if i > l.lastIndex() { - return nil, nil - } - return l.slice(i, l.lastIndex()+1, maxsize) -} - -// allEntries returns all entries in the log. -func (l *raftLog) allEntries() []pb.Entry { - ents, err := l.entries(l.firstIndex(), noLimit) - if err == nil { - return ents - } - if err == ErrCompacted { // try again if there was a racing compaction - return l.allEntries() - } - // TODO (xiangli): handle error? - panic(err) -} - -// isUpToDate determines if the given (lastIndex,term) log is more up-to-date -// by comparing the index and term of the last entries in the existing logs. -// If the logs have last entries with different terms, then the log with the -// later term is more up-to-date. If the logs end with the same term, then -// whichever log has the larger lastIndex is more up-to-date. If the logs are -// the same, the given log is up-to-date. 
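The up-to-date comparison described above, shown in isolation (moreUpToDate is an illustrative name for the voting rule):

```
package main

import "fmt"

// moreUpToDate implements the comparison described above: a later last
// term wins outright; equal terms fall back to the longer log.
func moreUpToDate(candLastIndex, candLastTerm, myLastIndex, myLastTerm uint64) bool {
	return candLastTerm > myLastTerm ||
		(candLastTerm == myLastTerm && candLastIndex >= myLastIndex)
}

func main() {
	fmt.Println(moreUpToDate(5, 3, 9, 2)) // true: higher term beats longer log
	fmt.Println(moreUpToDate(8, 2, 9, 2)) // false: same term, shorter log
}
```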
-func (l *raftLog) isUpToDate(lasti, term uint64) bool { - return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex()) -} - -func (l *raftLog) matchTerm(i, term uint64) bool { - t, err := l.term(i) - if err != nil { - return false - } - return t == term -} - -func (l *raftLog) maybeCommit(maxIndex, term uint64) bool { - if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term { - l.commitTo(maxIndex) - return true - } - return false -} - -func (l *raftLog) restore(s pb.Snapshot) { - l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term) - l.committed = s.Metadata.Index - l.unstable.restore(s) -} - -// slice returns a slice of log entries from lo through hi-1, inclusive. -func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) { - err := l.mustCheckOutOfBounds(lo, hi) - if err != nil { - return nil, err - } - if lo == hi { - return nil, nil - } - var ents []pb.Entry - if lo < l.unstable.offset { - storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize) - if err == ErrCompacted { - return nil, err - } else if err == ErrUnavailable { - l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset)) - } else if err != nil { - panic(err) // TODO(bdarnell) - } - - // check if ents has reached the size limitation - if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo { - return storedEnts, nil - } - - ents = storedEnts - } - if hi > l.unstable.offset { - unstable := l.unstable.slice(max(lo, l.unstable.offset), hi) - if len(ents) > 0 { - ents = append([]pb.Entry{}, ents...) - ents = append(ents, unstable...) - } else { - ents = unstable - } - } - return limitSize(ents, maxSize), nil -} - -// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries) -func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error { - if lo > hi { - l.logger.Panicf("invalid slice %d > %d", lo, hi) - } - fi := l.firstIndex() - if lo < fi { - return ErrCompacted - } - - length := l.lastIndex() - fi + 1 - if lo < fi || hi > fi+length { - l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex()) - } - return nil -} - -func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 { - if err == nil { - return t - } - if err == ErrCompacted { - return 0 - } - l.logger.Panicf("unexpected error (%v)", err) - return 0 -} diff --git a/vendor/src/github.com/coreos/etcd/raft/log_unstable.go b/vendor/src/github.com/coreos/etcd/raft/log_unstable.go deleted file mode 100644 index df90178f9..000000000 --- a/vendor/src/github.com/coreos/etcd/raft/log_unstable.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import pb "github.com/coreos/etcd/raft/raftpb" - -// unstable.entries[i] has raft log position i+unstable.offset. 
-// Note that unstable.offset may be less than the highest log -// position in storage; this means that the next write to storage -// might need to truncate the log before persisting unstable.entries. -type unstable struct { - // the incoming unstable snapshot, if any. - snapshot *pb.Snapshot - // all entries that have not yet been written to storage. - entries []pb.Entry - offset uint64 - - logger Logger -} - -// maybeFirstIndex returns the index of the first possible entry in entries -// if it has a snapshot. -func (u *unstable) maybeFirstIndex() (uint64, bool) { - if u.snapshot != nil { - return u.snapshot.Metadata.Index + 1, true - } - return 0, false -} - -// maybeLastIndex returns the last index if it has at least one -// unstable entry or snapshot. -func (u *unstable) maybeLastIndex() (uint64, bool) { - if l := len(u.entries); l != 0 { - return u.offset + uint64(l) - 1, true - } - if u.snapshot != nil { - return u.snapshot.Metadata.Index, true - } - return 0, false -} - -// maybeTerm returns the term of the entry at index i, if there -// is any. -func (u *unstable) maybeTerm(i uint64) (uint64, bool) { - if i < u.offset { - if u.snapshot == nil { - return 0, false - } - if u.snapshot.Metadata.Index == i { - return u.snapshot.Metadata.Term, true - } - return 0, false - } - - last, ok := u.maybeLastIndex() - if !ok { - return 0, false - } - if i > last { - return 0, false - } - return u.entries[i-u.offset].Term, true -} - -func (u *unstable) stableTo(i, t uint64) { - gt, ok := u.maybeTerm(i) - if !ok { - return - } - // if i < offset, term is matched with the snapshot - // only update the unstable entries if term is matched with - // an unstable entry. - if gt == t && i >= u.offset { - u.entries = u.entries[i+1-u.offset:] - u.offset = i + 1 - } -} - -func (u *unstable) stableSnapTo(i uint64) { - if u.snapshot != nil && u.snapshot.Metadata.Index == i { - u.snapshot = nil - } -} - -func (u *unstable) restore(s pb.Snapshot) { - u.offset = s.Metadata.Index + 1 - u.entries = nil - u.snapshot = &s -} - -func (u *unstable) truncateAndAppend(ents []pb.Entry) { - after := ents[0].Index - 1 - switch { - case after == u.offset+uint64(len(u.entries))-1: - // after is the last index in the u.entries - // directly append - u.entries = append(u.entries, ents...) - case after < u.offset: - u.logger.Infof("replace the unstable entries from index %d", after+1) - // The log is being truncated to before our current offset - // portion, so set the offset and replace the entries - u.offset = after + 1 - u.entries = ents - default: - // truncate to after and copy to u.entries - // then append - u.logger.Infof("truncate the unstable entries to index %d", after) - u.entries = append([]pb.Entry{}, u.slice(u.offset, after+1)...) - u.entries = append(u.entries, ents...) 
- } -} - -func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry { - u.mustCheckOutOfBounds(lo, hi) - return u.entries[lo-u.offset : hi-u.offset] -} - -// u.offset <= lo <= hi <= u.offset+len(u.offset) -func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) { - if lo > hi { - u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi) - } - upper := u.offset + uint64(len(u.entries)) - if lo < u.offset || hi > upper { - u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper) - } -} diff --git a/vendor/src/github.com/coreos/etcd/raft/logger.go b/vendor/src/github.com/coreos/etcd/raft/logger.go deleted file mode 100644 index 31c194a06..000000000 --- a/vendor/src/github.com/coreos/etcd/raft/logger.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "fmt" - "io/ioutil" - "log" - "os" -) - -type Logger interface { - Debug(v ...interface{}) - Debugf(format string, v ...interface{}) - - Error(v ...interface{}) - Errorf(format string, v ...interface{}) - - Info(v ...interface{}) - Infof(format string, v ...interface{}) - - Warning(v ...interface{}) - Warningf(format string, v ...interface{}) - - Fatal(v ...interface{}) - Fatalf(format string, v ...interface{}) - - Panic(v ...interface{}) - Panicf(format string, v ...interface{}) -} - -func SetLogger(l Logger) { raftLogger = l } - -var ( - defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)} - discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)} - raftLogger = Logger(defaultLogger) -) - -const ( - calldepth = 2 -) - -// DefaultLogger is a default implementation of the Logger interface. 
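truncateAndAppend above has three cases: contiguous append, full replacement, and overlap. A plain-slice illustration of the same branching, with stand-in names throughout:

```
package main

import "fmt"

type entry struct{ Index, Term uint64 }

// truncateAndAppend reproduces the three cases of the deleted
// unstable.truncateAndAppend over an (offset, entries) pair, where
// entries[i] has log position i+offset and ents is non-empty.
func truncateAndAppend(offset uint64, entries, ents []entry) (uint64, []entry) {
	after := ents[0].Index - 1
	switch {
	case after == offset+uint64(len(entries))-1:
		// after is the last held index: append directly.
		return offset, append(entries, ents...)
	case after < offset:
		// Truncating to before everything we hold: replace wholesale.
		return after + 1, ents
	default:
		// Overlap: keep entries up to and including 'after', then append.
		keep := append([]entry{}, entries[:after+1-offset]...)
		return offset, append(keep, ents...)
	}
}

func main() {
	offset, entries := uint64(5), []entry{{5, 1}, {6, 1}}
	offset, entries = truncateAndAppend(offset, entries, []entry{{6, 2}, {7, 2}})
	fmt.Println(offset, entries) // 5 [{5 1} {6 2} {7 2}]
}
```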
-type DefaultLogger struct { - *log.Logger - debug bool -} - -func (l *DefaultLogger) EnableTimestamps() { - l.SetFlags(l.Flags() | log.Ldate | log.Ltime) -} - -func (l *DefaultLogger) EnableDebug() { - l.debug = true -} - -func (l *DefaultLogger) Debug(v ...interface{}) { - if l.debug { - l.Output(calldepth, header("DEBUG", fmt.Sprint(v...))) - } -} - -func (l *DefaultLogger) Debugf(format string, v ...interface{}) { - if l.debug { - l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...))) - } -} - -func (l *DefaultLogger) Info(v ...interface{}) { - l.Output(calldepth, header("INFO", fmt.Sprint(v...))) -} - -func (l *DefaultLogger) Infof(format string, v ...interface{}) { - l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...))) -} - -func (l *DefaultLogger) Error(v ...interface{}) { - l.Output(calldepth, header("ERROR", fmt.Sprint(v...))) -} - -func (l *DefaultLogger) Errorf(format string, v ...interface{}) { - l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...))) -} - -func (l *DefaultLogger) Warning(v ...interface{}) { - l.Output(calldepth, header("WARN", fmt.Sprint(v...))) -} - -func (l *DefaultLogger) Warningf(format string, v ...interface{}) { - l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...))) -} - -func (l *DefaultLogger) Fatal(v ...interface{}) { - l.Output(calldepth, header("FATAL", fmt.Sprint(v...))) - os.Exit(1) -} - -func (l *DefaultLogger) Fatalf(format string, v ...interface{}) { - l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...))) - os.Exit(1) -} - -func (l *DefaultLogger) Panic(v ...interface{}) { - l.Logger.Panic(v) -} - -func (l *DefaultLogger) Panicf(format string, v ...interface{}) { - l.Logger.Panicf(format, v...) -} - -func header(lvl, msg string) string { - return fmt.Sprintf("%s: %s", lvl, msg) -} diff --git a/vendor/src/github.com/coreos/etcd/raft/node.go b/vendor/src/github.com/coreos/etcd/raft/node.go deleted file mode 100644 index c80dbc4b8..000000000 --- a/vendor/src/github.com/coreos/etcd/raft/node.go +++ /dev/null @@ -1,488 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "errors" - - "golang.org/x/net/context" - pb "github.com/coreos/etcd/raft/raftpb" -) - -type SnapshotStatus int - -const ( - SnapshotFinish SnapshotStatus = 1 - SnapshotFailure SnapshotStatus = 2 -) - -var ( - emptyState = pb.HardState{} - - // ErrStopped is returned by methods on Nodes that have been stopped. - ErrStopped = errors.New("raft: stopped") -) - -// SoftState provides state that is useful for logging and debugging. -// The state is volatile and does not need to be persisted to the WAL. -type SoftState struct { - Lead uint64 - RaftState StateType -} - -func (a *SoftState) equal(b *SoftState) bool { - return a.Lead == b.Lead && a.RaftState == b.RaftState -} - -// Ready encapsulates the entries and messages that are ready to read, -// be saved to stable storage, committed or sent to other peers. -// All fields in Ready are read-only. 
-type Ready struct { - // The current volatile state of a Node. - // SoftState will be nil if there is no update. - // It is not required to consume or store SoftState. - *SoftState - - // The current state of a Node to be saved to stable storage BEFORE - // Messages are sent. - // HardState will be equal to empty state if there is no update. - pb.HardState - - // Entries specifies entries to be saved to stable storage BEFORE - // Messages are sent. - Entries []pb.Entry - - // Snapshot specifies the snapshot to be saved to stable storage. - Snapshot pb.Snapshot - - // CommittedEntries specifies entries to be committed to a - // store/state-machine. These have previously been committed to stable - // store. - CommittedEntries []pb.Entry - - // Messages specifies outbound messages to be sent AFTER Entries are - // committed to stable storage. - // If it contains a MsgSnap message, the application MUST report back to raft - // when the snapshot has been received or has failed by calling ReportSnapshot. - Messages []pb.Message -} - -func isHardStateEqual(a, b pb.HardState) bool { - return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit -} - -// IsEmptyHardState returns true if the given HardState is empty. -func IsEmptyHardState(st pb.HardState) bool { - return isHardStateEqual(st, emptyState) -} - -// IsEmptySnap returns true if the given Snapshot is empty. -func IsEmptySnap(sp pb.Snapshot) bool { - return sp.Metadata.Index == 0 -} - -func (rd Ready) containsUpdates() bool { - return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) || - !IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 || - len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 -} - -// Node represents a node in a raft cluster. -type Node interface { - // Tick increments the internal logical clock for the Node by a single tick. Election - // timeouts and heartbeat timeouts are in units of ticks. - Tick() - // Campaign causes the Node to transition to candidate state and start campaigning to become leader. - Campaign(ctx context.Context) error - // Propose proposes that data be appended to the log. - Propose(ctx context.Context, data []byte) error - // ProposeConfChange proposes config change. - // At most one ConfChange can be in the process of going through consensus. - // Application needs to call ApplyConfChange when applying EntryConfChange type entry. - ProposeConfChange(ctx context.Context, cc pb.ConfChange) error - // Step advances the state machine using the given message. ctx.Err() will be returned, if any. - Step(ctx context.Context, msg pb.Message) error - - // Ready returns a channel that returns the current point-in-time state. - // Users of the Node must call Advance after retrieving the state returned by Ready. - // - // NOTE: No committed entries from the next Ready may be applied until all committed entries - // and snapshots from the previous one have finished. - Ready() <-chan Ready - - // Advance notifies the Node that the application has saved progress up to the last Ready. - // It prepares the node to return the next available Ready. - // - // The application should generally call Advance after it applies the entries in last Ready. - // - // However, as an optimization, the application may call Advance while it is applying the - // commands. For example. when the last Ready contains a snapshot, the application might take - // a long time to apply the snapshot data. To continue receiving Ready without blocking raft - // progress, it can call Advance before finish applying the last ready. 
To make this optimization - // work safely, when the application receives a Ready with softState.RaftState equal to Candidate - // it MUST apply all pending configuration changes if there is any. - // - // Here is a simple solution that waiting for ALL pending entries to get applied. - // ``` - // ... - // rd := <-n.Ready() - // go apply(rd.CommittedEntries) // optimization to apply asynchronously in FIFO order. - // if rd.SoftState.RaftState == StateCandidate { - // waitAllApplied() - // } - // n.Advance() - // ... - //``` - Advance() - // ApplyConfChange applies config change to the local node. - // Returns an opaque ConfState protobuf which must be recorded - // in snapshots. Will never return nil; it returns a pointer only - // to match MemoryStorage.Compact. - ApplyConfChange(cc pb.ConfChange) *pb.ConfState - // Status returns the current status of the raft state machine. - Status() Status - // ReportUnreachable reports the given node is not reachable for the last send. - ReportUnreachable(id uint64) - // ReportSnapshot reports the status of the sent snapshot. - ReportSnapshot(id uint64, status SnapshotStatus) - // Stop performs any necessary termination of the Node. - Stop() -} - -type Peer struct { - ID uint64 - Context []byte -} - -// StartNode returns a new Node given configuration and a list of raft peers. -// It appends a ConfChangeAddNode entry for each given peer to the initial log. -func StartNode(c *Config, peers []Peer) Node { - r := newRaft(c) - // become the follower at term 1 and apply initial configuration - // entries of term 1 - r.becomeFollower(1, None) - for _, peer := range peers { - cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} - d, err := cc.Marshal() - if err != nil { - panic("unexpected marshal error") - } - e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d} - r.raftLog.append(e) - } - // Mark these initial entries as committed. - // TODO(bdarnell): These entries are still unstable; do we need to preserve - // the invariant that committed < unstable? - r.raftLog.committed = r.raftLog.lastIndex() - // Now apply them, mainly so that the application can call Campaign - // immediately after StartNode in tests. Note that these nodes will - // be added to raft twice: here and when the application's Ready - // loop calls ApplyConfChange. The calls to addNode must come after - // all calls to raftLog.append so progress.next is set after these - // bootstrapping entries (it is an error if we try to append these - // entries since they have already been committed). - // We do not set raftLog.applied so the application will be able - // to observe all conf changes via Ready.CommittedEntries. - for _, peer := range peers { - r.addNode(peer.ID) - } - - n := newNode() - go n.run(r) - return &n -} - -// RestartNode is similar to StartNode but does not take a list of peers. -// The current membership of the cluster will be restored from the Storage. -// If the caller has an existing state machine, pass in the last log index that -// has been applied to it; otherwise use zero. 
-func RestartNode(c *Config) Node { - r := newRaft(c) - - n := newNode() - go n.run(r) - return &n -} - -// node is the canonical implementation of the Node interface -type node struct { - propc chan pb.Message - recvc chan pb.Message - confc chan pb.ConfChange - confstatec chan pb.ConfState - readyc chan Ready - advancec chan struct{} - tickc chan struct{} - done chan struct{} - stop chan struct{} - status chan chan Status -} - -func newNode() node { - return node{ - propc: make(chan pb.Message), - recvc: make(chan pb.Message), - confc: make(chan pb.ConfChange), - confstatec: make(chan pb.ConfState), - readyc: make(chan Ready), - advancec: make(chan struct{}), - tickc: make(chan struct{}), - done: make(chan struct{}), - stop: make(chan struct{}), - status: make(chan chan Status), - } -} - -func (n *node) Stop() { - select { - case n.stop <- struct{}{}: - // Not already stopped, so trigger it - case <-n.done: - // Node has already been stopped - no need to do anything - return - } - // Block until the stop has been acknowledged by run() - <-n.done -} - -func (n *node) run(r *raft) { - var propc chan pb.Message - var readyc chan Ready - var advancec chan struct{} - var prevLastUnstablei, prevLastUnstablet uint64 - var havePrevLastUnstablei bool - var prevSnapi uint64 - var rd Ready - - lead := None - prevSoftSt := r.softState() - prevHardSt := emptyState - - for { - if advancec != nil { - readyc = nil - } else { - rd = newReady(r, prevSoftSt, prevHardSt) - if rd.containsUpdates() { - readyc = n.readyc - } else { - readyc = nil - } - } - - if lead != r.lead { - if r.hasLeader() { - if lead == None { - r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term) - } else { - r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term) - } - propc = n.propc - } else { - r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term) - propc = nil - } - lead = r.lead - } - - select { - // TODO: maybe buffer the config propose if there exists one (the way - // described in raft dissertation) - // Currently it is dropped in Step silently. - case m := <-propc: - m.From = r.id - r.Step(m) - case m := <-n.recvc: - // filter out response message from unknown From. 
- if _, ok := r.prs[m.From]; ok || !IsResponseMsg(m) { - r.Step(m) // raft never returns an error - } - case cc := <-n.confc: - if cc.NodeID == None { - r.resetPendingConf() - select { - case n.confstatec <- pb.ConfState{Nodes: r.nodes()}: - case <-n.done: - } - break - } - switch cc.Type { - case pb.ConfChangeAddNode: - r.addNode(cc.NodeID) - case pb.ConfChangeRemoveNode: - // block incoming proposal when local node is - // removed - if cc.NodeID == r.id { - n.propc = nil - } - r.removeNode(cc.NodeID) - case pb.ConfChangeUpdateNode: - r.resetPendingConf() - default: - panic("unexpected conf type") - } - select { - case n.confstatec <- pb.ConfState{Nodes: r.nodes()}: - case <-n.done: - } - case <-n.tickc: - r.tick() - case readyc <- rd: - if rd.SoftState != nil { - prevSoftSt = rd.SoftState - } - if len(rd.Entries) > 0 { - prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index - prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term - havePrevLastUnstablei = true - } - if !IsEmptyHardState(rd.HardState) { - prevHardSt = rd.HardState - } - if !IsEmptySnap(rd.Snapshot) { - prevSnapi = rd.Snapshot.Metadata.Index - } - r.msgs = nil - advancec = n.advancec - case <-advancec: - if prevHardSt.Commit != 0 { - r.raftLog.appliedTo(prevHardSt.Commit) - } - if havePrevLastUnstablei { - r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet) - havePrevLastUnstablei = false - } - r.raftLog.stableSnapTo(prevSnapi) - advancec = nil - case c := <-n.status: - c <- getStatus(r) - case <-n.stop: - close(n.done) - return - } - } -} - -// Tick increments the internal logical clock for this Node. Election timeouts -// and heartbeat timeouts are in units of ticks. -func (n *node) Tick() { - select { - case n.tickc <- struct{}{}: - case <-n.done: - } -} - -func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) } - -func (n *node) Propose(ctx context.Context, data []byte) error { - return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}}) -} - -func (n *node) Step(ctx context.Context, m pb.Message) error { - // ignore unexpected local messages receiving over network - if IsLocalMsg(m) { - // TODO: return an error? - return nil - } - return n.step(ctx, m) -} - -func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error { - data, err := cc.Marshal() - if err != nil { - return err - } - return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}}) -} - -// Step advances the state machine using msgs. The ctx.Err() will be returned, -// if any. 
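node.step above is the canonical "send unless cancelled or stopped" select. The same shape, isolated and runnable (all names are illustrative):

```
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errStopped = errors.New("stopped")

// send mirrors the select in node.step above: deliver m, or fail fast
// if the caller's context expires or the node shuts down.
func send(ctx context.Context, ch chan<- string, done <-chan struct{}, m string) error {
	select {
	case ch <- m:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-done:
		return errStopped
	}
}

func main() {
	ch := make(chan string) // unbuffered and nobody is receiving
	done := make(chan struct{})
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	fmt.Println(send(ctx, ch, done, "MsgProp")) // context deadline exceeded
}
```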
-func (n *node) step(ctx context.Context, m pb.Message) error { - ch := n.recvc - if m.Type == pb.MsgProp { - ch = n.propc - } - - select { - case ch <- m: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-n.done: - return ErrStopped - } -} - -func (n *node) Ready() <-chan Ready { return n.readyc } - -func (n *node) Advance() { - select { - case n.advancec <- struct{}{}: - case <-n.done: - } -} - -func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { - var cs pb.ConfState - select { - case n.confc <- cc: - case <-n.done: - } - select { - case cs = <-n.confstatec: - case <-n.done: - } - return &cs -} - -func (n *node) Status() Status { - c := make(chan Status) - n.status <- c - return <-c -} - -func (n *node) ReportUnreachable(id uint64) { - select { - case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}: - case <-n.done: - } -} - -func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) { - rej := status == SnapshotFailure - - select { - case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}: - case <-n.done: - } -} - -func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { - rd := Ready{ - Entries: r.raftLog.unstableEntries(), - CommittedEntries: r.raftLog.nextEnts(), - Messages: r.msgs, - } - if softSt := r.softState(); !softSt.equal(prevSoftSt) { - rd.SoftState = softSt - } - if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) { - rd.HardState = hardSt - } - if r.raftLog.unstable.snapshot != nil { - rd.Snapshot = *r.raftLog.unstable.snapshot - } - return rd -} diff --git a/vendor/src/github.com/coreos/etcd/raft/progress.go b/vendor/src/github.com/coreos/etcd/raft/progress.go deleted file mode 100644 index 11f53409d..000000000 --- a/vendor/src/github.com/coreos/etcd/raft/progress.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import "fmt" - -const ( - ProgressStateProbe ProgressStateType = iota - ProgressStateReplicate - ProgressStateSnapshot -) - -type ProgressStateType uint64 - -var prstmap = [...]string{ - "ProgressStateProbe", - "ProgressStateReplicate", - "ProgressStateSnapshot", -} - -func (st ProgressStateType) String() string { return prstmap[uint64(st)] } - -// Progress represents a follower’s progress in the view of the leader. Leader maintains -// progresses of all followers, and sends entries to the follower based on its progress. -type Progress struct { - Match, Next uint64 - // State defines how the leader should interact with the follower. - // - // When in ProgressStateProbe, leader sends at most one replication message - // per heartbeat interval. It also probes actual progress of the follower. - // - // When in ProgressStateReplicate, leader optimistically increases next - // to the latest entry sent after sending replication message. This is - // an optimized state for fast replicating log entries to the follower. 
- // - // When in ProgressStateSnapshot, leader should have sent out snapshot - // before and stops sending any replication message. - State ProgressStateType - // Paused is used in ProgressStateProbe. - // When Paused is true, raft should pause sending replication message to this peer. - Paused bool - // PendingSnapshot is used in ProgressStateSnapshot. - // If there is a pending snapshot, the pendingSnapshot will be set to the - // index of the snapshot. If pendingSnapshot is set, the replication process of - // this Progress will be paused. raft will not resend snapshot until the pending one - // is reported to be failed. - PendingSnapshot uint64 - - // RecentActive is true if the progress is recently active. Receiving any messages - // from the corresponding follower indicates the progress is active. - // RecentActive can be reset to false after an election timeout. - RecentActive bool - - // inflights is a sliding window for the inflight messages. - // When inflights is full, no more message should be sent. - // When a leader sends out a message, the index of the last - // entry should be added to inflights. The index MUST be added - // into inflights in order. - // When a leader receives a reply, the previous inflights should - // be freed by calling inflights.freeTo. - ins *inflights -} - -func (pr *Progress) resetState(state ProgressStateType) { - pr.Paused = false - pr.RecentActive = false - pr.PendingSnapshot = 0 - pr.State = state - pr.ins.reset() -} - -func (pr *Progress) becomeProbe() { - // If the original state is ProgressStateSnapshot, progress knows that - // the pending snapshot has been sent to this peer successfully, then - // probes from pendingSnapshot + 1. - if pr.State == ProgressStateSnapshot { - pendingSnapshot := pr.PendingSnapshot - pr.resetState(ProgressStateProbe) - pr.Next = max(pr.Match+1, pendingSnapshot+1) - } else { - pr.resetState(ProgressStateProbe) - pr.Next = pr.Match + 1 - } -} - -func (pr *Progress) becomeReplicate() { - pr.resetState(ProgressStateReplicate) - pr.Next = pr.Match + 1 -} - -func (pr *Progress) becomeSnapshot(snapshoti uint64) { - pr.resetState(ProgressStateSnapshot) - pr.PendingSnapshot = snapshoti -} - -// maybeUpdate returns false if the given n index comes from an outdated message. -// Otherwise it updates the progress and returns true. -func (pr *Progress) maybeUpdate(n uint64) bool { - var updated bool - if pr.Match < n { - pr.Match = n - updated = true - pr.resume() - } - if pr.Next < n+1 { - pr.Next = n + 1 - } - return updated -} - -func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 } - -// maybeDecrTo returns false if the given to index comes from an out of order message. -// Otherwise it decreases the progress next index to min(rejected, last) and returns true. -func (pr *Progress) maybeDecrTo(rejected, last uint64) bool { - if pr.State == ProgressStateReplicate { - // the rejection must be stale if the progress has matched and "rejected" - // is smaller than "match". - if rejected <= pr.Match { - return false - } - // directly decrease next to match + 1 - pr.Next = pr.Match + 1 - return true - } - - // the rejection must be stale if "rejected" does not match next - 1 - if pr.Next-1 != rejected { - return false - } - - if pr.Next = min(rejected, last+1); pr.Next < 1 { - pr.Next = 1 - } - pr.resume() - return true -} - -func (pr *Progress) pause() { pr.Paused = true } -func (pr *Progress) resume() { pr.Paused = false } - -// isPaused returns whether progress stops sending message. 
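becomeProbe, becomeReplicate, becomeSnapshot, and the maybeUpdate/maybeDecrTo pair above form a small per-follower state machine. The trace below walks through a typical exchange; it uses the unexported API from this file, so it is illustrative rather than compilable on its own:

pr := &Progress{Next: 1, ins: newInflights(256)}

pr.becomeProbe()          // at most one in-flight append until the peer acks
if pr.maybeUpdate(5) {    // ack for index 5: Match=5, Next=6
	pr.becomeReplicate()  // switch to optimistic pipelining
}
pr.optimisticUpdate(9)    // Next=10 before the acks for 6..9 arrive
if pr.maybeDecrTo(9, 7) { // a rejection while replicating snaps Next back to Match+1
	pr.becomeProbe()      // stepLeader then falls back to probing
}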
-func (pr *Progress) isPaused() bool { - switch pr.State { - case ProgressStateProbe: - return pr.Paused - case ProgressStateReplicate: - return pr.ins.full() - case ProgressStateSnapshot: - return true - default: - panic("unexpected state") - } -} - -func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 } - -// maybeSnapshotAbort unsets pendingSnapshot if Match is equal or higher than -// the pendingSnapshot -func (pr *Progress) maybeSnapshotAbort() bool { - return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot -} - -func (pr *Progress) String() string { - return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.isPaused(), pr.PendingSnapshot) -} - -type inflights struct { - // the starting index in the buffer - start int - // number of inflights in the buffer - count int - - // the size of the buffer - size int - buffer []uint64 -} - -func newInflights(size int) *inflights { - return &inflights{ - size: size, - buffer: make([]uint64, size), - } -} - -// add adds an inflight into inflights -func (in *inflights) add(inflight uint64) { - if in.full() { - panic("cannot add into a full inflights") - } - next := in.start + in.count - if next >= in.size { - next -= in.size - } - in.buffer[next] = inflight - in.count++ -} - -// freeTo frees the inflights smaller or equal to the given `to` flight. -func (in *inflights) freeTo(to uint64) { - if in.count == 0 || to < in.buffer[in.start] { - // out of the left side of the window - return - } - - i, idx := 0, in.start - for i = 0; i < in.count; i++ { - if to < in.buffer[idx] { // found the first large inflight - break - } - - // increase index and maybe rotate - if idx++; idx >= in.size { - idx -= in.size - } - } - // free i inflights and set new start index - in.count -= i - in.start = idx -} - -func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) } - -// full returns true if the inflights is full. -func (in *inflights) full() bool { - return in.count == in.size -} - -// resets frees all inflights. -func (in *inflights) reset() { - in.count = 0 - in.start = 0 -} diff --git a/vendor/src/github.com/coreos/etcd/raft/raft.go b/vendor/src/github.com/coreos/etcd/raft/raft.go deleted file mode 100644 index 5639fcb8f..000000000 --- a/vendor/src/github.com/coreos/etcd/raft/raft.go +++ /dev/null @@ -1,898 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "errors" - "fmt" - "math" - "math/rand" - "sort" - "strings" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -// None is a placeholder node ID used when there is no leader. -const None uint64 = 0 -const noLimit = math.MaxUint64 - -var errNoLeader = errors.New("no leader") - -var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable") - -// Possible values for StateType. 
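The inflights type above is a fixed-size ring buffer holding the last entry index of each in-flight append; when it fills, the leader stops pipelining to that peer until acknowledgements free slots. A short illustrative trace:

in := newInflights(3)
in.add(4)         // window: [4]
in.add(5)         // window: [4 5]
in.add(6)         // window: [4 5 6]; in.full() == true, appends pause
in.freeTo(5)      // an ack up to index 5 frees 4 and 5; window: [6]
in.add(7)         // indexes must be added in ascending order
in.freeFirstOne() // frees 6, as on a heartbeat response; window: [7]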
-const ( - StateFollower StateType = iota - StateCandidate - StateLeader -) - -// StateType represents the role of a node in a cluster. -type StateType uint64 - -var stmap = [...]string{ - "StateFollower", - "StateCandidate", - "StateLeader", -} - -func (st StateType) String() string { - return stmap[uint64(st)] -} - -// Config contains the parameters to start a raft. -type Config struct { - // ID is the identity of the local raft. ID cannot be 0. - ID uint64 - - // peers contains the IDs of all nodes (including self) in the raft cluster. It - // should only be set when starting a new raft cluster. Restarting raft from - // previous configuration will panic if peers is set. peer is private and only - // used for testing right now. - peers []uint64 - - // ElectionTick is the number of Node.Tick invocations that must pass between - // elections. That is, if a follower does not receive any message from the - // leader of current term before ElectionTick has elapsed, it will become - // candidate and start an election. ElectionTick must be greater than - // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid - // unnecessary leader switching. - ElectionTick int - // HeartbeatTick is the number of Node.Tick invocations that must pass between - // heartbeats. That is, a leader sends heartbeat messages to maintain its - // leadership every HeartbeatTick ticks. - HeartbeatTick int - - // Storage is the storage for raft. raft generates entries and states to be - // stored in storage. raft reads the persisted entries and states out of - // Storage when it needs. raft reads out the previous state and configuration - // out of storage when restarting. - Storage Storage - // Applied is the last applied index. It should only be set when restarting - // raft. raft will not return entries to the application smaller or equal to - // Applied. If Applied is unset when restarting, raft might return previous - // applied entries. This is a very application dependent configuration. - Applied uint64 - - // MaxSizePerMsg limits the max size of each append message. Smaller value - // lowers the raft recovery cost(initial probing and message lost during normal - // operation). On the other side, it might affect the throughput during normal - // replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per - // message. - MaxSizePerMsg uint64 - // MaxInflightMsgs limits the max number of in-flight append messages during - // optimistic replication phase. The application transportation layer usually - // has its own sending buffer over TCP/UDP. Setting MaxInflightMsgs to avoid - // overflowing that sending buffer. TODO (xiangli): feedback to application to - // limit the proposal rate? - MaxInflightMsgs int - - // CheckQuorum specifies if the leader should check quorum activity. Leader - // steps down when quorum is not active for an electionTimeout. - CheckQuorum bool - - // Logger is the logger used for raft log. 
For multinode which can host - // multiple raft group, each raft group can have its own logger - Logger Logger -} - -func (c *Config) validate() error { - if c.ID == None { - return errors.New("cannot use none as id") - } - - if c.HeartbeatTick <= 0 { - return errors.New("heartbeat tick must be greater than 0") - } - - if c.ElectionTick <= c.HeartbeatTick { - return errors.New("election tick must be greater than heartbeat tick") - } - - if c.Storage == nil { - return errors.New("storage cannot be nil") - } - - if c.MaxInflightMsgs <= 0 { - return errors.New("max inflight messages must be greater than 0") - } - - if c.Logger == nil { - c.Logger = raftLogger - } - - return nil -} - -type raft struct { - id uint64 - - Term uint64 - Vote uint64 - - // the log - raftLog *raftLog - - maxInflight int - maxMsgSize uint64 - prs map[uint64]*Progress - - state StateType - - votes map[uint64]bool - - msgs []pb.Message - - // the leader id - lead uint64 - - // New configuration is ignored if there exists unapplied configuration. - pendingConf bool - - // number of ticks since it reached last electionTimeout when it is leader - // or candidate. - // number of ticks since it reached last electionTimeout or received a - // valid message from current leader when it is a follower. - electionElapsed int - - // number of ticks since it reached last heartbeatTimeout. - // only leader keeps heartbeatElapsed. - heartbeatElapsed int - - checkQuorum bool - - heartbeatTimeout int - electionTimeout int - rand *rand.Rand - tick func() - step stepFunc - - logger Logger -} - -func newRaft(c *Config) *raft { - if err := c.validate(); err != nil { - panic(err.Error()) - } - raftlog := newLog(c.Storage, c.Logger) - hs, cs, err := c.Storage.InitialState() - if err != nil { - panic(err) // TODO(bdarnell) - } - peers := c.peers - if len(cs.Nodes) > 0 { - if len(peers) > 0 { - // TODO(bdarnell): the peers argument is always nil except in - // tests; the argument should be removed and these tests should be - // updated to specify their nodes through a snapshot. 
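validate() above spells out the invariants a Config must satisfy: a nonzero ID, HeartbeatTick > 0, ElectionTick strictly greater than HeartbeatTick, a non-nil Storage, and a positive MaxInflightMsgs; Logger defaults to raftLogger when unset. A minimal Config that passes those checks might look like this, assuming the package's in-memory Storage implementation:

c := &Config{
	ID:              0x01,        // must be nonzero
	ElectionTick:    10,          // suggested: 10 * HeartbeatTick
	HeartbeatTick:   1,
	Storage:         NewMemoryStorage(),
	MaxSizePerMsg:   1024 * 1024, // cap each append message at 1 MiB
	MaxInflightMsgs: 256,         // bound optimistic pipelining
}
if err := c.validate(); err != nil {
	panic(err)
}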
- panic("cannot specify both newRaft(peers) and ConfState.Nodes)") - } - peers = cs.Nodes - } - r := &raft{ - id: c.ID, - lead: None, - raftLog: raftlog, - maxMsgSize: c.MaxSizePerMsg, - maxInflight: c.MaxInflightMsgs, - prs: make(map[uint64]*Progress), - electionTimeout: c.ElectionTick, - heartbeatTimeout: c.HeartbeatTick, - logger: c.Logger, - checkQuorum: c.CheckQuorum, - } - r.rand = rand.New(rand.NewSource(int64(c.ID))) - for _, p := range peers { - r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)} - } - if !isHardStateEqual(hs, emptyState) { - r.loadState(hs) - } - if c.Applied > 0 { - raftlog.appliedTo(c.Applied) - } - r.becomeFollower(r.Term, None) - - nodesStrs := make([]string, 0) - for _, n := range r.nodes() { - nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n)) - } - - r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]", - r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm()) - return r -} - -func (r *raft) hasLeader() bool { return r.lead != None } - -func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} } - -func (r *raft) hardState() pb.HardState { - return pb.HardState{ - Term: r.Term, - Vote: r.Vote, - Commit: r.raftLog.committed, - } -} - -func (r *raft) quorum() int { return len(r.prs)/2 + 1 } - -func (r *raft) nodes() []uint64 { - nodes := make([]uint64, 0, len(r.prs)) - for id := range r.prs { - nodes = append(nodes, id) - } - sort.Sort(uint64Slice(nodes)) - return nodes -} - -// send persists state to stable storage and then sends to its mailbox. -func (r *raft) send(m pb.Message) { - m.From = r.id - // do not attach term to MsgProp - // proposals are a way to forward to the leader and - // should be treated as local message. - if m.Type != pb.MsgProp { - m.Term = r.Term - } - r.msgs = append(r.msgs, m) -} - -// sendAppend sends RPC, with entries to the given peer. 
-func (r *raft) sendAppend(to uint64) { - pr := r.prs[to] - if pr.isPaused() { - return - } - m := pb.Message{} - m.To = to - - term, errt := r.raftLog.term(pr.Next - 1) - ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize) - - if errt != nil || erre != nil { // send snapshot if we failed to get term or entries - if !pr.RecentActive { - r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to) - return - } - - m.Type = pb.MsgSnap - snapshot, err := r.raftLog.snapshot() - if err != nil { - if err == ErrSnapshotTemporarilyUnavailable { - r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to) - return - } - panic(err) // TODO(bdarnell) - } - if IsEmptySnap(snapshot) { - panic("need non-empty snapshot") - } - m.Snapshot = snapshot - sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term - r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]", - r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr) - pr.becomeSnapshot(sindex) - r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr) - } else { - m.Type = pb.MsgApp - m.Index = pr.Next - 1 - m.LogTerm = term - m.Entries = ents - m.Commit = r.raftLog.committed - if n := len(m.Entries); n != 0 { - switch pr.State { - // optimistically increase the next when in ProgressStateReplicate - case ProgressStateReplicate: - last := m.Entries[n-1].Index - pr.optimisticUpdate(last) - pr.ins.add(last) - case ProgressStateProbe: - pr.pause() - default: - r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State) - } - } - } - r.send(m) -} - -// sendHeartbeat sends an empty MsgApp -func (r *raft) sendHeartbeat(to uint64) { - // Attach the commit as min(to.matched, r.committed). - // When the leader sends out heartbeat message, - // the receiver(follower) might not be matched with the leader - // or it might not have all the committed entries. - // The leader MUST NOT forward the follower's commit to - // an unmatched index. - commit := min(r.prs[to].Match, r.raftLog.committed) - m := pb.Message{ - To: to, - Type: pb.MsgHeartbeat, - Commit: commit, - } - r.send(m) -} - -// bcastAppend sends RPC, with entries to all peers that are not up-to-date -// according to the progress recorded in r.prs. -func (r *raft) bcastAppend() { - for id := range r.prs { - if id == r.id { - continue - } - r.sendAppend(id) - } -} - -// bcastHeartbeat sends RPC, without entries to all the peers. -func (r *raft) bcastHeartbeat() { - for id := range r.prs { - if id == r.id { - continue - } - r.sendHeartbeat(id) - r.prs[id].resume() - } -} - -// maybeCommit attempts to advance the commit index. Returns true if -// the commit index changed (in which case the caller should call -// r.bcastAppend). -func (r *raft) maybeCommit() bool { - // TODO(bmizerany): optimize.. 
Currently naive - mis := make(uint64Slice, 0, len(r.prs)) - for id := range r.prs { - mis = append(mis, r.prs[id].Match) - } - sort.Sort(sort.Reverse(mis)) - mci := mis[r.quorum()-1] - return r.raftLog.maybeCommit(mci, r.Term) -} - -func (r *raft) reset(term uint64) { - if r.Term != term { - r.Term = term - r.Vote = None - } - r.lead = None - - r.electionElapsed = 0 - r.heartbeatElapsed = 0 - - r.votes = make(map[uint64]bool) - for id := range r.prs { - r.prs[id] = &Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight)} - if id == r.id { - r.prs[id].Match = r.raftLog.lastIndex() - } - } - r.pendingConf = false -} - -func (r *raft) appendEntry(es ...pb.Entry) { - li := r.raftLog.lastIndex() - for i := range es { - es[i].Term = r.Term - es[i].Index = li + 1 + uint64(i) - } - r.raftLog.append(es...) - r.prs[r.id].maybeUpdate(r.raftLog.lastIndex()) - // Regardless of maybeCommit's return, our caller will call bcastAppend. - r.maybeCommit() -} - -// tickElection is run by followers and candidates after r.electionTimeout. -func (r *raft) tickElection() { - if !r.promotable() { - r.electionElapsed = 0 - return - } - r.electionElapsed++ - if r.isElectionTimeout() { - r.electionElapsed = 0 - r.Step(pb.Message{From: r.id, Type: pb.MsgHup}) - } -} - -// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout. -func (r *raft) tickHeartbeat() { - r.heartbeatElapsed++ - r.electionElapsed++ - - if r.electionElapsed >= r.electionTimeout { - r.electionElapsed = 0 - if r.checkQuorum { - r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}) - } - } - - if r.state != StateLeader { - return - } - - if r.heartbeatElapsed >= r.heartbeatTimeout { - r.heartbeatElapsed = 0 - r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}) - } -} - -func (r *raft) becomeFollower(term uint64, lead uint64) { - r.step = stepFollower - r.reset(term) - r.tick = r.tickElection - r.lead = lead - r.state = StateFollower - r.logger.Infof("%x became follower at term %d", r.id, r.Term) -} - -func (r *raft) becomeCandidate() { - // TODO(xiangli) remove the panic when the raft implementation is stable - if r.state == StateLeader { - panic("invalid transition [leader -> candidate]") - } - r.step = stepCandidate - r.reset(r.Term + 1) - r.tick = r.tickElection - r.Vote = r.id - r.state = StateCandidate - r.logger.Infof("%x became candidate at term %d", r.id, r.Term) -} - -func (r *raft) becomeLeader() { - // TODO(xiangli) remove the panic when the raft implementation is stable - if r.state == StateFollower { - panic("invalid transition [follower -> leader]") - } - r.step = stepLeader - r.reset(r.Term) - r.tick = r.tickHeartbeat - r.lead = r.id - r.state = StateLeader - ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit) - if err != nil { - r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err) - } - - for _, e := range ents { - if e.Type != pb.EntryConfChange { - continue - } - if r.pendingConf { - panic("unexpected double uncommitted config entry") - } - r.pendingConf = true - } - r.appendEntry(pb.Entry{Data: nil}) - r.logger.Infof("%x became leader at term %d", r.id, r.Term) -} - -func (r *raft) campaign() { - r.becomeCandidate() - if r.quorum() == r.poll(r.id, true) { - r.becomeLeader() - return - } - for id := range r.prs { - if id == r.id { - continue - } - r.logger.Infof("%x [logterm: %d, index: %d] sent vote request to %x at term %d", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), id, r.Term) - r.send(pb.Message{To: id, Type: pb.MsgVote, Index: 
r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm()}) - } -} - -func (r *raft) poll(id uint64, v bool) (granted int) { - if v { - r.logger.Infof("%x received vote from %x at term %d", r.id, id, r.Term) - } else { - r.logger.Infof("%x received vote rejection from %x at term %d", r.id, id, r.Term) - } - if _, ok := r.votes[id]; !ok { - r.votes[id] = v - } - for _, vv := range r.votes { - if vv { - granted++ - } - } - return granted -} - -func (r *raft) Step(m pb.Message) error { - if m.Type == pb.MsgHup { - if r.state != StateLeader { - r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term) - r.campaign() - } else { - r.logger.Debugf("%x ignoring MsgHup because already leader", r.id) - } - return nil - } - - switch { - case m.Term == 0: - // local message - case m.Term > r.Term: - lead := m.From - if m.Type == pb.MsgVote { - lead = None - } - r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]", - r.id, r.Term, m.Type, m.From, m.Term) - r.becomeFollower(m.Term, lead) - case m.Term < r.Term: - // ignore - r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]", - r.id, r.Term, m.Type, m.From, m.Term) - return nil - } - r.step(r, m) - return nil -} - -type stepFunc func(r *raft, m pb.Message) - -func stepLeader(r *raft, m pb.Message) { - - // These message types do not require any progress for m.From. - switch m.Type { - case pb.MsgBeat: - r.bcastHeartbeat() - return - case pb.MsgCheckQuorum: - if !r.checkQuorumActive() { - r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id) - r.becomeFollower(r.Term, None) - } - return - case pb.MsgProp: - if len(m.Entries) == 0 { - r.logger.Panicf("%x stepped empty MsgProp", r.id) - } - if _, ok := r.prs[r.id]; !ok { - // If we are not currently a member of the range (i.e. this node - // was removed from the configuration while serving as leader), - // drop any new proposals. - return - } - for i, e := range m.Entries { - if e.Type == pb.EntryConfChange { - if r.pendingConf { - m.Entries[i] = pb.Entry{Type: pb.EntryNormal} - } - r.pendingConf = true - } - } - r.appendEntry(m.Entries...) - r.bcastAppend() - return - case pb.MsgVote: - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term) - r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true}) - return - } - - // All other message types require a progress for m.From (pr). 
- pr, prOk := r.prs[m.From] - if !prOk { - r.logger.Debugf("no progress available for %x", m.From) - return - } - switch m.Type { - case pb.MsgAppResp: - pr.RecentActive = true - - if m.Reject { - r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d", - r.id, m.RejectHint, m.From, m.Index) - if pr.maybeDecrTo(m.Index, m.RejectHint) { - r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr) - if pr.State == ProgressStateReplicate { - pr.becomeProbe() - } - r.sendAppend(m.From) - } - } else { - oldPaused := pr.isPaused() - if pr.maybeUpdate(m.Index) { - switch { - case pr.State == ProgressStateProbe: - pr.becomeReplicate() - case pr.State == ProgressStateSnapshot && pr.maybeSnapshotAbort(): - r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr) - pr.becomeProbe() - case pr.State == ProgressStateReplicate: - pr.ins.freeTo(m.Index) - } - - if r.maybeCommit() { - r.bcastAppend() - } else if oldPaused { - // update() reset the wait state on this node. If we had delayed sending - // an update before, send it now. - r.sendAppend(m.From) - } - } - } - case pb.MsgHeartbeatResp: - pr.RecentActive = true - - // free one slot for the full inflights window to allow progress. - if pr.State == ProgressStateReplicate && pr.ins.full() { - pr.ins.freeFirstOne() - } - if pr.Match < r.raftLog.lastIndex() { - r.sendAppend(m.From) - } - case pb.MsgSnapStatus: - if pr.State != ProgressStateSnapshot { - return - } - if !m.Reject { - pr.becomeProbe() - r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr) - } else { - pr.snapshotFailure() - pr.becomeProbe() - r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr) - } - // If snapshot finish, wait for the msgAppResp from the remote node before sending - // out the next msgApp. - // If snapshot failure, wait for a heartbeat interval before next try - pr.pause() - case pb.MsgUnreachable: - // During optimistic replication, if the remote becomes unreachable, - // there is huge probability that a MsgApp is lost. 
- if pr.State == ProgressStateReplicate { - pr.becomeProbe() - } - r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr) - } -} - -func stepCandidate(r *raft, m pb.Message) { - switch m.Type { - case pb.MsgProp: - r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) - return - case pb.MsgApp: - r.becomeFollower(r.Term, m.From) - r.handleAppendEntries(m) - case pb.MsgHeartbeat: - r.becomeFollower(r.Term, m.From) - r.handleHeartbeat(m) - case pb.MsgSnap: - r.becomeFollower(m.Term, m.From) - r.handleSnapshot(m) - case pb.MsgVote: - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term) - r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true}) - case pb.MsgVoteResp: - gr := r.poll(m.From, !m.Reject) - r.logger.Infof("%x [quorum:%d] has received %d votes and %d vote rejections", r.id, r.quorum(), gr, len(r.votes)-gr) - switch r.quorum() { - case gr: - r.becomeLeader() - r.bcastAppend() - case len(r.votes) - gr: - r.becomeFollower(r.Term, None) - } - } -} - -func stepFollower(r *raft, m pb.Message) { - switch m.Type { - case pb.MsgProp: - if r.lead == None { - r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) - return - } - m.To = r.lead - r.send(m) - case pb.MsgApp: - r.electionElapsed = 0 - r.lead = m.From - r.handleAppendEntries(m) - case pb.MsgHeartbeat: - r.electionElapsed = 0 - r.lead = m.From - r.handleHeartbeat(m) - case pb.MsgSnap: - r.electionElapsed = 0 - r.handleSnapshot(m) - case pb.MsgVote: - if (r.Vote == None || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) { - r.electionElapsed = 0 - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] voted for %x [logterm: %d, index: %d] at term %d", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term) - r.Vote = m.From - r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp}) - } else { - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term) - r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true}) - } - } -} - -func (r *raft) handleAppendEntries(m pb.Message) { - if m.Index < r.raftLog.committed { - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) - return - } - - if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok { - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex}) - } else { - r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x", - r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From) - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()}) - } -} - -func (r *raft) handleHeartbeat(m pb.Message) { - r.raftLog.commitTo(m.Commit) - r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp}) -} - -func (r *raft) handleSnapshot(m pb.Message) { - sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term - if r.restore(m.Snapshot) { - r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, sindex, sterm) - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: 
r.raftLog.lastIndex()}) - } else { - r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, sindex, sterm) - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) - } -} - -// restore recovers the state machine from a snapshot. It restores the log and the -// configuration of state machine. -func (r *raft) restore(s pb.Snapshot) bool { - if s.Metadata.Index <= r.raftLog.committed { - return false - } - if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) { - r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) - r.raftLog.commitTo(s.Metadata.Index) - return false - } - - r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) - - r.raftLog.restore(s) - r.prs = make(map[uint64]*Progress) - for _, n := range s.Metadata.ConfState.Nodes { - match, next := uint64(0), uint64(r.raftLog.lastIndex())+1 - if n == r.id { - match = next - 1 - } else { - match = 0 - } - r.setProgress(n, match, next) - r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.prs[n]) - } - return true -} - -// promotable indicates whether state machine can be promoted to leader, -// which is true when its own id is in progress list. -func (r *raft) promotable() bool { - _, ok := r.prs[r.id] - return ok -} - -func (r *raft) addNode(id uint64) { - if _, ok := r.prs[id]; ok { - // Ignore any redundant addNode calls (which can happen because the - // initial bootstrapping entries are applied twice). - return - } - - r.setProgress(id, 0, r.raftLog.lastIndex()+1) - r.pendingConf = false -} - -func (r *raft) removeNode(id uint64) { - r.delProgress(id) - r.pendingConf = false - // The quorum size is now smaller, so see if any pending entries can - // be committed. - if r.maybeCommit() { - r.bcastAppend() - } -} - -func (r *raft) resetPendingConf() { r.pendingConf = false } - -func (r *raft) setProgress(id, match, next uint64) { - r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)} -} - -func (r *raft) delProgress(id uint64) { - delete(r.prs, id) -} - -func (r *raft) loadState(state pb.HardState) { - if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() { - r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex()) - } - r.raftLog.committed = state.Commit - r.Term = state.Term - r.Vote = state.Vote -} - -// isElectionTimeout returns true if r.electionElapsed is greater than the -// randomized election timeout in (electiontimeout, 2 * electiontimeout - 1). -// Otherwise, it returns false. -func (r *raft) isElectionTimeout() bool { - d := r.electionElapsed - r.electionTimeout - if d < 0 { - return false - } - return d > r.rand.Int()%r.electionTimeout -} - -// checkQuorumActive returns true if the quorum is active from -// the view of the local raft state machine. Otherwise, it returns -// false. -// checkQuorumActive also resets all RecentActive to false. 
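isElectionTimeout above randomizes when a follower actually campaigns: nothing can fire before electionTimeout ticks have elapsed, and after that each check compares the overshoot against a fresh draw from [0, electionTimeout), spreading the trigger point out to 2*electionTimeout ticks so that peers rarely campaign simultaneously. A condensed, self-contained sketch using math/rand:

electionTimeout := 10
rng := rand.New(rand.NewSource(1)) // per-node source; seeded from the node ID above

timedOut := func(electionElapsed int) bool {
	d := electionElapsed - electionTimeout
	if d < 0 {
		return false // still inside the base timeout
	}
	return d > rng.Int()%electionTimeout // randomized extra delay
}

// timedOut(10) is always false; timedOut(20) is always true; in between,
// each node trips at a different, randomly drawn tick.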
-func (r *raft) checkQuorumActive() bool { - var act int - - for id := range r.prs { - if id == r.id { // self is always active - act++ - continue - } - - if r.prs[id].RecentActive { - act++ - } - - r.prs[id].RecentActive = false - } - - return act >= r.quorum() -} diff --git a/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go deleted file mode 100644 index 319134cde..000000000 --- a/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go +++ /dev/null @@ -1,1768 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: raft.proto -// DO NOT EDIT! - -/* - Package raftpb is a generated protocol buffer package. - - It is generated from these files: - raft.proto - - It has these top-level messages: - Entry - SnapshotMetadata - Snapshot - Message - HardState - ConfState - ConfChange -*/ -package raftpb - -import ( - "fmt" - - proto "github.com/gogo/protobuf/proto" -) - -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type EntryType int32 - -const ( - EntryNormal EntryType = 0 - EntryConfChange EntryType = 1 -) - -var EntryType_name = map[int32]string{ - 0: "EntryNormal", - 1: "EntryConfChange", -} -var EntryType_value = map[string]int32{ - "EntryNormal": 0, - "EntryConfChange": 1, -} - -func (x EntryType) Enum() *EntryType { - p := new(EntryType) - *p = x - return p -} -func (x EntryType) String() string { - return proto.EnumName(EntryType_name, int32(x)) -} -func (x *EntryType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") - if err != nil { - return err - } - *x = EntryType(value) - return nil -} - -type MessageType int32 - -const ( - MsgHup MessageType = 0 - MsgBeat MessageType = 1 - MsgProp MessageType = 2 - MsgApp MessageType = 3 - MsgAppResp MessageType = 4 - MsgVote MessageType = 5 - MsgVoteResp MessageType = 6 - MsgSnap MessageType = 7 - MsgHeartbeat MessageType = 8 - MsgHeartbeatResp MessageType = 9 - MsgUnreachable MessageType = 10 - MsgSnapStatus MessageType = 11 - MsgCheckQuorum MessageType = 12 -) - -var MessageType_name = map[int32]string{ - 0: "MsgHup", - 1: "MsgBeat", - 2: "MsgProp", - 3: "MsgApp", - 4: "MsgAppResp", - 5: "MsgVote", - 6: "MsgVoteResp", - 7: "MsgSnap", - 8: "MsgHeartbeat", - 9: "MsgHeartbeatResp", - 10: "MsgUnreachable", - 11: "MsgSnapStatus", - 12: "MsgCheckQuorum", -} -var MessageType_value = map[string]int32{ - "MsgHup": 0, - "MsgBeat": 1, - "MsgProp": 2, - "MsgApp": 3, - "MsgAppResp": 4, - "MsgVote": 5, - "MsgVoteResp": 6, - "MsgSnap": 7, - "MsgHeartbeat": 8, - "MsgHeartbeatResp": 9, - "MsgUnreachable": 10, - "MsgSnapStatus": 11, - "MsgCheckQuorum": 12, -} - -func (x MessageType) Enum() *MessageType { - p := new(MessageType) - *p = x - return p -} -func (x MessageType) String() string { - return proto.EnumName(MessageType_name, int32(x)) -} -func (x *MessageType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") - if err != nil { - return err - } - *x = MessageType(value) - return nil -} - -type ConfChangeType int32 - -const ( - ConfChangeAddNode ConfChangeType = 0 - ConfChangeRemoveNode ConfChangeType = 1 - ConfChangeUpdateNode ConfChangeType = 2 -) - -var ConfChangeType_name = map[int32]string{ - 0: "ConfChangeAddNode", - 1: "ConfChangeRemoveNode", - 2: "ConfChangeUpdateNode", -} -var ConfChangeType_value = map[string]int32{ - 
"ConfChangeAddNode": 0, - "ConfChangeRemoveNode": 1, - "ConfChangeUpdateNode": 2, -} - -func (x ConfChangeType) Enum() *ConfChangeType { - p := new(ConfChangeType) - *p = x - return p -} -func (x ConfChangeType) String() string { - return proto.EnumName(ConfChangeType_name, int32(x)) -} -func (x *ConfChangeType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") - if err != nil { - return err - } - *x = ConfChangeType(value) - return nil -} - -type Entry struct { - Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` - Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` - Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` - Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Entry) Reset() { *m = Entry{} } -func (m *Entry) String() string { return proto.CompactTextString(m) } -func (*Entry) ProtoMessage() {} - -type SnapshotMetadata struct { - ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state" json:"conf_state"` - Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` - Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } -func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } -func (*SnapshotMetadata) ProtoMessage() {} - -type Snapshot struct { - Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} - -type Message struct { - Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` - To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` - From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` - Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` - LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` - Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` - Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` - Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` - Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` - Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` - RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} - -type HardState struct { - Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` - Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` - Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *HardState) Reset() { *m = HardState{} } -func (m *HardState) String() string { return proto.CompactTextString(m) } -func (*HardState) ProtoMessage() {} - -type ConfState struct { - Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ConfState) Reset() { *m = ConfState{} } -func (m *ConfState) String() string { return proto.CompactTextString(m) } -func 
(*ConfState) ProtoMessage() {} - -type ConfChange struct { - ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` - Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"` - NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"` - Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ConfChange) Reset() { *m = ConfChange{} } -func (m *ConfChange) String() string { return proto.CompactTextString(m) } -func (*ConfChange) ProtoMessage() {} - -func init() { - proto.RegisterType((*Entry)(nil), "raftpb.Entry") - proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") - proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") - proto.RegisterType((*Message)(nil), "raftpb.Message") - proto.RegisterType((*HardState)(nil), "raftpb.HardState") - proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") - proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") - proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) - proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) - proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) -} -func (m *Entry) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Entry) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintRaft(data, i, uint64(m.Type)) - data[i] = 0x10 - i++ - i = encodeVarintRaft(data, i, uint64(m.Term)) - data[i] = 0x18 - i++ - i = encodeVarintRaft(data, i, uint64(m.Index)) - if m.Data != nil { - data[i] = 0x22 - i++ - i = encodeVarintRaft(data, i, uint64(len(m.Data))) - i += copy(data[i:], m.Data) - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *SnapshotMetadata) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SnapshotMetadata) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintRaft(data, i, uint64(m.ConfState.Size())) - n1, err := m.ConfState.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x10 - i++ - i = encodeVarintRaft(data, i, uint64(m.Index)) - data[i] = 0x18 - i++ - i = encodeVarintRaft(data, i, uint64(m.Term)) - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Snapshot) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Snapshot) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Data != nil { - data[i] = 0xa - i++ - i = encodeVarintRaft(data, i, uint64(len(m.Data))) - i += copy(data[i:], m.Data) - } - data[i] = 0x12 - i++ - i = encodeVarintRaft(data, i, uint64(m.Metadata.Size())) - n2, err := m.Metadata.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Message) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := 
m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Message) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintRaft(data, i, uint64(m.Type)) - data[i] = 0x10 - i++ - i = encodeVarintRaft(data, i, uint64(m.To)) - data[i] = 0x18 - i++ - i = encodeVarintRaft(data, i, uint64(m.From)) - data[i] = 0x20 - i++ - i = encodeVarintRaft(data, i, uint64(m.Term)) - data[i] = 0x28 - i++ - i = encodeVarintRaft(data, i, uint64(m.LogTerm)) - data[i] = 0x30 - i++ - i = encodeVarintRaft(data, i, uint64(m.Index)) - if len(m.Entries) > 0 { - for _, msg := range m.Entries { - data[i] = 0x3a - i++ - i = encodeVarintRaft(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x40 - i++ - i = encodeVarintRaft(data, i, uint64(m.Commit)) - data[i] = 0x4a - i++ - i = encodeVarintRaft(data, i, uint64(m.Snapshot.Size())) - n3, err := m.Snapshot.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - data[i] = 0x50 - i++ - if m.Reject { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x58 - i++ - i = encodeVarintRaft(data, i, uint64(m.RejectHint)) - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *HardState) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HardState) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintRaft(data, i, uint64(m.Term)) - data[i] = 0x10 - i++ - i = encodeVarintRaft(data, i, uint64(m.Vote)) - data[i] = 0x18 - i++ - i = encodeVarintRaft(data, i, uint64(m.Commit)) - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ConfState) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ConfState) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Nodes) > 0 { - for _, num := range m.Nodes { - data[i] = 0x8 - i++ - i = encodeVarintRaft(data, i, uint64(num)) - } - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ConfChange) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ConfChange) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintRaft(data, i, uint64(m.ID)) - data[i] = 0x10 - i++ - i = encodeVarintRaft(data, i, uint64(m.Type)) - data[i] = 0x18 - i++ - i = encodeVarintRaft(data, i, uint64(m.NodeID)) - if m.Context != nil { - data[i] = 0x22 - i++ - i = encodeVarintRaft(data, i, uint64(len(m.Context))) - i += copy(data[i:], m.Context) - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Raft(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - 
data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Raft(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintRaft(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Entry) Size() (n int) { - var l int - _ = l - n += 1 + sovRaft(uint64(m.Type)) - n += 1 + sovRaft(uint64(m.Term)) - n += 1 + sovRaft(uint64(m.Index)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovRaft(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SnapshotMetadata) Size() (n int) { - var l int - _ = l - l = m.ConfState.Size() - n += 1 + l + sovRaft(uint64(l)) - n += 1 + sovRaft(uint64(m.Index)) - n += 1 + sovRaft(uint64(m.Term)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Snapshot) Size() (n int) { - var l int - _ = l - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovRaft(uint64(l)) - } - l = m.Metadata.Size() - n += 1 + l + sovRaft(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Message) Size() (n int) { - var l int - _ = l - n += 1 + sovRaft(uint64(m.Type)) - n += 1 + sovRaft(uint64(m.To)) - n += 1 + sovRaft(uint64(m.From)) - n += 1 + sovRaft(uint64(m.Term)) - n += 1 + sovRaft(uint64(m.LogTerm)) - n += 1 + sovRaft(uint64(m.Index)) - if len(m.Entries) > 0 { - for _, e := range m.Entries { - l = e.Size() - n += 1 + l + sovRaft(uint64(l)) - } - } - n += 1 + sovRaft(uint64(m.Commit)) - l = m.Snapshot.Size() - n += 1 + l + sovRaft(uint64(l)) - n += 2 - n += 1 + sovRaft(uint64(m.RejectHint)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HardState) Size() (n int) { - var l int - _ = l - n += 1 + sovRaft(uint64(m.Term)) - n += 1 + sovRaft(uint64(m.Vote)) - n += 1 + sovRaft(uint64(m.Commit)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ConfState) Size() (n int) { - var l int - _ = l - if len(m.Nodes) > 0 { - for _, e := range m.Nodes { - n += 1 + sovRaft(uint64(e)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ConfChange) Size() (n int) { - var l int - _ = l - n += 1 + sovRaft(uint64(m.ID)) - n += 1 + sovRaft(uint64(m.Type)) - n += 1 + sovRaft(uint64(m.NodeID)) - if m.Context != nil { - l = len(m.Context) - n += 1 + l + sovRaft(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovRaft(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozRaft(x uint64) (n int) { - return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Entry) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Entry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Entry: 
illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Type |= (EntryType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Index |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
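Every numeric field and length prefix above uses the same base-128 varint scheme: each byte carries seven payload bits, least-significant group first, with the high bit marking continuation. A self-contained round trip for the value 300, mirroring encodeVarintRaft and the decode loops in Unmarshal:

v := uint64(300) // 0b10_0101100
var buf []byte
for v >= 1<<7 {
	buf = append(buf, uint8(v&0x7f|0x80)) // low 7 bits, continuation bit set
	v >>= 7
}
buf = append(buf, uint8(v)) // buf == []byte{0xAC, 0x02}

var out uint64
for shift, i := uint(0), 0; ; shift += 7 {
	b := buf[i]
	i++
	out |= uint64(b&0x7F) << shift
	if b < 0x80 { // high bit clear: last byte
		break
	}
}
// out == 300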
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotMetadata) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ConfState.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Index |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Snapshot) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metadata.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Message) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Message: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Type |= (MessageType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) - } - m.To = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.To |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) - } - m.From = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.From |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType) - } - m.LogTerm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.LogTerm |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Index |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Entries = append(m.Entries, Entry{}) - if err := 
m.Entries[len(m.Entries)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) - } - m.Commit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Commit |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Snapshot.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Reject = bool(v != 0) - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType) - } - m.RejectHint = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.RejectHint |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HardState) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HardState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) - } - m.Vote = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Vote |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) - } - m.Commit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Commit |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfState) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Nodes = append(m.Nodes, v) - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfChange) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Type |= (ConfChangeType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) - } - m.NodeID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.NodeID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Context = append(m.Context[:0], data[iNdEx:postIndex]...) - if m.Context == nil { - m.Context = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRaft(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaft - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaft - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaft - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthRaft - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaft - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRaft(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto b/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto deleted file mode 100644 index 0a98b8cfa..000000000 --- a/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto +++ /dev/null @@ -1,86 +0,0 @@ -syntax = "proto2"; -package raftpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_enum_prefix_all) = false; - -enum EntryType { - EntryNormal = 0; - EntryConfChange = 1; -} - -message Entry { - optional EntryType Type = 1 [(gogoproto.nullable) = false]; - optional uint64 Term = 2 [(gogoproto.nullable) = false]; - optional uint64 Index = 3 [(gogoproto.nullable) = false]; 
- optional bytes Data = 4; -} - -message SnapshotMetadata { - optional ConfState conf_state = 1 [(gogoproto.nullable) = false]; - optional uint64 index = 2 [(gogoproto.nullable) = false]; - optional uint64 term = 3 [(gogoproto.nullable) = false]; -} - -message Snapshot { - optional bytes data = 1; - optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false]; -} - -enum MessageType { - MsgHup = 0; - MsgBeat = 1; - MsgProp = 2; - MsgApp = 3; - MsgAppResp = 4; - MsgVote = 5; - MsgVoteResp = 6; - MsgSnap = 7; - MsgHeartbeat = 8; - MsgHeartbeatResp = 9; - MsgUnreachable = 10; - MsgSnapStatus = 11; - MsgCheckQuorum = 12; -} - -message Message { - optional MessageType type = 1 [(gogoproto.nullable) = false]; - optional uint64 to = 2 [(gogoproto.nullable) = false]; - optional uint64 from = 3 [(gogoproto.nullable) = false]; - optional uint64 term = 4 [(gogoproto.nullable) = false]; - optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; - optional uint64 index = 6 [(gogoproto.nullable) = false]; - repeated Entry entries = 7 [(gogoproto.nullable) = false]; - optional uint64 commit = 8 [(gogoproto.nullable) = false]; - optional Snapshot snapshot = 9 [(gogoproto.nullable) = false]; - optional bool reject = 10 [(gogoproto.nullable) = false]; - optional uint64 rejectHint = 11 [(gogoproto.nullable) = false]; -} - -message HardState { - optional uint64 term = 1 [(gogoproto.nullable) = false]; - optional uint64 vote = 2 [(gogoproto.nullable) = false]; - optional uint64 commit = 3 [(gogoproto.nullable) = false]; -} - -message ConfState { - repeated uint64 nodes = 1; -} - -enum ConfChangeType { - ConfChangeAddNode = 0; - ConfChangeRemoveNode = 1; - ConfChangeUpdateNode = 2; -} - -message ConfChange { - optional uint64 ID = 1 [(gogoproto.nullable) = false]; - optional ConfChangeType Type = 2 [(gogoproto.nullable) = false]; - optional uint64 NodeID = 3 [(gogoproto.nullable) = false]; - optional bytes Context = 4; -} diff --git a/vendor/src/github.com/coreos/etcd/raft/rawnode.go b/vendor/src/github.com/coreos/etcd/raft/rawnode.go deleted file mode 100644 index 8cf085891..000000000 --- a/vendor/src/github.com/coreos/etcd/raft/rawnode.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "errors" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -// ErrStepLocalMsg is returned when try to step a local raft message -var ErrStepLocalMsg = errors.New("raft: cannot step raft local message") - -// ErrStepPeerNotFound is returned when try to step a response message -// but there is no peer found in raft.prs for that node. -var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found") - -// RawNode is a thread-unsafe Node. -// The methods of this struct correspond to the methods of Node and are described -// more fully there. 
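The RawNode type declared just below is driven by a single application goroutine: tick it, drain Ready structs, and acknowledge them with Advance. A minimal sketch of that loop, assuming ticker, stopc, saveToStorage, send, and process are application-side helpers that are not part of this package (a real loop would block rather than spin when there is nothing to do):

	for {
		select {
		case <-ticker.C:
			rn.Tick() // drives elections and heartbeats
		case <-stopc:
			return
		default:
		}
		if rn.HasReady() {
			rd := rn.Ready()
			// Persist state and entries before sending messages.
			saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
			send(rd.Messages)
			for _, entry := range rd.CommittedEntries {
				process(entry)
			}
			rn.Advance(rd) // mark this Ready as handled
		}
	}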
-type RawNode struct { - raft *raft - prevSoftSt *SoftState - prevHardSt pb.HardState -} - -func (rn *RawNode) newReady() Ready { - return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt) -} - -func (rn *RawNode) commitReady(rd Ready) { - if rd.SoftState != nil { - rn.prevSoftSt = rd.SoftState - } - if !IsEmptyHardState(rd.HardState) { - rn.prevHardSt = rd.HardState - } - if rn.prevHardSt.Commit != 0 { - // In most cases, prevHardSt and rd.HardState will be the same - // because when there are new entries to apply we just sent a - // HardState with an updated Commit value. However, on initial - // startup the two are different because we don't send a HardState - // until something changes, but we do send any un-applied but - // committed entries (and previously-committed entries may be - // incorporated into the snapshot, even if rd.CommittedEntries is - // empty). Therefore we mark all committed entries as applied - // whether they were included in rd.HardState or not. - rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit) - } - if len(rd.Entries) > 0 { - e := rd.Entries[len(rd.Entries)-1] - rn.raft.raftLog.stableTo(e.Index, e.Term) - } - if !IsEmptySnap(rd.Snapshot) { - rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index) - } -} - -// NewRawNode returns a new RawNode given configuration and a list of raft peers. -func NewRawNode(config *Config, peers []Peer) (*RawNode, error) { - if config.ID == 0 { - panic("config.ID must not be zero") - } - r := newRaft(config) - rn := &RawNode{ - raft: r, - } - lastIndex, err := config.Storage.LastIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - // If the log is empty, this is a new RawNode (like StartNode); otherwise it's - // restoring an existing RawNode (like RestartNode). - // TODO(bdarnell): rethink RawNode initialization and whether the application needs - // to be able to tell us when it expects the RawNode to exist. - if lastIndex == 0 { - r.becomeFollower(1, None) - ents := make([]pb.Entry, len(peers)) - for i, peer := range peers { - cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} - data, err := cc.Marshal() - if err != nil { - panic("unexpected marshal error") - } - - ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data} - } - r.raftLog.append(ents...) - r.raftLog.committed = uint64(len(ents)) - for _, peer := range peers { - r.addNode(peer.ID) - } - } - // Set the initial hard and soft states after performing all initialization. - rn.prevSoftSt = r.softState() - rn.prevHardSt = r.hardState() - - return rn, nil -} - -// Tick advances the internal logical clock by a single tick. -func (rn *RawNode) Tick() { - rn.raft.tick() -} - -// Campaign causes this RawNode to transition to candidate state. -func (rn *RawNode) Campaign() error { - return rn.raft.Step(pb.Message{ - Type: pb.MsgHup, - }) -} - -// Propose proposes data be appended to the raft log. -func (rn *RawNode) Propose(data []byte) error { - return rn.raft.Step(pb.Message{ - Type: pb.MsgProp, - From: rn.raft.id, - Entries: []pb.Entry{ - {Data: data}, - }}) -} - -// ProposeConfChange proposes a config change. -func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error { - data, err := cc.Marshal() - if err != nil { - return err - } - return rn.raft.Step(pb.Message{ - Type: pb.MsgProp, - Entries: []pb.Entry{ - {Type: pb.EntryConfChange, Data: data}, - }, - }) -} - -// ApplyConfChange applies a config change to the local node. 
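ProposeConfChange above and ApplyConfChange below form a two-phase protocol: the change is committed through the log like any other entry, and is applied to local membership only once it reappears in CommittedEntries. Roughly, inside the Ready-processing loop (a sketch; pb is the raftpb import alias used in this file):

	for _, entry := range rd.CommittedEntries {
		if entry.Type == pb.EntryConfChange {
			var cc pb.ConfChange
			if err := cc.Unmarshal(entry.Data); err != nil {
				panic(err) // the entry was marshaled by ProposeConfChange
			}
			// Returns the membership after the change is applied.
			confState := rn.ApplyConfChange(cc)
			_ = confState
		}
	}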
-func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
-	if cc.NodeID == None {
-		rn.raft.resetPendingConf()
-		return &pb.ConfState{Nodes: rn.raft.nodes()}
-	}
-	switch cc.Type {
-	case pb.ConfChangeAddNode:
-		rn.raft.addNode(cc.NodeID)
-	case pb.ConfChangeRemoveNode:
-		rn.raft.removeNode(cc.NodeID)
-	case pb.ConfChangeUpdateNode:
-		rn.raft.resetPendingConf()
-	default:
-		panic("unexpected conf type")
-	}
-	return &pb.ConfState{Nodes: rn.raft.nodes()}
-}
-
-// Step advances the state machine using the given message.
-func (rn *RawNode) Step(m pb.Message) error {
-	// ignore unexpected local messages received over the network
-	if IsLocalMsg(m) {
-		return ErrStepLocalMsg
-	}
-	if _, ok := rn.raft.prs[m.From]; ok || !IsResponseMsg(m) {
-		return rn.raft.Step(m)
-	}
-	return ErrStepPeerNotFound
-}
-
-// Ready returns the current point-in-time state of this RawNode.
-func (rn *RawNode) Ready() Ready {
-	rd := rn.newReady()
-	rn.raft.msgs = nil
-	return rd
-}
-
-// HasReady is called when the RawNode user needs to check whether any Ready is pending.
-// The checking logic in this method should be consistent with Ready.containsUpdates().
-func (rn *RawNode) HasReady() bool {
-	r := rn.raft
-	if !r.softState().equal(rn.prevSoftSt) {
-		return true
-	}
-	if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
-		return true
-	}
-	if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
-		return true
-	}
-	if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
-		return true
-	}
-	return false
-}
-
-// Advance notifies the RawNode that the application has applied and saved progress in the
-// last Ready results.
-func (rn *RawNode) Advance(rd Ready) {
-	rn.commitReady(rd)
-}
-
-// Status returns the current status of the given group.
-func (rn *RawNode) Status() *Status {
-	status := getStatus(rn.raft)
-	return &status
-}
-
-// ReportUnreachable reports that the given node was not reachable for the last send.
-func (rn *RawNode) ReportUnreachable(id uint64) {
-	_ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
-}
-
-// ReportSnapshot reports the status of the sent snapshot.
-func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
-	rej := status == SnapshotFailure
-
-	_ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
-}
diff --git a/vendor/src/github.com/coreos/etcd/raft/status.go b/vendor/src/github.com/coreos/etcd/raft/status.go
deleted file mode 100644
index d18a48954..000000000
--- a/vendor/src/github.com/coreos/etcd/raft/status.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"fmt"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-type Status struct {
-	ID uint64
-
-	pb.HardState
-	SoftState
-
-	Applied  uint64
-	Progress map[uint64]Progress
-}
-
-// getStatus gets a copy of the current raft status.
-func getStatus(r *raft) Status { - s := Status{ID: r.id} - s.HardState = r.hardState() - s.SoftState = *r.softState() - - s.Applied = r.raftLog.applied - - if s.RaftState == StateLeader { - s.Progress = make(map[uint64]Progress) - for id, p := range r.prs { - s.Progress[id] = *p - } - } - - return s -} - -// MarshalJSON translates the raft status into JSON. -// TODO: try to simplify this by introducing ID type into raft -func (s Status) MarshalJSON() ([]byte, error) { - j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"progress":{`, - s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState) - - if len(s.Progress) == 0 { - j += "}}" - } else { - for k, v := range s.Progress { - subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State) - j += subj - } - // remove the trailing "," - j = j[:len(j)-1] + "}}" - } - return []byte(j), nil -} - -func (s Status) String() string { - b, err := s.MarshalJSON() - if err != nil { - raftLogger.Panicf("unexpected error: %v", err) - } - return string(b) -} diff --git a/vendor/src/github.com/coreos/etcd/raft/storage.go b/vendor/src/github.com/coreos/etcd/raft/storage.go deleted file mode 100644 index f3724162c..000000000 --- a/vendor/src/github.com/coreos/etcd/raft/storage.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "errors" - "sync" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -// ErrCompacted is returned by Storage.Entries/Compact when a requested -// index is unavailable because it predates the last snapshot. -var ErrCompacted = errors.New("requested index is unavailable due to compaction") - -// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested -// index is older than the existing snapshot. -var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot") - -var ErrUnavailable = errors.New("requested entry at index is unavailable") - -// Storage is an interface that may be implemented by the application -// to retrieve log entries from storage. -// -// If any Storage method returns an error, the raft instance will -// become inoperable and refuse to participate in elections; the -// application is responsible for cleanup and recovery in this case. -type Storage interface { - // InitialState returns the saved HardState and ConfState information. - InitialState() (pb.HardState, pb.ConfState, error) - // Entries returns a slice of log entries in the range [lo,hi). - // MaxSize limits the total size of the log entries returned, but - // Entries returns at least one entry if any. - Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) - // Term returns the term of entry i, which must be in the range - // [FirstIndex()-1, LastIndex()]. The term of the entry before - // FirstIndex is retained for matching purposes even though the - // rest of that entry may not be available. 
- Term(i uint64) (uint64, error) - // LastIndex returns the index of the last entry in the log. - LastIndex() (uint64, error) - // FirstIndex returns the index of the first log entry that is - // possibly available via Entries (older entries have been incorporated - // into the latest Snapshot; if storage only contains the dummy entry the - // first log entry is not available). - FirstIndex() (uint64, error) - // Snapshot returns the most recent snapshot. - // If snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable, - // so raft state machine could know that Storage needs some time to prepare - // snapshot and call Snapshot later. - Snapshot() (pb.Snapshot, error) -} - -// MemoryStorage implements the Storage interface backed by an -// in-memory array. -type MemoryStorage struct { - // Protects access to all fields. Most methods of MemoryStorage are - // run on the raft goroutine, but Append() is run on an application - // goroutine. - sync.Mutex - - hardState pb.HardState - snapshot pb.Snapshot - // ents[i] has raft log position i+snapshot.Metadata.Index - ents []pb.Entry -} - -// NewMemoryStorage creates an empty MemoryStorage. -func NewMemoryStorage() *MemoryStorage { - return &MemoryStorage{ - // When starting from scratch populate the list with a dummy entry at term zero. - ents: make([]pb.Entry, 1), - } -} - -// InitialState implements the Storage interface. -func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) { - return ms.hardState, ms.snapshot.Metadata.ConfState, nil -} - -// SetHardState saves the current HardState. -func (ms *MemoryStorage) SetHardState(st pb.HardState) error { - ms.hardState = st - return nil -} - -// Entries implements the Storage interface. -func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) { - ms.Lock() - defer ms.Unlock() - offset := ms.ents[0].Index - if lo <= offset { - return nil, ErrCompacted - } - if hi > ms.lastIndex()+1 { - raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex()) - } - // only contains dummy entries. - if len(ms.ents) == 1 { - return nil, ErrUnavailable - } - - ents := ms.ents[lo-offset : hi-offset] - return limitSize(ents, maxSize), nil -} - -// Term implements the Storage interface. -func (ms *MemoryStorage) Term(i uint64) (uint64, error) { - ms.Lock() - defer ms.Unlock() - offset := ms.ents[0].Index - if i < offset { - return 0, ErrCompacted - } - return ms.ents[i-offset].Term, nil -} - -// LastIndex implements the Storage interface. -func (ms *MemoryStorage) LastIndex() (uint64, error) { - ms.Lock() - defer ms.Unlock() - return ms.lastIndex(), nil -} - -func (ms *MemoryStorage) lastIndex() uint64 { - return ms.ents[0].Index + uint64(len(ms.ents)) - 1 -} - -// FirstIndex implements the Storage interface. -func (ms *MemoryStorage) FirstIndex() (uint64, error) { - ms.Lock() - defer ms.Unlock() - return ms.firstIndex(), nil -} - -func (ms *MemoryStorage) firstIndex() uint64 { - return ms.ents[0].Index + 1 -} - -// Snapshot implements the Storage interface. -func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) { - ms.Lock() - defer ms.Unlock() - return ms.snapshot, nil -} - -// ApplySnapshot overwrites the contents of this Storage object with -// those of the given snapshot. -func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error { - ms.Lock() - defer ms.Unlock() - - // TODO: return ErrSnapOutOfDate? 
- ms.snapshot = snap - ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}} - return nil -} - -// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and -// can be used to reconstruct the state at that point. -// If any configuration changes have been made since the last compaction, -// the result of the last ApplyConfChange must be passed in. -func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) { - ms.Lock() - defer ms.Unlock() - if i <= ms.snapshot.Metadata.Index { - return pb.Snapshot{}, ErrSnapOutOfDate - } - - offset := ms.ents[0].Index - if i > ms.lastIndex() { - raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex()) - } - - ms.snapshot.Metadata.Index = i - ms.snapshot.Metadata.Term = ms.ents[i-offset].Term - if cs != nil { - ms.snapshot.Metadata.ConfState = *cs - } - ms.snapshot.Data = data - return ms.snapshot, nil -} - -// Compact discards all log entries prior to compactIndex. -// It is the application's responsibility to not attempt to compact an index -// greater than raftLog.applied. -func (ms *MemoryStorage) Compact(compactIndex uint64) error { - ms.Lock() - defer ms.Unlock() - offset := ms.ents[0].Index - if compactIndex <= offset { - return ErrCompacted - } - if compactIndex > ms.lastIndex() { - raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex()) - } - - i := compactIndex - offset - ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i) - ents[0].Index = ms.ents[i].Index - ents[0].Term = ms.ents[i].Term - ents = append(ents, ms.ents[i+1:]...) - ms.ents = ents - return nil -} - -// Append the new entries to storage. -// TODO (xiangli): ensure the entries are continuous and -// entries[0].Index > ms.entries[0].Index -func (ms *MemoryStorage) Append(entries []pb.Entry) error { - ms.Lock() - defer ms.Unlock() - if len(entries) == 0 { - return nil - } - first := ms.ents[0].Index + 1 - last := entries[0].Index + uint64(len(entries)) - 1 - - // shortcut if there is no new entry. - if last < first { - return nil - } - // truncate compacted entries - if first > entries[0].Index { - entries = entries[first-entries[0].Index:] - } - - offset := entries[0].Index - ms.ents[0].Index - switch { - case uint64(len(ms.ents)) > offset: - ms.ents = append([]pb.Entry{}, ms.ents[:offset]...) - ms.ents = append(ms.ents, entries...) - case uint64(len(ms.ents)) == offset: - ms.ents = append(ms.ents, entries...) - default: - raftLogger.Panicf("missing log entry [last: %d, append at: %d]", - ms.lastIndex(), entries[0].Index) - } - return nil -} diff --git a/vendor/src/github.com/coreos/etcd/raft/util.go b/vendor/src/github.com/coreos/etcd/raft/util.go deleted file mode 100644 index 8d4c41900..000000000 --- a/vendor/src/github.com/coreos/etcd/raft/util.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
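MemoryStorage, deleted above, keeps a dummy entry at ents[0] so that ents[i] always holds raft log position i+snapshot.Metadata.Index; FirstIndex, LastIndex, Term, and Compact are all offset arithmetic against that dummy entry. A small sketch of the resulting behavior, using the package API as it existed here:

	ms := raft.NewMemoryStorage()
	first, _ := ms.FirstIndex()
	last, _ := ms.LastIndex()
	fmt.Println(first, last) // 1 0: only the dummy entry at index 0 exists

	ms.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}, {Index: 3, Term: 1}})
	ms.Compact(2) // entry 2 becomes the new dummy entry
	first, _ = ms.FirstIndex()
	fmt.Println(first) // 3
	_, err := ms.Term(1)
	fmt.Println(err) // requested index is unavailable due to compaction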
- -package raft - -import ( - "bytes" - "fmt" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -func (st StateType) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf("%q", st.String())), nil -} - -// uint64Slice implements sort interface -type uint64Slice []uint64 - -func (p uint64Slice) Len() int { return len(p) } -func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func min(a, b uint64) uint64 { - if a > b { - return b - } - return a -} - -func max(a, b uint64) uint64 { - if a > b { - return a - } - return b -} - -func IsLocalMsg(m pb.Message) bool { - return m.Type == pb.MsgHup || m.Type == pb.MsgBeat || m.Type == pb.MsgUnreachable || m.Type == pb.MsgSnapStatus || m.Type == pb.MsgCheckQuorum -} - -func IsResponseMsg(m pb.Message) bool { - return m.Type == pb.MsgAppResp || m.Type == pb.MsgVoteResp || m.Type == pb.MsgHeartbeatResp || m.Type == pb.MsgUnreachable -} - -// EntryFormatter can be implemented by the application to provide human-readable formatting -// of entry data. Nil is a valid EntryFormatter and will use a default format. -type EntryFormatter func([]byte) string - -// DescribeMessage returns a concise human-readable description of a -// Message for debugging. -func DescribeMessage(m pb.Message, f EntryFormatter) string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) - if m.Reject { - fmt.Fprintf(&buf, " Rejected") - if m.RejectHint != 0 { - fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint) - } - } - if m.Commit != 0 { - fmt.Fprintf(&buf, " Commit:%d", m.Commit) - } - if len(m.Entries) > 0 { - fmt.Fprintf(&buf, " Entries:[") - for i, e := range m.Entries { - if i != 0 { - buf.WriteString(", ") - } - buf.WriteString(DescribeEntry(e, f)) - } - fmt.Fprintf(&buf, "]") - } - if !IsEmptySnap(m.Snapshot) { - fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot) - } - return buf.String() -} - -// DescribeEntry returns a concise human-readable description of an -// Entry for debugging. -func DescribeEntry(e pb.Entry, f EntryFormatter) string { - var formatted string - if e.Type == pb.EntryNormal && f != nil { - formatted = f(e.Data) - } else { - formatted = fmt.Sprintf("%q", e.Data) - } - return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted) -} - -func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { - if len(ents) == 0 { - return ents - } - size := ents[0].Size() - var limit int - for limit = 1; limit < len(ents); limit++ { - size += ents[limit].Size() - if uint64(size) > maxSize { - break - } - } - return ents[:limit] -} diff --git a/vendor/src/github.com/coreos/etcd/snap/db.go b/vendor/src/github.com/coreos/etcd/snap/db.go deleted file mode 100644 index ca68837cb..000000000 --- a/vendor/src/github.com/coreos/etcd/snap/db.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
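Every generated Unmarshal method in this patch (raft.pb.go above, snap.pb.go below) inlines the same protobuf base-128 varint decode: seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last. Extracted as a standalone sketch:

	// decodeVarint decodes one unsigned varint from data, returning the
	// value and the number of bytes consumed.
	func decodeVarint(data []byte) (v uint64, n int, err error) {
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, 0, fmt.Errorf("varint overflows a 64-bit integer")
			}
			if n >= len(data) {
				return 0, 0, io.ErrUnexpectedEOF
			}
			b := data[n]
			n++
			v |= (uint64(b) & 0x7F) << shift
			if b < 0x80 { // high bit clear: final byte
				return v, n, nil
			}
		}
	}

Each field is preceded by one such varint holding the key fieldNum<<3|wireType, which is why the Unmarshal methods above begin every iteration by decoding wire and splitting it into fieldNum and wireType.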
-
-package snap
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path"
-
-	"github.com/coreos/etcd/pkg/fileutil"
-)
-
-// SaveDBFrom saves a snapshot of the database from the given reader. It
-// guarantees the save operation is atomic.
-func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) error {
-	f, err := ioutil.TempFile(s.dir, "tmp")
-	if err != nil {
-		return err
-	}
-	var n int64
-	n, err = io.Copy(f, r)
-	if err == nil {
-		err = f.Sync()
-	}
-	f.Close()
-	if err != nil {
-		os.Remove(f.Name())
-		return err
-	}
-	fn := path.Join(s.dir, fmt.Sprintf("%016x.snap.db", id))
-	if fileutil.Exist(fn) {
-		os.Remove(f.Name())
-		return nil
-	}
-	err = os.Rename(f.Name(), fn)
-	if err != nil {
-		os.Remove(f.Name())
-		return err
-	}
-
-	plog.Infof("saved database snapshot to disk [total bytes: %d]", n)
-
-	return nil
-}
-
-// DBFilePath returns the file path for the snapshot of the database with
-// the given id. If the snapshot does not exist, it returns an error.
-func (s *Snapshotter) DBFilePath(id uint64) (string, error) {
-	fns, err := fileutil.ReadDir(s.dir)
-	if err != nil {
-		return "", err
-	}
-	wfn := fmt.Sprintf("%016x.snap.db", id)
-	for _, fn := range fns {
-		if fn == wfn {
-			return path.Join(s.dir, fn), nil
-		}
-	}
-	return "", fmt.Errorf("snap: snapshot file doesn't exist")
-}
diff --git a/vendor/src/github.com/coreos/etcd/snap/message.go b/vendor/src/github.com/coreos/etcd/snap/message.go
deleted file mode 100644
index 2d2b21106..000000000
--- a/vendor/src/github.com/coreos/etcd/snap/message.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package snap
-
-import (
-	"io"
-
-	"github.com/coreos/etcd/raft/raftpb"
-)
-
-// Message is a struct that contains a raft Message and a ReadCloser. The type
-// of raft message MUST be MsgSnap, which contains the raft meta-data and an
-// additional data []byte field that contains the snapshot of the actual state
-// machine.
-// Message contains the ReadCloser field for handling large snapshots. This
-// avoids copying the entire snapshot into a byte array, which consumes a lot
-// of memory.
-//
-// Users of Message should close the Message after sending it.
-type Message struct {
-	raftpb.Message
-	ReadCloser io.ReadCloser
-	closeC     chan bool
-}
-
-func NewMessage(rs raftpb.Message, rc io.ReadCloser) *Message {
-	return &Message{
-		Message:    rs,
-		ReadCloser: rc,
-		closeC:     make(chan bool, 1),
-	}
-}
-
-// CloseNotify returns a channel that receives a single value
-// when sending the message has finished. true indicates that
-// the send was successful.
-func (m Message) CloseNotify() <-chan bool { - return m.closeC -} - -func (m Message) CloseWithError(err error) { - m.ReadCloser.Close() - if err == nil { - m.closeC <- true - } else { - m.closeC <- false - } -} diff --git a/vendor/src/github.com/coreos/etcd/snap/metrics.go b/vendor/src/github.com/coreos/etcd/snap/metrics.go deleted file mode 100644 index 88aad5dc9..000000000 --- a/vendor/src/github.com/coreos/etcd/snap/metrics.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package snap - -import "github.com/prometheus/client_golang/prometheus" - -var ( - // TODO: save_fsync latency? - saveDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "snapshot", - Name: "save_total_durations_seconds", - Help: "The total latency distributions of save called by snapshot.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - - marshallingDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "snapshot", - Name: "save_marshalling_durations_seconds", - Help: "The marshalling cost distributions of save called by snapshot.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) -) - -func init() { - prometheus.MustRegister(saveDurations) - prometheus.MustRegister(marshallingDurations) -} diff --git a/vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go b/vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go deleted file mode 100644 index 5d1d21ab3..000000000 --- a/vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go +++ /dev/null @@ -1,332 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: snap.proto -// DO NOT EDIT! - -/* - Package snappb is a generated protocol buffer package. - - It is generated from these files: - snap.proto - - It has these top-level messages: - Snapshot -*/ -package snappb - -import ( - "fmt" - - proto "github.com/gogo/protobuf/proto" -) - -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type Snapshot struct { - Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"` - Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} - -func init() { - proto.RegisterType((*Snapshot)(nil), "snappb.snapshot") -} -func (m *Snapshot) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Snapshot) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintSnap(data, i, uint64(m.Crc)) - if m.Data != nil { - data[i] = 0x12 - i++ - i = encodeVarintSnap(data, i, uint64(len(m.Data))) - i += copy(data[i:], m.Data) - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Snap(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Snap(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintSnap(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Snapshot) Size() (n int) { - var l int - _ = l - n += 1 + sovSnap(uint64(m.Crc)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovSnap(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovSnap(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozSnap(x uint64) (n int) { - return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Snapshot) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnap - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: snapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: snapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType) - } - m.Crc = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnap - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Crc |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnap - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnap - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnap(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSnap - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSnap(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthSnap - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipSnap(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto b/vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto deleted file mode 100644 index cd3d21d0e..000000000 --- a/vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto2"; -package snappb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; - -message snapshot { - optional uint32 crc = 1 [(gogoproto.nullable) = false]; - optional bytes data = 2; -} diff --git a/vendor/src/github.com/coreos/etcd/snap/snapshotter.go b/vendor/src/github.com/coreos/etcd/snap/snapshotter.go deleted file mode 100644 index 4e06483a8..000000000 --- 
a/vendor/src/github.com/coreos/etcd/snap/snapshotter.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package snap stores raft nodes' states with snapshots. -package snap - -import ( - "errors" - "fmt" - "hash/crc32" - "io/ioutil" - "os" - "path" - "sort" - "strings" - "time" - - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap/snappb" - - "github.com/coreos/pkg/capnslog" -) - -const ( - snapSuffix = ".snap" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "snap") - - ErrNoSnapshot = errors.New("snap: no available snapshot") - ErrEmptySnapshot = errors.New("snap: empty snapshot") - ErrCRCMismatch = errors.New("snap: crc mismatch") - crcTable = crc32.MakeTable(crc32.Castagnoli) -) - -type Snapshotter struct { - dir string -} - -func New(dir string) *Snapshotter { - return &Snapshotter{ - dir: dir, - } -} - -func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error { - if raft.IsEmptySnap(snapshot) { - return nil - } - return s.save(&snapshot) -} - -func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error { - start := time.Now() - - fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix) - b := pbutil.MustMarshal(snapshot) - crc := crc32.Update(0, crcTable, b) - snap := snappb.Snapshot{Crc: crc, Data: b} - d, err := snap.Marshal() - if err != nil { - return err - } else { - marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second)) - } - - err = ioutil.WriteFile(path.Join(s.dir, fname), d, 0666) - if err == nil { - saveDurations.Observe(float64(time.Since(start)) / float64(time.Second)) - } - return err -} - -func (s *Snapshotter) Load() (*raftpb.Snapshot, error) { - names, err := s.snapNames() - if err != nil { - return nil, err - } - var snap *raftpb.Snapshot - for _, name := range names { - if snap, err = loadSnap(s.dir, name); err == nil { - break - } - } - if err != nil { - return nil, ErrNoSnapshot - } - return snap, nil -} - -func loadSnap(dir, name string) (*raftpb.Snapshot, error) { - fpath := path.Join(dir, name) - snap, err := Read(fpath) - if err != nil { - renameBroken(fpath) - } - return snap, err -} - -// Read reads the snapshot named by snapname and returns the snapshot. 
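save() above names snapshot files "%016x-%016x.snap" (term, then index, as zero-padded hex). Because both fields are fixed width, the plain reverse lexicographic sort done by snapNames below orders files from newest to oldest. For example:

	names := []string{
		"0000000000000001-0000000000000009.snap",
		"0000000000000002-0000000000000003.snap",
		"0000000000000001-000000000000000a.snap",
	}
	sort.Sort(sort.Reverse(sort.StringSlice(names)))
	// names[0] is now "0000000000000002-0000000000000003.snap":
	// the highest term wins, then the highest index within a term.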
-func Read(snapname string) (*raftpb.Snapshot, error) {
-	b, err := ioutil.ReadFile(snapname)
-	if err != nil {
-		plog.Errorf("cannot read file %v: %v", snapname, err)
-		return nil, err
-	}
-
-	if len(b) == 0 {
-		plog.Errorf("unexpected empty snapshot")
-		return nil, ErrEmptySnapshot
-	}
-
-	var serializedSnap snappb.Snapshot
-	if err = serializedSnap.Unmarshal(b); err != nil {
-		plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
-		return nil, err
-	}
-
-	if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 {
-		plog.Errorf("unexpected empty snapshot")
-		return nil, ErrEmptySnapshot
-	}
-
-	crc := crc32.Update(0, crcTable, serializedSnap.Data)
-	if crc != serializedSnap.Crc {
-		plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname)
-		return nil, ErrCRCMismatch
-	}
-
-	var snap raftpb.Snapshot
-	if err = snap.Unmarshal(serializedSnap.Data); err != nil {
-		plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
-		return nil, err
-	}
-	return &snap, nil
-}
-
-// snapNames returns the filenames of the snapshots in logical time order (from newest to oldest).
-// If there are no available snapshots, ErrNoSnapshot will be returned.
-func (s *Snapshotter) snapNames() ([]string, error) {
-	dir, err := os.Open(s.dir)
-	if err != nil {
-		return nil, err
-	}
-	defer dir.Close()
-	names, err := dir.Readdirnames(-1)
-	if err != nil {
-		return nil, err
-	}
-	snaps := checkSuffix(names)
-	if len(snaps) == 0 {
-		return nil, ErrNoSnapshot
-	}
-	sort.Sort(sort.Reverse(sort.StringSlice(snaps)))
-	return snaps, nil
-}
-
-func checkSuffix(names []string) []string {
-	snaps := []string{}
-	for i := range names {
-		if strings.HasSuffix(names[i], snapSuffix) {
-			snaps = append(snaps, names[i])
-		} else {
-			plog.Warningf("skipped unexpected non snapshot file %v", names[i])
-		}
-	}
-	return snaps
-}
-
-func renameBroken(path string) {
-	brokenPath := path + ".broken"
-	if err := os.Rename(path, brokenPath); err != nil {
-		plog.Warningf("cannot rename broken snapshot file %v to %v: %v", path, brokenPath, err)
-	}
-}
diff --git a/vendor/src/github.com/coreos/etcd/wal/decoder.go b/vendor/src/github.com/coreos/etcd/wal/decoder.go
deleted file mode 100644
index f75c919fb..000000000
--- a/vendor/src/github.com/coreos/etcd/wal/decoder.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
-	"bufio"
-	"encoding/binary"
-	"hash"
-	"io"
-	"sync"
-
-	"github.com/coreos/etcd/pkg/crc"
-	"github.com/coreos/etcd/pkg/pbutil"
-	"github.com/coreos/etcd/raft/raftpb"
-	"github.com/coreos/etcd/wal/walpb"
-)
-
-type decoder struct {
-	mu sync.Mutex
-	br *bufio.Reader
-
-	c   io.Closer
-	crc hash.Hash32
-}
-
-func newDecoder(rc io.ReadCloser) *decoder {
-	return &decoder{
-		br:  bufio.NewReader(rc),
-		c:   rc,
-		crc: crc.New(0, crcTable),
-	}
-}
-
-func (d *decoder) decode(rec *walpb.Record) error {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-
-	rec.Reset()
-	l, err := readInt64(d.br)
-	if err != nil {
-		return err
-	}
-	data := make([]byte, l)
-	if _, err = io.ReadFull(d.br, data); err != nil {
-		// ReadFull returns io.EOF only if no bytes were read;
-		// the decoder should treat this as io.ErrUnexpectedEOF instead.
-		if err == io.EOF {
-			err = io.ErrUnexpectedEOF
-		}
-		return err
-	}
-	if err := rec.Unmarshal(data); err != nil {
-		return err
-	}
-	// skip crc checking if the record type is crcType
-	if rec.Type == crcType {
-		return nil
-	}
-	d.crc.Write(rec.Data)
-	return rec.Validate(d.crc.Sum32())
-}
-
-func (d *decoder) updateCRC(prevCrc uint32) {
-	d.crc = crc.New(prevCrc, crcTable)
-}
-
-func (d *decoder) lastCRC() uint32 {
-	return d.crc.Sum32()
-}
-
-func (d *decoder) close() error {
-	return d.c.Close()
-}
-
-func mustUnmarshalEntry(d []byte) raftpb.Entry {
-	var e raftpb.Entry
-	pbutil.MustUnmarshal(&e, d)
-	return e
-}
-
-func mustUnmarshalState(d []byte) raftpb.HardState {
-	var s raftpb.HardState
-	pbutil.MustUnmarshal(&s, d)
-	return s
-}
-
-func readInt64(r io.Reader) (int64, error) {
-	var n int64
-	err := binary.Read(r, binary.LittleEndian, &n)
-	return n, err
-}
diff --git a/vendor/src/github.com/coreos/etcd/wal/doc.go b/vendor/src/github.com/coreos/etcd/wal/doc.go
deleted file mode 100644
index 769b522f0..000000000
--- a/vendor/src/github.com/coreos/etcd/wal/doc.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package wal provides an implementation of a write ahead log that is used by
-etcd.
-
-A WAL is created at a particular directory and is made up of a number of
-segmented WAL files. Inside each file, the raft state and entries are appended
-with the Save method:
-
-	metadata := []byte{}
-	w, err := wal.Create("/var/lib/etcd", metadata)
-	...
-	err := w.Save(s, ents)
-
-After saving a raft snapshot to disk, the SaveSnapshot method should be called
-to record it, so that the WAL can be matched with the saved snapshot when
-restarting.
-
-	err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2})
-
-When a user has finished using a WAL, it must be closed:
-
-	w.Close()
-
-WAL files are placed inside the directory in the following format:
-$seq-$index.wal
-
-The first WAL file to be created will be 0000000000000000-0000000000000000.wal,
-indicating an initial sequence of 0 and an initial raft index of 0. The first
-entry written to the WAL MUST have raft index 0.
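The naming rule can be checked with a one-liner; %016x is the same format string the package's walName helper (further down in this patch) uses:

    name := fmt.Sprintf("%016x-%016x.wal", 0, 0)
    // name == "0000000000000000-0000000000000000.wal"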
-
-The WAL will cut its current wal file if its size exceeds 8MB. This will increment an internal
-sequence number and cause a new file to be created. If the last raft index saved
-was 0x20 and this is the first time cut has been called on this WAL, then the sequence will
-increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal.
-If 0x10 more entries with incrementing indexes are written and a second cut is issued,
-the file will be called: 0000000000000002-0000000000000031.wal.
-
-At a later time a WAL can be opened at a particular snapshot. If there is no
-snapshot, an empty snapshot should be passed in.
-
-	w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2})
-	...
-
-The snapshot must have been written to the WAL.
-
-Additional items cannot be Saved to this WAL until all of the items from the given
-snapshot to the end of the WAL are read first:
-
-	metadata, state, ents, err := w.ReadAll()
-
-This will give you the metadata, the last raftpb.HardState, and the slice of
-raftpb.Entry items in the log.
-
-*/
-package wal
diff --git a/vendor/src/github.com/coreos/etcd/wal/encoder.go b/vendor/src/github.com/coreos/etcd/wal/encoder.go
deleted file mode 100644
index f5b73fe12..000000000
--- a/vendor/src/github.com/coreos/etcd/wal/encoder.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
-	"bufio"
-	"encoding/binary"
-	"hash"
-	"io"
-	"sync"
-
-	"github.com/coreos/etcd/pkg/crc"
-	"github.com/coreos/etcd/wal/walpb"
-)
-
-type encoder struct {
-	mu sync.Mutex
-	bw *bufio.Writer
-
-	crc       hash.Hash32
-	buf       []byte
-	uint64buf []byte
-}
-
-func newEncoder(w io.Writer, prevCrc uint32) *encoder {
-	return &encoder{
-		bw:  bufio.NewWriter(w),
-		crc: crc.New(prevCrc, crcTable),
-		// 1MB buffer
-		buf:       make([]byte, 1024*1024),
-		uint64buf: make([]byte, 8),
-	}
-}
-
-func (e *encoder) encode(rec *walpb.Record) error {
-	e.mu.Lock()
-	defer e.mu.Unlock()
-
-	e.crc.Write(rec.Data)
-	rec.Crc = e.crc.Sum32()
-	var (
-		data []byte
-		err  error
-		n    int
-	)
-
-	if rec.Size() > len(e.buf) {
-		data, err = rec.Marshal()
-		if err != nil {
-			return err
-		}
-	} else {
-		n, err = rec.MarshalTo(e.buf)
-		if err != nil {
-			return err
-		}
-		data = e.buf[:n]
-	}
-	if err = writeInt64(e.bw, int64(len(data)), e.uint64buf); err != nil {
-		return err
-	}
-	_, err = e.bw.Write(data)
-	return err
-}
-
-func (e *encoder) flush() error {
-	e.mu.Lock()
-	defer e.mu.Unlock()
-	return e.bw.Flush()
-}
-
-func writeInt64(w io.Writer, n int64, buf []byte) error {
-	// http://golang.org/src/encoding/binary/binary.go
-	binary.LittleEndian.PutUint64(buf, uint64(n))
-	_, err := w.Write(buf)
-	return err
-}
diff --git a/vendor/src/github.com/coreos/etcd/wal/metrics.go b/vendor/src/github.com/coreos/etcd/wal/metrics.go
deleted file mode 100644
index ed270fac6..000000000
--- a/vendor/src/github.com/coreos/etcd/wal/metrics.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
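Taken together, encoder.encode and decoder.decode define the on-disk record framing: an 8-byte little-endian length followed by a marshaled walpb.Record whose Crc field chains over all record Data seen so far. A stripped-down reader that ignores the CRC chaining (illustrative only; readRawRecord is a made-up name, and the walpb, encoding/binary, and io imports are as shown above):

    func readRawRecord(r io.Reader) (*walpb.Record, error) {
        var n int64
        if err := binary.Read(r, binary.LittleEndian, &n); err != nil {
            return nil, err
        }
        buf := make([]byte, n)
        if _, err := io.ReadFull(r, buf); err != nil {
            return nil, err
        }
        rec := &walpb.Record{}
        return rec, rec.Unmarshal(buf)
    }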
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import "github.com/prometheus/client_golang/prometheus" - -var ( - syncDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "wal", - Name: "fsync_durations_seconds", - Help: "The latency distributions of fsync called by wal.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - lastIndexSaved = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "wal", - Name: "last_index_saved", - Help: "The index of the last entry saved by wal.", - }) -) - -func init() { - prometheus.MustRegister(syncDurations) - prometheus.MustRegister(lastIndexSaved) -} diff --git a/vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go b/vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go deleted file mode 100644 index 513c6d17d..000000000 --- a/vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import "io" - -type multiReadCloser struct { - closers []io.Closer - reader io.Reader -} - -func (mc *multiReadCloser) Close() error { - var err error - for i := range mc.closers { - err = mc.closers[i].Close() - } - return err -} - -func (mc *multiReadCloser) Read(p []byte) (int, error) { - return mc.reader.Read(p) -} - -func MultiReadCloser(readClosers ...io.ReadCloser) io.ReadCloser { - cs := make([]io.Closer, len(readClosers)) - rs := make([]io.Reader, len(readClosers)) - for i := range readClosers { - cs[i] = readClosers[i] - rs[i] = readClosers[i] - } - r := io.MultiReader(rs...) - return &multiReadCloser{cs, r} -} diff --git a/vendor/src/github.com/coreos/etcd/wal/repair.go b/vendor/src/github.com/coreos/etcd/wal/repair.go deleted file mode 100644 index bcc22ef08..000000000 --- a/vendor/src/github.com/coreos/etcd/wal/repair.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
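MultiReadCloser is what lets several WAL segment files be consumed as one continuous stream during recovery. A hypothetical use (the file names are made up for illustration; error handling elided in doc.go style):

    f1, err := os.Open("0000000000000000-0000000000000000.wal")
    ...
    f2, err := os.Open("0000000000000001-0000000000000021.wal")
    ...
    rc := MultiReadCloser(f1, f2)
    defer rc.Close() // closes both underlying files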
-// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "io" - "os" - "path" - - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/wal/walpb" -) - -// Repair tries to repair ErrUnexpectedEOF in the -// last wal file by truncating. -func Repair(dirpath string) bool { - f, err := openLast(dirpath) - if err != nil { - return false - } - defer f.Close() - - n := 0 - rec := &walpb.Record{} - - decoder := newDecoder(f) - defer decoder.close() - for { - err := decoder.decode(rec) - switch err { - case nil: - n += 8 + rec.Size() - // update crc of the decoder when necessary - switch rec.Type { - case crcType: - crc := decoder.crc.Sum32() - // current crc of decoder must match the crc of the record. - // do no need to match 0 crc, since the decoder is a new one at this case. - if crc != 0 && rec.Validate(crc) != nil { - return false - } - decoder.updateCRC(rec.Crc) - } - continue - case io.EOF: - return true - case io.ErrUnexpectedEOF: - plog.Noticef("repairing %v", f.Name()) - bf, bferr := os.Create(f.Name() + ".broken") - if bferr != nil { - plog.Errorf("could not repair %v, failed to create backup file", f.Name()) - return false - } - defer bf.Close() - - if _, err = f.Seek(0, os.SEEK_SET); err != nil { - plog.Errorf("could not repair %v, failed to read file", f.Name()) - return false - } - - if _, err = io.Copy(bf, f); err != nil { - plog.Errorf("could not repair %v, failed to copy file", f.Name()) - return false - } - - if err = f.Truncate(int64(n)); err != nil { - plog.Errorf("could not repair %v, failed to truncate file", f.Name()) - return false - } - if err = f.Sync(); err != nil { - plog.Errorf("could not repair %v, failed to sync file", f.Name()) - return false - } - return true - default: - plog.Errorf("could not repair error (%v)", err) - return false - } - } -} - -// openLast opens the last wal file for read and write. -func openLast(dirpath string) (*os.File, error) { - names, err := fileutil.ReadDir(dirpath) - if err != nil { - return nil, err - } - names = checkWalNames(names) - if len(names) == 0 { - return nil, ErrFileNotFound - } - last := path.Join(dirpath, names[len(names)-1]) - return os.OpenFile(last, os.O_RDWR, 0) -} diff --git a/vendor/src/github.com/coreos/etcd/wal/util.go b/vendor/src/github.com/coreos/etcd/wal/util.go deleted file mode 100644 index 9588b6ec0..000000000 --- a/vendor/src/github.com/coreos/etcd/wal/util.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "errors" - "fmt" - "strings" - - "github.com/coreos/etcd/pkg/fileutil" -) - -var ( - badWalName = errors.New("bad wal name") -) - -func Exist(dirpath string) bool { - names, err := fileutil.ReadDir(dirpath) - if err != nil { - return false - } - return len(names) != 0 -} - -// searchIndex returns the last array index of names whose raft index section is -// equal to or smaller than the given index. 
-// The given names MUST be sorted. -func searchIndex(names []string, index uint64) (int, bool) { - for i := len(names) - 1; i >= 0; i-- { - name := names[i] - _, curIndex, err := parseWalName(name) - if err != nil { - plog.Panicf("parse correct name should never fail: %v", err) - } - if index >= curIndex { - return i, true - } - } - return -1, false -} - -// names should have been sorted based on sequence number. -// isValidSeq checks whether seq increases continuously. -func isValidSeq(names []string) bool { - var lastSeq uint64 - for _, name := range names { - curSeq, _, err := parseWalName(name) - if err != nil { - plog.Panicf("parse correct name should never fail: %v", err) - } - if lastSeq != 0 && lastSeq != curSeq-1 { - return false - } - lastSeq = curSeq - } - return true -} - -func checkWalNames(names []string) []string { - wnames := make([]string, 0) - for _, name := range names { - if _, _, err := parseWalName(name); err != nil { - plog.Warningf("ignored file %v in wal", name) - continue - } - wnames = append(wnames, name) - } - return wnames -} - -func parseWalName(str string) (seq, index uint64, err error) { - if !strings.HasSuffix(str, ".wal") { - return 0, 0, badWalName - } - _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index) - return seq, index, err -} - -func walName(seq, index uint64) string { - return fmt.Sprintf("%016x-%016x.wal", seq, index) -} diff --git a/vendor/src/github.com/coreos/etcd/wal/wal.go b/vendor/src/github.com/coreos/etcd/wal/wal.go deleted file mode 100644 index f9a58ca38..000000000 --- a/vendor/src/github.com/coreos/etcd/wal/wal.go +++ /dev/null @@ -1,562 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "errors" - "fmt" - "hash/crc32" - "io" - "os" - "path" - "reflect" - "sync" - "time" - - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/wal/walpb" - - "github.com/coreos/pkg/capnslog" -) - -const ( - metadataType int64 = iota + 1 - entryType - stateType - crcType - snapshotType - - // the owner can make/remove files inside the directory - privateDirMode = 0700 - - // the expected size of each wal segment file. - // the actual size might be bigger than it. - segmentSizeBytes = 64 * 1000 * 1000 // 64MB -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "wal") - - ErrMetadataConflict = errors.New("wal: conflicting metadata found") - ErrFileNotFound = errors.New("wal: file not found") - ErrCRCMismatch = errors.New("wal: crc mismatch") - ErrSnapshotMismatch = errors.New("wal: snapshot mismatch") - ErrSnapshotNotFound = errors.New("wal: snapshot not found") - crcTable = crc32.MakeTable(crc32.Castagnoli) -) - -// WAL is a logical representation of the stable storage. -// WAL is either in read mode or append mode but not both. -// A newly created WAL is in append mode, and ready for appending records. 
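walName and parseWalName are inverses, which is worth spelling out since searchIndex and isValidSeq both lean on this round-trip:

    name := walName(2, 0x31)              // "0000000000000002-0000000000000031.wal"
    seq, index, err := parseWalName(name) // seq == 2, index == 0x31, err == nil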
-// A just opened WAL is in read mode, and ready for reading records. -// The WAL will be ready for appending after reading out all the previous records. -type WAL struct { - dir string // the living directory of the underlay files - metadata []byte // metadata recorded at the head of each WAL - state raftpb.HardState // hardstate recorded at the head of WAL - - start walpb.Snapshot // snapshot to start reading - decoder *decoder // decoder to decode records - - mu sync.Mutex - f *os.File // underlay file opened for appending, sync - seq uint64 // sequence of the wal file currently used for writes - enti uint64 // index of the last entry saved to the wal - encoder *encoder // encoder to encode records - - locks []fileutil.Lock // the file locks the WAL is holding (the name is increasing) -} - -// Create creates a WAL ready for appending records. The given metadata is -// recorded at the head of each WAL file, and can be retrieved with ReadAll. -func Create(dirpath string, metadata []byte) (*WAL, error) { - if Exist(dirpath) { - return nil, os.ErrExist - } - - if err := os.MkdirAll(dirpath, privateDirMode); err != nil { - return nil, err - } - - p := path.Join(dirpath, walName(0, 0)) - f, err := os.OpenFile(p, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600) - if err != nil { - return nil, err - } - l, err := fileutil.NewLock(f.Name()) - if err != nil { - return nil, err - } - if err = l.Lock(); err != nil { - return nil, err - } - - w := &WAL{ - dir: dirpath, - metadata: metadata, - seq: 0, - f: f, - encoder: newEncoder(f, 0), - } - w.locks = append(w.locks, l) - if err := w.saveCrc(0); err != nil { - return nil, err - } - if err := w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil { - return nil, err - } - if err := w.SaveSnapshot(walpb.Snapshot{}); err != nil { - return nil, err - } - return w, nil -} - -// Open opens the WAL at the given snap. -// The snap SHOULD have been previously saved to the WAL, or the following -// ReadAll will fail. -// The returned WAL is ready to read and the first record will be the one after -// the given snap. The WAL cannot be appended to before reading out all of its -// previous records. -func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) { - return openAtIndex(dirpath, snap, true) -} - -// OpenForRead only opens the wal files for read. -// Write on a read only wal panics. -func OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error) { - return openAtIndex(dirpath, snap, false) -} - -func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) { - names, err := fileutil.ReadDir(dirpath) - if err != nil { - return nil, err - } - names = checkWalNames(names) - if len(names) == 0 { - return nil, ErrFileNotFound - } - - nameIndex, ok := searchIndex(names, snap.Index) - if !ok || !isValidSeq(names[nameIndex:]) { - return nil, ErrFileNotFound - } - - // open the wal files for reading - rcs := make([]io.ReadCloser, 0) - ls := make([]fileutil.Lock, 0) - for _, name := range names[nameIndex:] { - f, err := os.Open(path.Join(dirpath, name)) - if err != nil { - return nil, err - } - l, err := fileutil.NewLock(f.Name()) - if err != nil { - return nil, err - } - err = l.TryLock() - if err != nil { - if write { - return nil, err - } - } - rcs = append(rcs, f) - ls = append(ls, l) - } - rc := MultiReadCloser(rcs...) 
-
-	// create a WAL ready for reading
-	w := &WAL{
-		dir:     dirpath,
-		start:   snap,
-		decoder: newDecoder(rc),
-		locks:   ls,
-	}
-
-	if write {
-		// open the last wal file for appending
-		seq, _, err := parseWalName(names[len(names)-1])
-		if err != nil {
-			rc.Close()
-			return nil, err
-		}
-		last := path.Join(dirpath, names[len(names)-1])
-
-		f, err := os.OpenFile(last, os.O_WRONLY|os.O_APPEND, 0)
-		if err != nil {
-			rc.Close()
-			return nil, err
-		}
-		err = fileutil.Preallocate(f, segmentSizeBytes)
-		if err != nil {
-			rc.Close()
-			plog.Errorf("failed to allocate space when creating new wal file (%v)", err)
-			return nil, err
-		}
-
-		w.f = f
-		w.seq = seq
-	}
-
-	return w, nil
-}
-
-// ReadAll reads out the records of the current WAL.
-// If opened in write mode, it must read out all records until EOF, or an
-// error will be returned.
-// If opened in read mode, it will try to read all records if possible.
-// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
-// If the loaded snap doesn't match the expected one, it will return
-// all the records along with the error ErrSnapshotMismatch.
-// TODO: detect not-last-snap error.
-// TODO: maybe loosen the matching check.
-// After ReadAll, the WAL will be ready for appending new records.
-func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-
-	rec := &walpb.Record{}
-	decoder := w.decoder
-
-	var match bool
-	for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
-		switch rec.Type {
-		case entryType:
-			e := mustUnmarshalEntry(rec.Data)
-			if e.Index > w.start.Index {
-				ents = append(ents[:e.Index-w.start.Index-1], e)
-			}
-			w.enti = e.Index
-		case stateType:
-			state = mustUnmarshalState(rec.Data)
-		case metadataType:
-			if metadata != nil && !reflect.DeepEqual(metadata, rec.Data) {
-				state.Reset()
-				return nil, state, nil, ErrMetadataConflict
-			}
-			metadata = rec.Data
-		case crcType:
-			crc := decoder.crc.Sum32()
-			// the current crc of the decoder must match the crc of the record.
-			// there is no need to match a 0 crc, since the decoder is new in this case.
-			if crc != 0 && rec.Validate(crc) != nil {
-				state.Reset()
-				return nil, state, nil, ErrCRCMismatch
-			}
-			decoder.updateCRC(rec.Crc)
-		case snapshotType:
-			var snap walpb.Snapshot
-			pbutil.MustUnmarshal(&snap, rec.Data)
-			if snap.Index == w.start.Index {
-				if snap.Term != w.start.Term {
-					state.Reset()
-					return nil, state, nil, ErrSnapshotMismatch
-				}
-				match = true
-			}
-		default:
-			state.Reset()
-			return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
-		}
-	}
-
-	switch w.f {
-	case nil:
-		// We do not have to read out all entries in read mode.
-		// The last record may be a partially written one, so
-		// io.ErrUnexpectedEOF might be returned.
-		if err != io.EOF && err != io.ErrUnexpectedEOF {
-			state.Reset()
-			return nil, state, nil, err
-		}
-	default:
-		// We must read out all of the entries if the WAL is opened in write mode.
-		if err != io.EOF {
-			state.Reset()
-			return nil, state, nil, err
-		}
-	}
-
-	err = nil
-	if !match {
-		err = ErrSnapshotNotFound
-	}
-
-	// close the decoder, disable reading
-	w.decoder.close()
-	w.start = walpb.Snapshot{}
-
-	w.metadata = metadata
-
-	if w.f != nil {
-		// create an encoder (chaining its crc with the decoder's), enable appending
-		w.encoder = newEncoder(w.f, w.decoder.lastCRC())
-		w.decoder = nil
-		lastIndexSaved.Set(float64(w.enti))
-	}
-
-	return metadata, state, ents, err
-}
-
-// cut closes the current file being written and creates a new one ready for appending.
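The read-then-append contract in ReadAll is easiest to see end to end. A hedged sketch of a restart sequence, in the style of the doc.go examples above (error handling elided; newState and newEnts are hypothetical values produced by the caller):

    w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2})
    ...
    metadata, state, ents, err := w.ReadAll()
    ...
    // only after ReadAll returns is the WAL ready for appending:
    err = w.Save(newState, newEnts)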
-// cut first creates a temp wal file and writes necessary headers into it. -// Then cut atomically rename temp wal file to a wal file. -func (w *WAL) cut() error { - // close old wal file - if err := w.sync(); err != nil { - return err - } - if err := w.f.Close(); err != nil { - return err - } - - fpath := path.Join(w.dir, walName(w.seq+1, w.enti+1)) - ftpath := fpath + ".tmp" - - // create a temp wal file with name sequence + 1, or truncate the existing one - ft, err := os.OpenFile(ftpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - - // update writer and save the previous crc - w.f = ft - prevCrc := w.encoder.crc.Sum32() - w.encoder = newEncoder(w.f, prevCrc) - if err = w.saveCrc(prevCrc); err != nil { - return err - } - if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil { - return err - } - if err = w.saveState(&w.state); err != nil { - return err - } - // close temp wal file - if err = w.sync(); err != nil { - return err - } - if err = w.f.Close(); err != nil { - return err - } - - // atomically move temp wal file to wal file - if err = os.Rename(ftpath, fpath); err != nil { - return err - } - - // open the wal file and update writer again - f, err := os.OpenFile(fpath, os.O_WRONLY|os.O_APPEND, 0600) - if err != nil { - return err - } - if err = fileutil.Preallocate(f, segmentSizeBytes); err != nil { - plog.Errorf("failed to allocate space when creating new wal file (%v)", err) - return err - } - - w.f = f - prevCrc = w.encoder.crc.Sum32() - w.encoder = newEncoder(w.f, prevCrc) - - // lock the new wal file - l, err := fileutil.NewLock(f.Name()) - if err != nil { - return err - } - - if err := l.Lock(); err != nil { - return err - } - w.locks = append(w.locks, l) - - // increase the wal seq - w.seq++ - - plog.Infof("segmented wal file %v is created", fpath) - return nil -} - -func (w *WAL) sync() error { - if w.encoder != nil { - if err := w.encoder.flush(); err != nil { - return err - } - } - start := time.Now() - err := fileutil.Fdatasync(w.f) - syncDurations.Observe(float64(time.Since(start)) / float64(time.Second)) - return err -} - -// ReleaseLockTo releases the locks, which has smaller index than the given index -// except the largest one among them. -// For example, if WAL is holding lock 1,2,3,4,5,6, ReleaseLockTo(4) will release -// lock 1,2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4. -func (w *WAL) ReleaseLockTo(index uint64) error { - w.mu.Lock() - defer w.mu.Unlock() - - var smaller int - found := false - - for i, l := range w.locks { - _, lockIndex, err := parseWalName(path.Base(l.Name())) - if err != nil { - return err - } - if lockIndex >= index { - smaller = i - 1 - found = true - break - } - } - - // if no lock index is greater than the release index, we can - // release lock up to the last one(excluding). 
- if !found && len(w.locks) != 0 { - smaller = len(w.locks) - 1 - } - - if smaller <= 0 { - return nil - } - - for i := 0; i < smaller; i++ { - w.locks[i].Unlock() - w.locks[i].Destroy() - } - w.locks = w.locks[smaller:] - - return nil -} - -func (w *WAL) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - - if w.f != nil { - if err := w.sync(); err != nil { - return err - } - if err := w.f.Close(); err != nil { - return err - } - } - for _, l := range w.locks { - err := l.Unlock() - if err != nil { - plog.Errorf("failed to unlock during closing wal: %s", err) - } - err = l.Destroy() - if err != nil { - plog.Errorf("failed to destroy lock during closing wal: %s", err) - } - } - return nil -} - -func (w *WAL) saveEntry(e *raftpb.Entry) error { - // TODO: add MustMarshalTo to reduce one allocation. - b := pbutil.MustMarshal(e) - rec := &walpb.Record{Type: entryType, Data: b} - if err := w.encoder.encode(rec); err != nil { - return err - } - w.enti = e.Index - lastIndexSaved.Set(float64(w.enti)) - return nil -} - -func (w *WAL) saveState(s *raftpb.HardState) error { - if raft.IsEmptyHardState(*s) { - return nil - } - w.state = *s - b := pbutil.MustMarshal(s) - rec := &walpb.Record{Type: stateType, Data: b} - return w.encoder.encode(rec) -} - -func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { - w.mu.Lock() - defer w.mu.Unlock() - - // short cut, do not call sync - if raft.IsEmptyHardState(st) && len(ents) == 0 { - return nil - } - - mustSync := mustSync(st, w.state, len(ents)) - - // TODO(xiangli): no more reference operator - for i := range ents { - if err := w.saveEntry(&ents[i]); err != nil { - return err - } - } - if err := w.saveState(&st); err != nil { - return err - } - - fstat, err := w.f.Stat() - if err != nil { - return err - } - if fstat.Size() < segmentSizeBytes { - if mustSync { - return w.sync() - } - return nil - } - // TODO: add a test for this code path when refactoring the tests - return w.cut() -} - -func (w *WAL) SaveSnapshot(e walpb.Snapshot) error { - w.mu.Lock() - defer w.mu.Unlock() - - b := pbutil.MustMarshal(&e) - rec := &walpb.Record{Type: snapshotType, Data: b} - if err := w.encoder.encode(rec); err != nil { - return err - } - // update enti only when snapshot is ahead of last index - if w.enti < e.Index { - w.enti = e.Index - } - lastIndexSaved.Set(float64(w.enti)) - return w.sync() -} - -func (w *WAL) saveCrc(prevCrc uint32) error { - return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc}) -} - -func mustSync(st, prevst raftpb.HardState, entsnum int) bool { - // Persistent state on all servers: - // (Updated on stable storage before responding to RPCs) - // currentTerm - // votedFor - // log entries[] - if entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term { - return true - } - return false -} diff --git a/vendor/src/github.com/coreos/etcd/wal/walpb/record.go b/vendor/src/github.com/coreos/etcd/wal/walpb/record.go deleted file mode 100644 index bb5368569..000000000 --- a/vendor/src/github.com/coreos/etcd/wal/walpb/record.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package walpb - -import "errors" - -var ( - ErrCRCMismatch = errors.New("walpb: crc mismatch") -) - -func (rec *Record) Validate(crc uint32) error { - if rec.Crc == crc { - return nil - } - rec.Reset() - return ErrCRCMismatch -} diff --git a/vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go b/vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go deleted file mode 100644 index 638bdc3b6..000000000 --- a/vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go +++ /dev/null @@ -1,495 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: record.proto -// DO NOT EDIT! - -/* - Package walpb is a generated protocol buffer package. - - It is generated from these files: - record.proto - - It has these top-level messages: - Record - Snapshot -*/ -package walpb - -import ( - "fmt" - - proto "github.com/gogo/protobuf/proto" -) - -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type Record struct { - Type int64 `protobuf:"varint,1,opt,name=type" json:"type"` - Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"` - Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Record) Reset() { *m = Record{} } -func (m *Record) String() string { return proto.CompactTextString(m) } -func (*Record) ProtoMessage() {} - -type Snapshot struct { - Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"` - Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} - -func init() { - proto.RegisterType((*Record)(nil), "walpb.Record") - proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot") -} -func (m *Record) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Record) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintRecord(data, i, uint64(m.Type)) - data[i] = 0x10 - i++ - i = encodeVarintRecord(data, i, uint64(m.Crc)) - if m.Data != nil { - data[i] = 0x1a - i++ - i = encodeVarintRecord(data, i, uint64(len(m.Data))) - i += copy(data[i:], m.Data) - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Snapshot) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Snapshot) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintRecord(data, i, uint64(m.Index)) - data[i] = 0x10 - i++ - i = encodeVarintRecord(data, i, uint64(m.Term)) - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} 
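The generated MarshalTo methods above emit a one-byte field tag followed by a base-128 varint via encodeVarintRecord (defined just below). A worked example of the varint step:

    buf := make([]byte, 2)
    n := encodeVarintRecord(buf, 0, 300)
    // 300 is 0b1_0010_1100, so buf[:n] == []byte{0xac, 0x02} and n == 2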
- -func encodeFixed64Record(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Record(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintRecord(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Record) Size() (n int) { - var l int - _ = l - n += 1 + sovRecord(uint64(m.Type)) - n += 1 + sovRecord(uint64(m.Crc)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovRecord(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Snapshot) Size() (n int) { - var l int - _ = l - n += 1 + sovRecord(uint64(m.Index)) - n += 1 + sovRecord(uint64(m.Term)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovRecord(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozRecord(x uint64) (n int) { - return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Record) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Record: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Type |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType) - } - m.Crc = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Crc |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRecord - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRecord(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRecord - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Snapshot) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Index |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRecord(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRecord - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRecord(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthRecord - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRecord(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/src/github.com/coreos/etcd/wal/walpb/record.proto b/vendor/src/github.com/coreos/etcd/wal/walpb/record.proto deleted file mode 100644 index b694cb233..000000000 --- a/vendor/src/github.com/coreos/etcd/wal/walpb/record.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto2"; -package walpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; - -message Record { - optional int64 type = 1 [(gogoproto.nullable) = false]; - optional uint32 crc = 2 [(gogoproto.nullable) = false]; - optional bytes data = 3; -} - -message Snapshot { - optional uint64 index = 1 [(gogoproto.nullable) = false]; - optional uint64 term = 2 [(gogoproto.nullable) = false]; -} diff --git a/vendor/src/github.com/coreos/go-systemd/LICENSE b/vendor/src/github.com/coreos/go-systemd/LICENSE deleted file mode 100644 index 37ec93a14..000000000 --- a/vendor/src/github.com/coreos/go-systemd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. 
- -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. 
- -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/src/github.com/coreos/go-systemd/activation/files.go b/vendor/src/github.com/coreos/go-systemd/activation/files.go deleted file mode 100644 index c8e85fcd5..000000000 --- a/vendor/src/github.com/coreos/go-systemd/activation/files.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package activation implements primitives for systemd socket activation. -package activation - -import ( - "os" - "strconv" - "syscall" -) - -// based on: https://gist.github.com/alberts/4640792 -const ( - listenFdsStart = 3 -) - -func Files(unsetEnv bool) []*os.File { - if unsetEnv { - defer os.Unsetenv("LISTEN_PID") - defer os.Unsetenv("LISTEN_FDS") - } - - pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) - if err != nil || pid != os.Getpid() { - return nil - } - - nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) - if err != nil || nfds == 0 { - return nil - } - - files := make([]*os.File, 0, nfds) - for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { - syscall.CloseOnExec(fd) - files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd))) - } - - return files -} diff --git a/vendor/src/github.com/coreos/go-systemd/activation/listeners.go b/vendor/src/github.com/coreos/go-systemd/activation/listeners.go deleted file mode 100644 index df27c29e9..000000000 --- a/vendor/src/github.com/coreos/go-systemd/activation/listeners.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package activation - -import ( - "crypto/tls" - "net" -) - -// Listeners returns a slice containing a net.Listener for each matching socket type -// passed to this process. -// -// The order of the file descriptors is preserved in the returned slice. -// Nil values are used to fill any gaps. For example if systemd were to return file descriptors -// corresponding with "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener} -func Listeners(unsetEnv bool) ([]net.Listener, error) { - files := Files(unsetEnv) - listeners := make([]net.Listener, len(files)) - - for i, f := range files { - if pc, err := net.FileListener(f); err == nil { - listeners[i] = pc - } - } - return listeners, nil -} - -// TLSListeners returns a slice containing a net.listener for each matching TCP socket type -// passed to this process. -// It uses default Listeners func and forces TCP sockets handlers to use TLS based on tlsConfig. 
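For context, a hypothetical systemd-activated HTTP server built on Listeners would look roughly like this (a sketch only; it assumes a unit file with a single ListenStream= socket, plus the log and net/http imports):

    listeners, err := activation.Listeners(true)
    if err != nil || len(listeners) == 0 || listeners[0] == nil {
        log.Fatal("expected one systemd-activated socket")
    }
    http.Serve(listeners[0], nil)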
-func TLSListeners(unsetEnv bool, tlsConfig *tls.Config) ([]net.Listener, error) { - listeners, err := Listeners(unsetEnv) - - if listeners == nil || err != nil { - return nil, err - } - - if tlsConfig != nil && err == nil { - tlsConfig.NextProtos = []string{"http/1.1"} - - for i, l := range listeners { - // Activate TLS only for TCP sockets - if l.Addr().Network() == "tcp" { - listeners[i] = tls.NewListener(l, tlsConfig) - } - } - } - - return listeners, err -} diff --git a/vendor/src/github.com/coreos/go-systemd/activation/packetconns.go b/vendor/src/github.com/coreos/go-systemd/activation/packetconns.go deleted file mode 100644 index 48b2ca029..000000000 --- a/vendor/src/github.com/coreos/go-systemd/activation/packetconns.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package activation - -import ( - "net" -) - -// PacketConns returns a slice containing a net.PacketConn for each matching socket type -// passed to this process. -// -// The order of the file descriptors is preserved in the returned slice. -// Nil values are used to fill any gaps. For example if systemd were to return file descriptors -// corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn} -func PacketConns(unsetEnv bool) ([]net.PacketConn, error) { - files := Files(unsetEnv) - conns := make([]net.PacketConn, len(files)) - - for i, f := range files { - if pc, err := net.FilePacketConn(f); err == nil { - conns[i] = pc - } - } - return conns, nil -} diff --git a/vendor/src/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/src/github.com/coreos/go-systemd/daemon/sdnotify.go deleted file mode 100644 index b92b1911c..000000000 --- a/vendor/src/github.com/coreos/go-systemd/daemon/sdnotify.go +++ /dev/null @@ -1,31 +0,0 @@ -// Code forked from Docker project -package daemon - -import ( - "errors" - "net" - "os" -) - -var SdNotifyNoSocket = errors.New("No socket") - -// SdNotify sends a message to the init daemon. It is common to ignore the error. -func SdNotify(state string) error { - socketAddr := &net.UnixAddr{ - Name: os.Getenv("NOTIFY_SOCKET"), - Net: "unixgram", - } - - if socketAddr.Name == "" { - return SdNotifyNoSocket - } - - conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) - if err != nil { - return err - } - defer conn.Close() - - _, err = conn.Write([]byte(state)) - return err -} diff --git a/vendor/src/github.com/coreos/go-systemd/journal/journal.go b/vendor/src/github.com/coreos/go-systemd/journal/journal.go deleted file mode 100644 index 6c3f5b94d..000000000 --- a/vendor/src/github.com/coreos/go-systemd/journal/journal.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package journal provides write bindings to the local systemd journal. -// It is implemented in pure Go and connects to the journal directly over its -// unix socket. -// -// To read from the journal, see the "sdjournal" package, which wraps the -// sd-journal a C API. -// -// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html -package journal - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" - "syscall" -) - -// Priority of a journal message -type Priority int - -const ( - PriEmerg Priority = iota - PriAlert - PriCrit - PriErr - PriWarning - PriNotice - PriInfo - PriDebug -) - -var conn net.Conn - -func init() { - var err error - conn, err = net.Dial("unixgram", "/run/systemd/journal/socket") - if err != nil { - conn = nil - } -} - -// Enabled returns true if the local systemd journal is available for logging -func Enabled() bool { - return conn != nil -} - -// Send a message to the local systemd journal. vars is a map of journald -// fields to values. Fields must be composed of uppercase letters, numbers, -// and underscores, but must not start with an underscore. Within these -// restrictions, any arbitrary field name may be used. Some names have special -// significance: see the journalctl documentation -// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) -// for more details. vars may be nil. -func Send(message string, priority Priority, vars map[string]string) error { - if conn == nil { - return journalError("could not connect to journald socket") - } - - data := new(bytes.Buffer) - appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) - appendVariable(data, "MESSAGE", message) - for k, v := range vars { - appendVariable(data, k, v) - } - - _, err := io.Copy(conn, data) - if err != nil && isSocketSpaceError(err) { - file, err := tempFd() - if err != nil { - return journalError(err.Error()) - } - _, err = io.Copy(file, data) - if err != nil { - return journalError(err.Error()) - } - - rights := syscall.UnixRights(int(file.Fd())) - - /* this connection should always be a UnixConn, but better safe than sorry */ - unixConn, ok := conn.(*net.UnixConn) - if !ok { - return journalError("can't send file through non-Unix connection") - } - unixConn.WriteMsgUnix([]byte{}, rights, nil) - } else if err != nil { - return journalError(err.Error()) - } - return nil -} - -// Print prints a message to the local systemd journal using Send(). 
-func Print(priority Priority, format string, a ...interface{}) error {
-	return Send(fmt.Sprintf(format, a...), priority, nil)
-}
-
-func appendVariable(w io.Writer, name, value string) {
-	if !validVarName(name) {
-		journalError("variable name contains invalid character, ignoring")
-	}
-	if strings.ContainsRune(value, '\n') {
-		/* When the value contains a newline, we write:
-		 * - the variable name, followed by a newline
-		 * - the size (in 64bit little endian format)
-		 * - the data, followed by a newline
-		 */
-		fmt.Fprintln(w, name)
-		binary.Write(w, binary.LittleEndian, uint64(len(value)))
-		fmt.Fprintln(w, value)
-	} else {
-		/* just write the variable and value all on one line */
-		fmt.Fprintf(w, "%s=%s\n", name, value)
-	}
-}
-
-func validVarName(name string) bool {
-	/* The variable name must be in uppercase and consist only of characters,
-	 * numbers and underscores, and may not begin with an underscore. (from the docs)
-	 */
-
-	valid := len(name) > 0 && name[0] != '_'
-	for _, c := range name {
-		// Group the character tests so that a digit or underscore cannot
-		// mark an already-invalid name valid again.
-		valid = valid && (('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_')
-	}
-	return valid
-}
-
-func isSocketSpaceError(err error) bool {
-	opErr, ok := err.(*net.OpError)
-	if !ok {
-		return false
-	}
-
-	sysErr, ok := opErr.Err.(syscall.Errno)
-	if !ok {
-		return false
-	}
-
-	return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
-}
-
-func tempFd() (*os.File, error) {
-	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
-	if err != nil {
-		return nil, err
-	}
-	// Check the unlink's own error rather than the stale err from TempFile.
-	err = syscall.Unlink(file.Name())
-	if err != nil {
-		return nil, err
-	}
-	return file, nil
-}
-
-func journalError(s string) error {
-	s = "journal error: " + s
-	fmt.Fprintln(os.Stderr, s)
-	return errors.New(s)
-}
diff --git a/vendor/src/github.com/coreos/pkg/LICENSE b/vendor/src/github.com/coreos/pkg/LICENSE deleted file mode 100644 index e06d20818..000000000 --- a/vendor/src/github.com/coreos/pkg/LICENSE +++ /dev/null @@ -1,202 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction,
-and distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by
-the copyright owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all
-other entities that control, are controlled by, or are under common
-control with that entity. For the purposes of this definition,
-"control" means (i) the power, direct or indirect, to cause the
-direction or management of such entity, whether by contract or
-otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity
-exercising permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications,
-including but not limited to software source code, documentation
-source, and configuration files.
-
-"Object" form shall mean any form resulting from mechanical
-transformation or translation of a Source form, including but
-not limited to compiled object code, generated documentation,
-and conversions to other media types.
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/src/github.com/coreos/pkg/capnslog/README.md b/vendor/src/github.com/coreos/pkg/capnslog/README.md deleted file mode 100644 index 81efb1fb6..000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# capnslog, the CoreOS logging package - -There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks) or are just a pain to use (lack of `Fatalln()`?). -capnslog provides a simple but consistent logging interface suitable for all kinds of projects. - -### Design Principles - -##### `package main` is the place where logging gets turned on and routed - -A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak. - -##### All log options are runtime-configurable. - -Still the job of `main` to expose these configurations. 
-`main` may delegate this to, say, a configuration webhook, but does so explicitly.
-
-##### There is one log object per package. It is registered under its repository and package name.
-
-`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
-
-##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
-
-Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
-
-Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
-
-##### Log objects are an interface
-
-An object knows best how to print itself. Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
-
-##### Log levels have specific meanings:
-
- * Critical: Unrecoverable. Must fail.
- * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost.
- * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
- * Notice: Normal, but important (uncommon) log information.
- * Info: Normal, working log information; everything is fine, but helpful notices for auditing or common operations.
- * Debug: Everything is still fine, but even common operations may be logged, along with less helpful, higher-volume notices.
- * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
-
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/formatters.go b/vendor/src/github.com/coreos/pkg/capnslog/formatters.go deleted file mode 100644 index 99ec6f824..000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/formatters.go +++ /dev/null @@ -1,106 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
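A minimal sketch of the workflow the README above prescribes -- one package-level log object, with `main` choosing levels at runtime. It assumes the capnslog API removed in this patch (`NewPackageLogger`, `SetGlobalLogLevel`, the level constants); the repository path `github.com/example/project` is a hypothetical placeholder.

```go
package main

import "github.com/coreos/pkg/capnslog"

// One log object per package, registered under its repository and
// package name, per the design principles above.
// "github.com/example/project" is a made-up repo path.
var plog = capnslog.NewPackageLogger("github.com/example/project", "main")

func main() {
	// main is where logging gets turned on and routed: pick the level
	// for every registered package at runtime.
	capnslog.SetGlobalLogLevel(capnslog.DEBUG)

	plog.Infof("listening on %s", ":8080")
	plog.Debug("verbose internal detail, visible because the level is DEBUG")
}
```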
- -package capnslog - -import ( - "bufio" - "fmt" - "io" - "runtime" - "strings" - "time" -) - -type Formatter interface { - Format(pkg string, level LogLevel, depth int, entries ...interface{}) - Flush() -} - -func NewStringFormatter(w io.Writer) *StringFormatter { - return &StringFormatter{ - w: bufio.NewWriter(w), - } -} - -type StringFormatter struct { - w *bufio.Writer -} - -func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) { - now := time.Now().UTC() - s.w.WriteString(now.Format(time.RFC3339)) - s.w.WriteByte(' ') - writeEntries(s.w, pkg, l, i, entries...) - s.Flush() -} - -func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { - if pkg != "" { - w.WriteString(pkg + ": ") - } - str := fmt.Sprint(entries...) - endsInNL := strings.HasSuffix(str, "\n") - w.WriteString(str) - if !endsInNL { - w.WriteString("\n") - } -} - -func (s *StringFormatter) Flush() { - s.w.Flush() -} - -func NewPrettyFormatter(w io.Writer, debug bool) Formatter { - return &PrettyFormatter{ - w: bufio.NewWriter(w), - debug: debug, - } -} - -type PrettyFormatter struct { - w *bufio.Writer - debug bool -} - -func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { - now := time.Now() - ts := now.Format("2006-01-02 15:04:05") - c.w.WriteString(ts) - ms := now.Nanosecond() / 1000 - c.w.WriteString(fmt.Sprintf(".%06d", ms)) - if c.debug { - _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. - if !ok { - file = "???" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - if line < 0 { - line = 0 // not a real line number - } - c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) - } - c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) - writeEntries(c.w, pkg, l, depth, entries...) - c.Flush() -} - -func (c *PrettyFormatter) Flush() { - c.w.Flush() -} diff --git a/vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go deleted file mode 100644 index 426603ef3..000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "bufio" - "bytes" - "io" - "os" - "runtime" - "strconv" - "strings" - "time" -) - -var pid = os.Getpid() - -type GlogFormatter struct { - StringFormatter -} - -func NewGlogFormatter(w io.Writer) *GlogFormatter { - g := &GlogFormatter{} - g.w = bufio.NewWriter(w) - return g -} - -func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) { - g.w.Write(GlogHeader(level, depth+1)) - g.StringFormatter.Format(pkg, level, depth+1, entries...) 
-}
-
-func GlogHeader(level LogLevel, depth int) []byte {
-	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
-	now := time.Now().UTC()
-	_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
-	if !ok {
-		file = "???"
-		line = 1
-	} else {
-		slash := strings.LastIndex(file, "/")
-		if slash >= 0 {
-			file = file[slash+1:]
-		}
-	}
-	if line < 0 {
-		line = 0 // not a real line number
-	}
-	buf := &bytes.Buffer{}
-	buf.Grow(30)
-	_, month, day := now.Date()
-	hour, minute, second := now.Clock()
-	buf.WriteString(level.Char())
-	twoDigits(buf, int(month))
-	twoDigits(buf, day)
-	buf.WriteByte(' ')
-	twoDigits(buf, hour)
-	buf.WriteByte(':')
-	twoDigits(buf, minute)
-	buf.WriteByte(':')
-	twoDigits(buf, second)
-	buf.WriteByte('.')
-	buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
-	buf.WriteByte('Z')
-	buf.WriteByte(' ')
-	buf.WriteString(strconv.Itoa(pid))
-	buf.WriteByte(' ')
-	buf.WriteString(file)
-	buf.WriteByte(':')
-	buf.WriteString(strconv.Itoa(line))
-	buf.WriteByte(']')
-	buf.WriteByte(' ')
-	return buf.Bytes()
-}
-
-const digits = "0123456789"
-
-func twoDigits(b *bytes.Buffer, d int) {
-	c2 := digits[d%10]
-	d /= 10
-	c1 := digits[d%10]
-	b.WriteByte(c1)
-	b.WriteByte(c2)
-}
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/init.go b/vendor/src/github.com/coreos/pkg/capnslog/init.go deleted file mode 100644 index 44b8cd361..000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/init.go +++ /dev/null @@ -1,49 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows

-package capnslog
-
-import (
-	"io"
-	"os"
-	"syscall"
-)
-
-// Here's where the opinionation comes in. We need some sensible defaults,
-// especially after taking over the log package. Your project (whatever it may
-// be) may see things differently. That's okay; there should be no defaults in
-// the main package that cannot be controlled or overridden programmatically,
-// otherwise it's a bug. The way to do that is to create your own init_log.go
-// file much like this one.
-
-func init() {
-	initHijack()
-
-	// Go `log` package uses os.Stderr.
-	SetFormatter(NewDefaultFormatter(os.Stderr))
-	SetGlobalLogLevel(INFO)
-}
-
-func NewDefaultFormatter(out io.Writer) Formatter {
-	if syscall.Getppid() == 1 {
-		// We're running under init, which may be systemd.
-		f, err := NewJournaldFormatter()
-		if err == nil {
-			return f
-		}
-	}
-	return NewPrettyFormatter(out, false)
-}
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/src/github.com/coreos/pkg/capnslog/init_windows.go deleted file mode 100644 index 455305065..000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/init_windows.go +++ /dev/null @@ -1,25 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import "os" - -func init() { - initHijack() - - // Go `log` package uses os.Stderr. - SetFormatter(NewPrettyFormatter(os.Stderr, false)) - SetGlobalLogLevel(INFO) -} diff --git a/vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go deleted file mode 100644 index 72e05207c..000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// +build !windows - -package capnslog - -import ( - "errors" - "fmt" - "os" - "path/filepath" - - "github.com/coreos/go-systemd/journal" -) - -func NewJournaldFormatter() (Formatter, error) { - if !journal.Enabled() { - return nil, errors.New("No systemd detected") - } - return &journaldFormatter{}, nil -} - -type journaldFormatter struct{} - -func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { - var pri journal.Priority - switch l { - case CRITICAL: - pri = journal.PriCrit - case ERROR: - pri = journal.PriErr - case WARNING: - pri = journal.PriWarning - case NOTICE: - pri = journal.PriNotice - case INFO: - pri = journal.PriInfo - case DEBUG: - pri = journal.PriDebug - case TRACE: - pri = journal.PriDebug - default: - panic("Unhandled loglevel") - } - msg := fmt.Sprint(entries...) - tags := map[string]string{ - "PACKAGE": pkg, - "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), - } - err := journal.Send(msg, pri, tags) - if err != nil { - fmt.Fprintln(os.Stderr, err) - } -} - -func (j *journaldFormatter) Flush() {} diff --git a/vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go deleted file mode 100644 index 970086b9f..000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package capnslog - -import ( - "log" -) - -func initHijack() { - pkg := NewPackageLogger("log", "") - w := packageWriter{pkg} - log.SetFlags(0) - log.SetPrefix("") - log.SetOutput(w) -} - -type packageWriter struct { - pl *PackageLogger -} - -func (p packageWriter) Write(b []byte) (int, error) { - if p.pl.level < INFO { - return 0, nil - } - p.pl.internalLog(calldepth+2, INFO, string(b)) - return len(b), nil -} diff --git a/vendor/src/github.com/coreos/pkg/capnslog/logmap.go b/vendor/src/github.com/coreos/pkg/capnslog/logmap.go deleted file mode 100644 index 849544883..000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/logmap.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "errors" - "strings" - "sync" -) - -// LogLevel is the set of all log levels. -type LogLevel int8 - -const ( - // CRITICAL is the lowest log level; only errors which will end the program will be propagated. - CRITICAL LogLevel = iota - 1 - // ERROR is for errors that are not fatal but lead to troubling behavior. - ERROR - // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. - WARNING - // NOTICE is for normal but significant conditions. - NOTICE - // INFO is a log level for common, everyday log updates. - INFO - // DEBUG is the default hidden level for more verbose updates about internal processes. - DEBUG - // TRACE is for (potentially) call by call tracing of programs. - TRACE -) - -// Char returns a single-character representation of the log level. -func (l LogLevel) Char() string { - switch l { - case CRITICAL: - return "C" - case ERROR: - return "E" - case WARNING: - return "W" - case NOTICE: - return "N" - case INFO: - return "I" - case DEBUG: - return "D" - case TRACE: - return "T" - default: - panic("Unhandled loglevel") - } -} - -// String returns a multi-character representation of the log level. -func (l LogLevel) String() string { - switch l { - case CRITICAL: - return "CRITICAL" - case ERROR: - return "ERROR" - case WARNING: - return "WARNING" - case NOTICE: - return "NOTICE" - case INFO: - return "INFO" - case DEBUG: - return "DEBUG" - case TRACE: - return "TRACE" - default: - panic("Unhandled loglevel") - } -} - -// Update using the given string value. Fulfills the flag.Value interface. -func (l *LogLevel) Set(s string) error { - value, err := ParseLevel(s) - if err != nil { - return err - } - - *l = value - return nil -} - -// ParseLevel translates some potential loglevel strings into their corresponding levels. 
-func ParseLevel(s string) (LogLevel, error) { - switch s { - case "CRITICAL", "C": - return CRITICAL, nil - case "ERROR", "0", "E": - return ERROR, nil - case "WARNING", "1", "W": - return WARNING, nil - case "NOTICE", "2", "N": - return NOTICE, nil - case "INFO", "3", "I": - return INFO, nil - case "DEBUG", "4", "D": - return DEBUG, nil - case "TRACE", "5", "T": - return TRACE, nil - } - return CRITICAL, errors.New("couldn't parse log level " + s) -} - -type RepoLogger map[string]*PackageLogger - -type loggerStruct struct { - sync.Mutex - repoMap map[string]RepoLogger - formatter Formatter -} - -// logger is the global logger -var logger = new(loggerStruct) - -// SetGlobalLogLevel sets the log level for all packages in all repositories -// registered with capnslog. -func SetGlobalLogLevel(l LogLevel) { - logger.Lock() - defer logger.Unlock() - for _, r := range logger.repoMap { - r.setRepoLogLevelInternal(l) - } -} - -// GetRepoLogger may return the handle to the repository's set of packages' loggers. -func GetRepoLogger(repo string) (RepoLogger, error) { - logger.Lock() - defer logger.Unlock() - r, ok := logger.repoMap[repo] - if !ok { - return nil, errors.New("no packages registered for repo " + repo) - } - return r, nil -} - -// MustRepoLogger returns the handle to the repository's packages' loggers. -func MustRepoLogger(repo string) RepoLogger { - r, err := GetRepoLogger(repo) - if err != nil { - panic(err) - } - return r -} - -// SetRepoLogLevel sets the log level for all packages in the repository. -func (r RepoLogger) SetRepoLogLevel(l LogLevel) { - logger.Lock() - defer logger.Unlock() - r.setRepoLogLevelInternal(l) -} - -func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { - for _, v := range r { - v.level = l - } -} - -// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in -// order, and returns a map of the results, for use in SetLogLevel. -func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { - setlist := strings.Split(conf, ",") - out := make(map[string]LogLevel) - for _, setstring := range setlist { - setting := strings.Split(setstring, "=") - if len(setting) != 2 { - return nil, errors.New("oddly structured `pkg=level` option: " + setstring) - } - l, err := ParseLevel(setting[1]) - if err != nil { - return nil, err - } - out[setting[0]] = l - } - return out, nil -} - -// SetLogLevel takes a map of package names within a repository to their desired -// loglevel, and sets the levels appropriately. Unknown packages are ignored. -// "*" is a special package name that corresponds to all packages, and will be -// processed first. -func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { - logger.Lock() - defer logger.Unlock() - if l, ok := m["*"]; ok { - r.setRepoLogLevelInternal(l) - } - for k, v := range m { - l, ok := r[k] - if !ok { - continue - } - l.level = v - } -} - -// SetFormatter sets the formatting function for all logs. -func SetFormatter(f Formatter) { - logger.Lock() - defer logger.Unlock() - logger.formatter = f -} - -// NewPackageLogger creates a package logger object. -// This should be defined as a global var in your package, referencing your repo. 
-func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { - logger.Lock() - defer logger.Unlock() - if logger.repoMap == nil { - logger.repoMap = make(map[string]RepoLogger) - } - r, rok := logger.repoMap[repo] - if !rok { - logger.repoMap[repo] = make(RepoLogger) - r = logger.repoMap[repo] - } - p, pok := r[pkg] - if !pok { - r[pkg] = &PackageLogger{ - pkg: pkg, - level: INFO, - } - p = r[pkg] - } - return -} diff --git a/vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go deleted file mode 100644 index 32d2f16a9..000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "fmt" - "os" -) - -type PackageLogger struct { - pkg string - level LogLevel -} - -const calldepth = 2 - -func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { - if inLevel != CRITICAL && p.level < inLevel { - return - } - logger.Lock() - defer logger.Unlock() - if logger.formatter != nil { - logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) - } -} - -func (p *PackageLogger) LevelAt(l LogLevel) bool { - return p.level >= l -} - -// Log a formatted string at any level between ERROR and TRACE -func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { - p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) -} - -// Log a message at any level between ERROR and TRACE -func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { - p.internalLog(calldepth, l, fmt.Sprint(args...)) -} - -// log stdlib compatibility - -func (p *PackageLogger) Println(args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) -} - -func (p *PackageLogger) Printf(format string, args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Print(args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprint(args...)) -} - -// Panic and fatal - -func (p *PackageLogger) Panicf(format string, args ...interface{}) { - s := fmt.Sprintf(format, args...) - p.internalLog(calldepth, CRITICAL, s) - panic(s) -} - -func (p *PackageLogger) Panic(args ...interface{}) { - s := fmt.Sprint(args...) - p.internalLog(calldepth, CRITICAL, s) - panic(s) -} - -func (p *PackageLogger) Fatalf(format string, args ...interface{}) { - s := fmt.Sprintf(format, args...) - p.internalLog(calldepth, CRITICAL, s) - os.Exit(1) -} - -func (p *PackageLogger) Fatal(args ...interface{}) { - s := fmt.Sprint(args...) - p.internalLog(calldepth, CRITICAL, s) - os.Exit(1) -} - -// Error Functions - -func (p *PackageLogger) Errorf(format string, args ...interface{}) { - p.internalLog(calldepth, ERROR, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Error(entries ...interface{}) { - p.internalLog(calldepth, ERROR, entries...) 
-} - -// Warning Functions - -func (p *PackageLogger) Warningf(format string, args ...interface{}) { - p.internalLog(calldepth, WARNING, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Warning(entries ...interface{}) { - p.internalLog(calldepth, WARNING, entries...) -} - -// Notice Functions - -func (p *PackageLogger) Noticef(format string, args ...interface{}) { - p.internalLog(calldepth, NOTICE, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Notice(entries ...interface{}) { - p.internalLog(calldepth, NOTICE, entries...) -} - -// Info Functions - -func (p *PackageLogger) Infof(format string, args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Info(entries ...interface{}) { - p.internalLog(calldepth, INFO, entries...) -} - -// Debug Functions - -func (p *PackageLogger) Debugf(format string, args ...interface{}) { - p.internalLog(calldepth, DEBUG, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Debug(entries ...interface{}) { - p.internalLog(calldepth, DEBUG, entries...) -} - -// Trace Functions - -func (p *PackageLogger) Tracef(format string, args ...interface{}) { - p.internalLog(calldepth, TRACE, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Trace(entries ...interface{}) { - p.internalLog(calldepth, TRACE, entries...) -} - -func (p *PackageLogger) Flush() { - logger.Lock() - defer logger.Unlock() - logger.formatter.Flush() -} diff --git a/vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go deleted file mode 100644 index 4be5a1f2d..000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// +build !windows - -package capnslog - -import ( - "fmt" - "log/syslog" -) - -func NewSyslogFormatter(w *syslog.Writer) Formatter { - return &syslogFormatter{w} -} - -func NewDefaultSyslogFormatter(tag string) (Formatter, error) { - w, err := syslog.New(syslog.LOG_DEBUG, tag) - if err != nil { - return nil, err - } - return NewSyslogFormatter(w), nil -} - -type syslogFormatter struct { - w *syslog.Writer -} - -func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { - for _, entry := range entries { - str := fmt.Sprint(entry) - switch l { - case CRITICAL: - s.w.Crit(str) - case ERROR: - s.w.Err(str) - case WARNING: - s.w.Warning(str) - case NOTICE: - s.w.Notice(str) - case INFO: - s.w.Info(str) - case DEBUG: - s.w.Debug(str) - case TRACE: - s.w.Debug(str) - default: - panic("Unhandled loglevel") - } - } -} - -func (s *syslogFormatter) Flush() { -} diff --git a/vendor/src/github.com/deckarep/golang-set/.gitignore b/vendor/src/github.com/deckarep/golang-set/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/src/github.com/deckarep/golang-set/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/src/github.com/deckarep/golang-set/.travis.yml b/vendor/src/github.com/deckarep/golang-set/.travis.yml deleted file mode 100644 index db1359c72..000000000 --- a/vendor/src/github.com/deckarep/golang-set/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.2 - -script: - - go test ./... - #- go test -race ./... - diff --git a/vendor/src/github.com/deckarep/golang-set/LICENSE b/vendor/src/github.com/deckarep/golang-set/LICENSE deleted file mode 100644 index b5768f89c..000000000 --- a/vendor/src/github.com/deckarep/golang-set/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Open Source Initiative OSI - The MIT License (MIT):Licensing - -The MIT License (MIT) -Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file
diff --git a/vendor/src/github.com/deckarep/golang-set/README.md b/vendor/src/github.com/deckarep/golang-set/README.md deleted file mode 100644 index 744b1841c..000000000 --- a/vendor/src/github.com/deckarep/golang-set/README.md +++ /dev/null @@ -1,94 +0,0 @@
-[![Build Status](https://travis-ci.org/deckarep/golang-set.png?branch=master)](https://travis-ci.org/deckarep/golang-set)
-[![GoDoc](https://godoc.org/github.com/deckarep/golang-set?status.png)](http://godoc.org/github.com/deckarep/golang-set)
-
-## golang-set
-
-
-The missing set collection for the Go language. Until Go has sets built-in...use this.
-
-Coming from Python one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set from Python.
-You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. To those I say simply ignore this repository
-and carry on, and to the rest that find this useful please contribute in helping me make it better by:
-
-* Helping to make more idiomatic improvements to the code.
-* Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~
-* Helping to make the unit-tests more robust and kick-ass.
-* Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set)
-* Simply offering feedback and suggestions. (Positive, constructive feedback is appreciated.)
-
-I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang)
-
-*Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework. This framework allows you to use the golang-set in a completely generic and type-safe way by allowing you to generate a supporting .go file based on your custom types.
-
-## Features (as of 9/22/2014)
-
-* a CartesianProduct() method has been added with unit-tests (a short usage sketch follows the feature lists): [Read more about the Cartesian product](http://en.wikipedia.org/wiki/Cartesian_product)
-
-## Features (as of 9/15/2014)
-
-* a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set)
-
-## Features (as of 4/22/2014)
-
-* One common interface to both implementations
-* Two set implementations to choose from
-  * a thread-safe implementation designed for concurrent use
-  * a non-thread-safe implementation designed for performance
-* 75 benchmarks for both implementations
-* 35 unit tests for both implementations
-* 14 concurrent tests for the thread-safe implementation
-
-
-
-Please see the unit test file for additional usage examples. The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html) Please keep in mind
-however that the Python set is a built-in type and supports additional features and syntax that make it awesome.
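The PowerSet() and CartesianProduct() features mentioned above are not demonstrated in the examples that follow, so here is a minimal sketch assuming the mapset API defined later in this patch:

```go
package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	pair := mapset.NewSetFromSlice([]interface{}{"a", "b"})
	digits := mapset.NewSetFromSlice([]interface{}{1, 2})

	// The power set of a 2-element set has 2^2 = 4 members:
	// {}, {a}, {b}, {a, b}.
	fmt.Println(pair.PowerSet().Cardinality()) // 4

	// The Cartesian product pairs every element of one set with every
	// element of the other: 2 * 2 = 4 ordered pairs.
	fmt.Println(pair.CartesianProduct(digits).Cardinality()) // 4
}
```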
- -## Examples but not exhaustive: - -```go -requiredClasses := mapset.NewSet() -requiredClasses.Add("Cooking") -requiredClasses.Add("English") -requiredClasses.Add("Math") -requiredClasses.Add("Biology") - -scienceSlice := []interface{}{"Biology", "Chemistry"} -scienceClasses := mapset.NewSetFromSlice(scienceSlice) - -electiveClasses := mapset.NewSet() -electiveClasses.Add("Welding") -electiveClasses.Add("Music") -electiveClasses.Add("Automotive") - -bonusClasses := mapset.NewSet() -bonusClasses.Add("Go Programming") -bonusClasses.Add("Python Programming") - -//Show me all the available classes I can take -allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses) -fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming} - - -//Is cooking considered a science class? -fmt.Println(scienceClasses.Contains("Cooking")) //false - -//Show me all classes that are not science classes, since I hate science. -fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding} - -//Which science classes are also required classes? -fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology} - -//How many bonus classes do you offer? -fmt.Println(bonusClasses.Cardinality()) //2 - -//Do you have the following classes? Welding, Automotive and English? -fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true -``` - -Thanks! - --Ralph - -[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/deckarep/golang-set/trend.png)](https://bitdeli.com/free "Bitdeli Badge") - -[![Analytics](https://ga-beacon.appspot.com/UA-42584447-2/deckarep/golang-set)](https://github.com/igrigorik/ga-beacon) diff --git a/vendor/src/github.com/deckarep/golang-set/set.go b/vendor/src/github.com/deckarep/golang-set/set.go deleted file mode 100644 index eccba70e4..000000000 --- a/vendor/src/github.com/deckarep/golang-set/set.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Open Source Initiative OSI - The MIT License (MIT):Licensing - -The MIT License (MIT) -Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -*/ - -// Package mapset implements a simple and generic set collection. -// Items stored within it are unordered and unique. 
It supports
-// typical set operations: membership testing, intersection, union,
-// difference, symmetric difference and cloning.
-//
-// Package mapset provides two implementations. The default
-// implementation is safe for concurrent access. There is a non-threadsafe
-// implementation which is slightly more performant.
-package mapset
-
-type Set interface {
-	// Adds an element to the set. Returns whether
-	// the item was added.
-	Add(i interface{}) bool
-
-	// Returns the number of elements in the set.
-	Cardinality() int
-
-	// Removes all elements from the set, leaving
-	// the empty set.
-	Clear()
-
-	// Returns a clone of the set using the same
-	// implementation, duplicating all keys.
-	Clone() Set
-
-	// Returns whether the given items
-	// are all in the set.
-	Contains(i ...interface{}) bool
-
-	// Returns the difference between this set
-	// and other. The returned set will contain
-	// all elements of this set that are not also
-	// elements of other.
-	//
-	// Note that the argument to Difference
-	// must be of the same type as the receiver
-	// of the method. Otherwise, Difference will
-	// panic.
-	Difference(other Set) Set
-
-	// Determines if two sets are equal to each
-	// other. If they have the same cardinality
-	// and contain the same elements, they are
-	// considered equal. The order in which
-	// the elements were added is irrelevant.
-	//
-	// Note that the argument to Equal must be
-	// of the same type as the receiver of the
-	// method. Otherwise, Equal will panic.
-	Equal(other Set) bool
-
-	// Returns a new set containing only the elements
-	// that exist in both sets.
-	//
-	// Note that the argument to Intersect
-	// must be of the same type as the receiver
-	// of the method. Otherwise, Intersect will
-	// panic.
-	Intersect(other Set) Set
-
-	// Determines if every element in the other set
-	// is in this set.
-	//
-	// Note that the argument to IsSubset
-	// must be of the same type as the receiver
-	// of the method. Otherwise, IsSubset will
-	// panic.
-	IsSubset(other Set) bool
-
-	// Determines if every element in this set is in
-	// the other set.
-	//
-	// Note that the argument to IsSuperset
-	// must be of the same type as the receiver
-	// of the method. Otherwise, IsSuperset will
-	// panic.
-	IsSuperset(other Set) bool
-
-	// Returns a channel of elements that you can
-	// range over.
-	Iter() <-chan interface{}
-
-	// Remove a single element from the set.
-	Remove(i interface{})
-
-	// Provides a convenient string representation
-	// of the current state of the set.
-	String() string
-
-	// Returns a new set with all elements which are
-	// in either this set or the other set but not in both.
-	//
-	// Note that the argument to SymmetricDifference
-	// must be of the same type as the receiver
-	// of the method. Otherwise, SymmetricDifference
-	// will panic.
-	SymmetricDifference(other Set) Set
-
-	// Returns a new set with all elements from both sets.
-	//
-	// Note that the argument to Union must be of the
-	// same type as the receiver of the method.
-	// Otherwise, Union will panic.
-	Union(other Set) Set
-
-	// Returns all subsets of a given set (Power Set).
-	PowerSet() Set
-
-	// Returns the Cartesian Product of two sets.
-	CartesianProduct(other Set) Set
-
-	// Returns the members of the set as a slice.
-	ToSlice() []interface{}
-}
-
-// Creates and returns a reference to an empty set.
-
-// Creates and returns a reference to an empty set.
-func NewSet() Set {
-	set := newThreadSafeSet()
-	return &set
-}
-
-// Creates and returns a reference to a set from an existing slice.
-func NewSetFromSlice(s []interface{}) Set {
-	a := NewSet()
-	for _, item := range s {
-		a.Add(item)
-	}
-	return a
-}
-
-// NewThreadUnsafeSet creates and returns a reference to an empty set
-// that is not safe for concurrent use.
-func NewThreadUnsafeSet() Set {
-	set := newThreadUnsafeSet()
-	return &set
-}
-
-// NewThreadUnsafeSetFromSlice creates and returns a reference to a set
-// built from an existing slice; it is not safe for concurrent use.
-func NewThreadUnsafeSetFromSlice(s []interface{}) Set {
-	a := NewThreadUnsafeSet()
-	for _, item := range s {
-		a.Add(item)
-	}
-	return a
-}
diff --git a/vendor/src/github.com/deckarep/golang-set/threadsafe.go b/vendor/src/github.com/deckarep/golang-set/threadsafe.go
deleted file mode 100644
index 9dca94af7..000000000
--- a/vendor/src/github.com/deckarep/golang-set/threadsafe.go
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
-Open Source Initiative OSI - The MIT License (MIT):Licensing
-
-The MIT License (MIT)
-Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-*/
-
-package mapset
-
-import "sync"
-
-type threadSafeSet struct {
-	s threadUnsafeSet
-	sync.RWMutex
-}
-
-func newThreadSafeSet() threadSafeSet {
-	return threadSafeSet{s: newThreadUnsafeSet()}
-}
-
-func (set *threadSafeSet) Add(i interface{}) bool {
-	set.Lock()
-	ret := set.s.Add(i)
-	set.Unlock()
-	return ret
-}
-
-func (set *threadSafeSet) Contains(i ...interface{}) bool {
-	set.RLock()
-	ret := set.s.Contains(i...)
-	set.RUnlock()
-	return ret
-}
-
-func (set *threadSafeSet) IsSubset(other Set) bool {
-	o := other.(*threadSafeSet)
-
-	set.RLock()
-	o.RLock()
-
-	ret := set.s.IsSubset(&o.s)
-	set.RUnlock()
-	o.RUnlock()
-	return ret
-}
-
-func (set *threadSafeSet) IsSuperset(other Set) bool {
-	return other.IsSubset(set)
-}
-
-func (set *threadSafeSet) Union(other Set) Set {
-	o := other.(*threadSafeSet)
-
-	set.RLock()
-	o.RLock()
-
-	unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet)
-	ret := &threadSafeSet{s: *unsafeUnion}
-	set.RUnlock()
-	o.RUnlock()
-	return ret
-}
-
-func (set *threadSafeSet) Intersect(other Set) Set {
-	o := other.(*threadSafeSet)
-
-	set.RLock()
-	o.RLock()
-
-	unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet)
-	ret := &threadSafeSet{s: *unsafeIntersection}
-	set.RUnlock()
-	o.RUnlock()
-	return ret
-}
-
-func (set *threadSafeSet) Difference(other Set) Set {
-	o := other.(*threadSafeSet)
-
-	set.RLock()
-	o.RLock()
-
-	unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet)
-	ret := &threadSafeSet{s: *unsafeDifference}
-	set.RUnlock()
-	o.RUnlock()
-	return ret
-}
-
-func (set *threadSafeSet) SymmetricDifference(other Set) Set {
-	o := other.(*threadSafeSet)
-
-	set.RLock()
-	o.RLock()
-
-	unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet)
-	ret := &threadSafeSet{s: *unsafeDifference}
-	set.RUnlock()
-	o.RUnlock()
-	return ret
-}
-
-func (set *threadSafeSet) Clear() {
-	set.Lock()
-	set.s = newThreadUnsafeSet()
-	set.Unlock()
-}
-
-func (set *threadSafeSet) Remove(i interface{}) {
-	set.Lock()
-	delete(set.s, i)
-	set.Unlock()
-}
-
-func (set *threadSafeSet) Cardinality() int {
-	set.RLock()
-	defer set.RUnlock()
-	return len(set.s)
-}
-
-func (set *threadSafeSet) Iter() <-chan interface{} {
-	ch := make(chan interface{})
-	go func() {
-		set.RLock()
-
-		for elem := range set.s {
-			ch <- elem
-		}
-		close(ch)
-		set.RUnlock()
-	}()
-
-	return ch
-}
-
-func (set *threadSafeSet) Equal(other Set) bool {
-	o := other.(*threadSafeSet)
-
-	set.RLock()
-	o.RLock()
-
-	ret := set.s.Equal(&o.s)
-	set.RUnlock()
-	o.RUnlock()
-	return ret
-}
-
-func (set *threadSafeSet) Clone() Set {
-	set.RLock()
-
-	unsafeClone := set.s.Clone().(*threadUnsafeSet)
-	ret := &threadSafeSet{s: *unsafeClone}
-	set.RUnlock()
-	return ret
-}
-
-func (set *threadSafeSet) String() string {
-	set.RLock()
-	ret := set.s.String()
-	set.RUnlock()
-	return ret
-}
-
-func (set *threadSafeSet) PowerSet() Set {
-	set.RLock()
-	ret := set.s.PowerSet()
-	set.RUnlock()
-	return ret
-}
-
-func (set *threadSafeSet) CartesianProduct(other Set) Set {
-	o := other.(*threadSafeSet)
-
-	set.RLock()
-	o.RLock()
-
-	unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet)
-	ret := &threadSafeSet{s: *unsafeCartProduct}
-	set.RUnlock()
-	o.RUnlock()
-	return ret
-}
-
-func (set *threadSafeSet) ToSlice() []interface{} {
-	set.RLock()
-	keys := make([]interface{}, 0, set.Cardinality())
-	for elem := range set.s {
-		keys = append(keys, elem)
-	}
-	set.RUnlock()
-	return keys
-}
diff --git a/vendor/src/github.com/deckarep/golang-set/threadunsafe.go b/vendor/src/github.com/deckarep/golang-set/threadunsafe.go
deleted file mode 100644
index 124521e2e..000000000
--- a/vendor/src/github.com/deckarep/golang-set/threadunsafe.go
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
-Open Source Initiative OSI - The MIT License (MIT):Licensing
-
-The MIT License (MIT)
-Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -*/ - -package mapset - -import ( - "fmt" - "reflect" - "strings" -) - -type threadUnsafeSet map[interface{}]struct{} - -type orderedPair struct { - first interface{} - second interface{} -} - -func newThreadUnsafeSet() threadUnsafeSet { - return make(threadUnsafeSet) -} - -func (pair *orderedPair) Equal(other orderedPair) bool { - if pair.first == other.first && - pair.second == other.second { - return true - } - - return false -} - -func (set *threadUnsafeSet) Add(i interface{}) bool { - _, found := (*set)[i] - (*set)[i] = struct{}{} - return !found //False if it existed already -} - -func (set *threadUnsafeSet) Contains(i ...interface{}) bool { - for _, val := range i { - if _, ok := (*set)[val]; !ok { - return false - } - } - return true -} - -func (set *threadUnsafeSet) IsSubset(other Set) bool { - _ = other.(*threadUnsafeSet) - for elem := range *set { - if !other.Contains(elem) { - return false - } - } - return true -} - -func (set *threadUnsafeSet) IsSuperset(other Set) bool { - return other.IsSubset(set) -} - -func (set *threadUnsafeSet) Union(other Set) Set { - o := other.(*threadUnsafeSet) - - unionedSet := newThreadUnsafeSet() - - for elem := range *set { - unionedSet.Add(elem) - } - for elem := range *o { - unionedSet.Add(elem) - } - return &unionedSet -} - -func (set *threadUnsafeSet) Intersect(other Set) Set { - o := other.(*threadUnsafeSet) - - intersection := newThreadUnsafeSet() - // loop over smaller set - if set.Cardinality() < other.Cardinality() { - for elem := range *set { - if other.Contains(elem) { - intersection.Add(elem) - } - } - } else { - for elem := range *o { - if set.Contains(elem) { - intersection.Add(elem) - } - } - } - return &intersection -} - -func (set *threadUnsafeSet) Difference(other Set) Set { - _ = other.(*threadUnsafeSet) - - difference := newThreadUnsafeSet() - for elem := range *set { - if !other.Contains(elem) { - difference.Add(elem) - } - } - return &difference -} - -func (set *threadUnsafeSet) SymmetricDifference(other Set) Set { - _ = other.(*threadUnsafeSet) - - aDiff := set.Difference(other) - bDiff := other.Difference(set) - return aDiff.Union(bDiff) -} - -func (set *threadUnsafeSet) Clear() { - *set = newThreadUnsafeSet() -} - -func (set *threadUnsafeSet) Remove(i interface{}) { - delete(*set, i) -} - -func (set *threadUnsafeSet) Cardinality() int { - return len(*set) -} - -func (set *threadUnsafeSet) Iter() <-chan interface{} { - ch := make(chan interface{}) - go func() { - for elem := range *set { - ch <- elem - } - close(ch) - }() - - return ch -} - -func (set *threadUnsafeSet) Equal(other Set) bool { - _ = other.(*threadUnsafeSet) - - if set.Cardinality() != 
other.Cardinality() { - return false - } - for elem := range *set { - if !other.Contains(elem) { - return false - } - } - return true -} - -func (set *threadUnsafeSet) Clone() Set { - clonedSet := newThreadUnsafeSet() - for elem := range *set { - clonedSet.Add(elem) - } - return &clonedSet -} - -func (set *threadUnsafeSet) String() string { - items := make([]string, 0, len(*set)) - - for elem := range *set { - items = append(items, fmt.Sprintf("%v", elem)) - } - return fmt.Sprintf("Set{%s}", strings.Join(items, ", ")) -} - -func (pair orderedPair) String() string { - return fmt.Sprintf("(%v, %v)", pair.first, pair.second) -} - -func (set *threadUnsafeSet) PowerSet() Set { - powSet := NewThreadUnsafeSet() - nullset := newThreadUnsafeSet() - powSet.Add(&nullset) - - for es := range *set { - u := newThreadUnsafeSet() - j := powSet.Iter() - for er := range j { - p := newThreadUnsafeSet() - if reflect.TypeOf(er).Name() == "" { - k := er.(*threadUnsafeSet) - for ek := range *(k) { - p.Add(ek) - } - } else { - p.Add(er) - } - p.Add(es) - u.Add(&p) - } - - powSet = powSet.Union(&u) - } - - return powSet -} - -func (set *threadUnsafeSet) CartesianProduct(other Set) Set { - o := other.(*threadUnsafeSet) - cartProduct := NewThreadUnsafeSet() - - for i := range *set { - for j := range *o { - elem := orderedPair{first: i, second: j} - cartProduct.Add(elem) - } - } - - return cartProduct -} - -func (set *threadUnsafeSet) ToSlice() []interface{} { - keys := make([]interface{}, 0, set.Cardinality()) - for elem := range *set { - keys = append(keys, elem) - } - - return keys -} diff --git a/vendor/src/github.com/docker/containerd/LICENSE.code b/vendor/src/github.com/docker/containerd/LICENSE.code deleted file mode 100644 index 8f3fee627..000000000 --- a/vendor/src/github.com/docker/containerd/LICENSE.code +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/src/github.com/docker/containerd/LICENSE.docs b/vendor/src/github.com/docker/containerd/LICENSE.docs deleted file mode 100644 index e26cd4fc8..000000000 --- a/vendor/src/github.com/docker/containerd/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. 
- - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. 
Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. 
The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. 
You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. 
The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." 
Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go b/vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go deleted file mode 100644 index e6fd0af4b..000000000 --- a/vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go +++ /dev/null @@ -1,1468 +0,0 @@ -// Code generated by protoc-gen-go. -// source: api.proto -// DO NOT EDIT! - -/* -Package types is a generated protocol buffer package. - -It is generated from these files: - api.proto - -It has these top-level messages: - GetServerVersionRequest - GetServerVersionResponse - UpdateProcessRequest - UpdateProcessResponse - CreateContainerRequest - CreateContainerResponse - SignalRequest - SignalResponse - AddProcessRequest - Rlimit - User - AddProcessResponse - CreateCheckpointRequest - CreateCheckpointResponse - DeleteCheckpointRequest - DeleteCheckpointResponse - ListCheckpointRequest - Checkpoint - ListCheckpointResponse - StateRequest - ContainerState - Process - Container - Machine - StateResponse - UpdateContainerRequest - UpdateResource - UpdateContainerResponse - EventsRequest - Event - NetworkStats - CpuUsage - ThrottlingData - CpuStats - PidsStats - MemoryData - MemoryStats - BlkioStatsEntry - BlkioStats - HugetlbStats - CgroupStats - StatsResponse - StatsRequest -*/ -package types - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-const _ = proto.ProtoPackageIsVersion1 - -type GetServerVersionRequest struct { -} - -func (m *GetServerVersionRequest) Reset() { *m = GetServerVersionRequest{} } -func (m *GetServerVersionRequest) String() string { return proto.CompactTextString(m) } -func (*GetServerVersionRequest) ProtoMessage() {} -func (*GetServerVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type GetServerVersionResponse struct { - Major uint32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` - Minor uint32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` - Patch uint32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` - Revision string `protobuf:"bytes,4,opt,name=revision" json:"revision,omitempty"` -} - -func (m *GetServerVersionResponse) Reset() { *m = GetServerVersionResponse{} } -func (m *GetServerVersionResponse) String() string { return proto.CompactTextString(m) } -func (*GetServerVersionResponse) ProtoMessage() {} -func (*GetServerVersionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -type UpdateProcessRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` - CloseStdin bool `protobuf:"varint,3,opt,name=closeStdin" json:"closeStdin,omitempty"` - Width uint32 `protobuf:"varint,4,opt,name=width" json:"width,omitempty"` - Height uint32 `protobuf:"varint,5,opt,name=height" json:"height,omitempty"` -} - -func (m *UpdateProcessRequest) Reset() { *m = UpdateProcessRequest{} } -func (m *UpdateProcessRequest) String() string { return proto.CompactTextString(m) } -func (*UpdateProcessRequest) ProtoMessage() {} -func (*UpdateProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -type UpdateProcessResponse struct { -} - -func (m *UpdateProcessResponse) Reset() { *m = UpdateProcessResponse{} } -func (m *UpdateProcessResponse) String() string { return proto.CompactTextString(m) } -func (*UpdateProcessResponse) ProtoMessage() {} -func (*UpdateProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -type CreateContainerRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"` - Checkpoint string `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"` - Stdin string `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"` - Labels []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"` - NoPivotRoot bool `protobuf:"varint,8,opt,name=noPivotRoot" json:"noPivotRoot,omitempty"` - Runtime string `protobuf:"bytes,9,opt,name=runtime" json:"runtime,omitempty"` - RuntimeArgs []string `protobuf:"bytes,10,rep,name=runtimeArgs" json:"runtimeArgs,omitempty"` - CheckpointDir string `protobuf:"bytes,11,opt,name=checkpointDir" json:"checkpointDir,omitempty"` -} - -func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} } -func (m *CreateContainerRequest) String() string { return proto.CompactTextString(m) } -func (*CreateContainerRequest) ProtoMessage() {} -func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -type CreateContainerResponse struct { - Container *Container `protobuf:"bytes,1,opt,name=container" json:"container,omitempty"` 
-} - -func (m *CreateContainerResponse) Reset() { *m = CreateContainerResponse{} } -func (m *CreateContainerResponse) String() string { return proto.CompactTextString(m) } -func (*CreateContainerResponse) ProtoMessage() {} -func (*CreateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *CreateContainerResponse) GetContainer() *Container { - if m != nil { - return m.Container - } - return nil -} - -type SignalRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` - Signal uint32 `protobuf:"varint,3,opt,name=signal" json:"signal,omitempty"` -} - -func (m *SignalRequest) Reset() { *m = SignalRequest{} } -func (m *SignalRequest) String() string { return proto.CompactTextString(m) } -func (*SignalRequest) ProtoMessage() {} -func (*SignalRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -type SignalResponse struct { -} - -func (m *SignalResponse) Reset() { *m = SignalResponse{} } -func (m *SignalResponse) String() string { return proto.CompactTextString(m) } -func (*SignalResponse) ProtoMessage() {} -func (*SignalResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -type AddProcessRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` - User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` - Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` - Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` - Pid string `protobuf:"bytes,7,opt,name=pid" json:"pid,omitempty"` - Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` - Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` - ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` - SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` - NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` - Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` -} - -func (m *AddProcessRequest) Reset() { *m = AddProcessRequest{} } -func (m *AddProcessRequest) String() string { return proto.CompactTextString(m) } -func (*AddProcessRequest) ProtoMessage() {} -func (*AddProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *AddProcessRequest) GetUser() *User { - if m != nil { - return m.User - } - return nil -} - -func (m *AddProcessRequest) GetRlimits() []*Rlimit { - if m != nil { - return m.Rlimits - } - return nil -} - -type Rlimit struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Soft uint64 `protobuf:"varint,2,opt,name=soft" json:"soft,omitempty"` - Hard uint64 `protobuf:"varint,3,opt,name=hard" json:"hard,omitempty"` -} - -func (m *Rlimit) Reset() { *m = Rlimit{} } -func (m *Rlimit) String() string { return proto.CompactTextString(m) } -func (*Rlimit) ProtoMessage() {} -func (*Rlimit) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -type User struct { - Uid uint32 
`protobuf:"varint,1,opt,name=uid" json:"uid,omitempty"` - Gid uint32 `protobuf:"varint,2,opt,name=gid" json:"gid,omitempty"` - AdditionalGids []uint32 `protobuf:"varint,3,rep,name=additionalGids" json:"additionalGids,omitempty"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } - -type AddProcessResponse struct { -} - -func (m *AddProcessResponse) Reset() { *m = AddProcessResponse{} } -func (m *AddProcessResponse) String() string { return proto.CompactTextString(m) } -func (*AddProcessResponse) ProtoMessage() {} -func (*AddProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } - -type CreateCheckpointRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Checkpoint *Checkpoint `protobuf:"bytes,2,opt,name=checkpoint" json:"checkpoint,omitempty"` - CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"` -} - -func (m *CreateCheckpointRequest) Reset() { *m = CreateCheckpointRequest{} } -func (m *CreateCheckpointRequest) String() string { return proto.CompactTextString(m) } -func (*CreateCheckpointRequest) ProtoMessage() {} -func (*CreateCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -func (m *CreateCheckpointRequest) GetCheckpoint() *Checkpoint { - if m != nil { - return m.Checkpoint - } - return nil -} - -type CreateCheckpointResponse struct { -} - -func (m *CreateCheckpointResponse) Reset() { *m = CreateCheckpointResponse{} } -func (m *CreateCheckpointResponse) String() string { return proto.CompactTextString(m) } -func (*CreateCheckpointResponse) ProtoMessage() {} -func (*CreateCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } - -type DeleteCheckpointRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"` -} - -func (m *DeleteCheckpointRequest) Reset() { *m = DeleteCheckpointRequest{} } -func (m *DeleteCheckpointRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteCheckpointRequest) ProtoMessage() {} -func (*DeleteCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } - -type DeleteCheckpointResponse struct { -} - -func (m *DeleteCheckpointResponse) Reset() { *m = DeleteCheckpointResponse{} } -func (m *DeleteCheckpointResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteCheckpointResponse) ProtoMessage() {} -func (*DeleteCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } - -type ListCheckpointRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - CheckpointDir string `protobuf:"bytes,2,opt,name=checkpointDir" json:"checkpointDir,omitempty"` -} - -func (m *ListCheckpointRequest) Reset() { *m = ListCheckpointRequest{} } -func (m *ListCheckpointRequest) String() string { return proto.CompactTextString(m) } -func (*ListCheckpointRequest) ProtoMessage() {} -func (*ListCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -type Checkpoint struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Exit bool `protobuf:"varint,2,opt,name=exit" 
json:"exit,omitempty"` - Tcp bool `protobuf:"varint,3,opt,name=tcp" json:"tcp,omitempty"` - UnixSockets bool `protobuf:"varint,4,opt,name=unixSockets" json:"unixSockets,omitempty"` - Shell bool `protobuf:"varint,5,opt,name=shell" json:"shell,omitempty"` - EmptyNS []string `protobuf:"bytes,6,rep,name=emptyNS" json:"emptyNS,omitempty"` -} - -func (m *Checkpoint) Reset() { *m = Checkpoint{} } -func (m *Checkpoint) String() string { return proto.CompactTextString(m) } -func (*Checkpoint) ProtoMessage() {} -func (*Checkpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -type ListCheckpointResponse struct { - Checkpoints []*Checkpoint `protobuf:"bytes,1,rep,name=checkpoints" json:"checkpoints,omitempty"` -} - -func (m *ListCheckpointResponse) Reset() { *m = ListCheckpointResponse{} } -func (m *ListCheckpointResponse) String() string { return proto.CompactTextString(m) } -func (*ListCheckpointResponse) ProtoMessage() {} -func (*ListCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } - -func (m *ListCheckpointResponse) GetCheckpoints() []*Checkpoint { - if m != nil { - return m.Checkpoints - } - return nil -} - -type StateRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` -} - -func (m *StateRequest) Reset() { *m = StateRequest{} } -func (m *StateRequest) String() string { return proto.CompactTextString(m) } -func (*StateRequest) ProtoMessage() {} -func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } - -type ContainerState struct { - Status string `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` -} - -func (m *ContainerState) Reset() { *m = ContainerState{} } -func (m *ContainerState) String() string { return proto.CompactTextString(m) } -func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } - -type Process struct { - Pid string `protobuf:"bytes,1,opt,name=pid" json:"pid,omitempty"` - Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` - User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` - Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` - Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` - SystemPid uint32 `protobuf:"varint,7,opt,name=systemPid" json:"systemPid,omitempty"` - Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` - Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` - ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` - SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` - NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` - Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` -} - -func (m *Process) Reset() { *m = Process{} } -func (m *Process) String() string { return proto.CompactTextString(m) } -func (*Process) ProtoMessage() {} -func (*Process) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } - -func (m *Process) GetUser() *User { - if m != nil { - return m.User - } - return nil -} - -func (m *Process) 
GetRlimits() []*Rlimit { - if m != nil { - return m.Rlimits - } - return nil -} - -type Container struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"` - Processes []*Process `protobuf:"bytes,3,rep,name=processes" json:"processes,omitempty"` - Status string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"` - Labels []string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"` - Pids []uint32 `protobuf:"varint,6,rep,name=pids" json:"pids,omitempty"` - Runtime string `protobuf:"bytes,7,opt,name=runtime" json:"runtime,omitempty"` -} - -func (m *Container) Reset() { *m = Container{} } -func (m *Container) String() string { return proto.CompactTextString(m) } -func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } - -func (m *Container) GetProcesses() []*Process { - if m != nil { - return m.Processes - } - return nil -} - -// Machine is information about machine on which containerd is run -type Machine struct { - Cpus uint32 `protobuf:"varint,1,opt,name=cpus" json:"cpus,omitempty"` - Memory uint64 `protobuf:"varint,2,opt,name=memory" json:"memory,omitempty"` -} - -func (m *Machine) Reset() { *m = Machine{} } -func (m *Machine) String() string { return proto.CompactTextString(m) } -func (*Machine) ProtoMessage() {} -func (*Machine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } - -// StateResponse is information about containerd daemon -type StateResponse struct { - Containers []*Container `protobuf:"bytes,1,rep,name=containers" json:"containers,omitempty"` - Machine *Machine `protobuf:"bytes,2,opt,name=machine" json:"machine,omitempty"` -} - -func (m *StateResponse) Reset() { *m = StateResponse{} } -func (m *StateResponse) String() string { return proto.CompactTextString(m) } -func (*StateResponse) ProtoMessage() {} -func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } - -func (m *StateResponse) GetContainers() []*Container { - if m != nil { - return m.Containers - } - return nil -} - -func (m *StateResponse) GetMachine() *Machine { - if m != nil { - return m.Machine - } - return nil -} - -type UpdateContainerRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` - Status string `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` - Resources *UpdateResource `protobuf:"bytes,4,opt,name=resources" json:"resources,omitempty"` -} - -func (m *UpdateContainerRequest) Reset() { *m = UpdateContainerRequest{} } -func (m *UpdateContainerRequest) String() string { return proto.CompactTextString(m) } -func (*UpdateContainerRequest) ProtoMessage() {} -func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } - -func (m *UpdateContainerRequest) GetResources() *UpdateResource { - if m != nil { - return m.Resources - } - return nil -} - -type UpdateResource struct { - BlkioWeight uint64 `protobuf:"varint,1,opt,name=blkioWeight" json:"blkioWeight,omitempty"` - CpuShares uint64 `protobuf:"varint,2,opt,name=cpuShares" json:"cpuShares,omitempty"` - CpuPeriod uint64 `protobuf:"varint,3,opt,name=cpuPeriod" json:"cpuPeriod,omitempty"` - CpuQuota uint64 `protobuf:"varint,4,opt,name=cpuQuota" json:"cpuQuota,omitempty"` - CpusetCpus string `protobuf:"bytes,5,opt,name=cpusetCpus" json:"cpusetCpus,omitempty"` - CpusetMems string 
`protobuf:"bytes,6,opt,name=cpusetMems" json:"cpusetMems,omitempty"` - MemoryLimit uint64 `protobuf:"varint,7,opt,name=memoryLimit" json:"memoryLimit,omitempty"` - MemorySwap uint64 `protobuf:"varint,8,opt,name=memorySwap" json:"memorySwap,omitempty"` - MemoryReservation uint64 `protobuf:"varint,9,opt,name=memoryReservation" json:"memoryReservation,omitempty"` - KernelMemoryLimit uint64 `protobuf:"varint,10,opt,name=kernelMemoryLimit" json:"kernelMemoryLimit,omitempty"` - KernelTCPMemoryLimit uint64 `protobuf:"varint,11,opt,name=kernelTCPMemoryLimit" json:"kernelTCPMemoryLimit,omitempty"` -} - -func (m *UpdateResource) Reset() { *m = UpdateResource{} } -func (m *UpdateResource) String() string { return proto.CompactTextString(m) } -func (*UpdateResource) ProtoMessage() {} -func (*UpdateResource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } - -type UpdateContainerResponse struct { -} - -func (m *UpdateContainerResponse) Reset() { *m = UpdateContainerResponse{} } -func (m *UpdateContainerResponse) String() string { return proto.CompactTextString(m) } -func (*UpdateContainerResponse) ProtoMessage() {} -func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } - -type EventsRequest struct { - Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp" json:"timestamp,omitempty"` -} - -func (m *EventsRequest) Reset() { *m = EventsRequest{} } -func (m *EventsRequest) String() string { return proto.CompactTextString(m) } -func (*EventsRequest) ProtoMessage() {} -func (*EventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } - -type Event struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` - Status uint32 `protobuf:"varint,3,opt,name=status" json:"status,omitempty"` - Pid string `protobuf:"bytes,4,opt,name=pid" json:"pid,omitempty"` - Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp" json:"timestamp,omitempty"` -} - -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } - -type NetworkStats struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes" json:"rx_bytes,omitempty"` - Rx_Packets uint64 `protobuf:"varint,3,opt,name=rx_Packets,json=rxPackets" json:"rx_Packets,omitempty"` - RxErrors uint64 `protobuf:"varint,4,opt,name=Rx_errors,json=rxErrors" json:"Rx_errors,omitempty"` - RxDropped uint64 `protobuf:"varint,5,opt,name=Rx_dropped,json=rxDropped" json:"Rx_dropped,omitempty"` - TxBytes uint64 `protobuf:"varint,6,opt,name=Tx_bytes,json=txBytes" json:"Tx_bytes,omitempty"` - TxPackets uint64 `protobuf:"varint,7,opt,name=Tx_packets,json=txPackets" json:"Tx_packets,omitempty"` - TxErrors uint64 `protobuf:"varint,8,opt,name=Tx_errors,json=txErrors" json:"Tx_errors,omitempty"` - TxDropped uint64 `protobuf:"varint,9,opt,name=Tx_dropped,json=txDropped" json:"Tx_dropped,omitempty"` -} - -func (m *NetworkStats) Reset() { *m = NetworkStats{} } -func (m *NetworkStats) String() string { return proto.CompactTextString(m) } -func (*NetworkStats) ProtoMessage() {} -func (*NetworkStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } - -type CpuUsage struct { - TotalUsage uint64 `protobuf:"varint,1,opt,name=total_usage,json=totalUsage" 
json:"total_usage,omitempty"` - PercpuUsage []uint64 `protobuf:"varint,2,rep,name=percpu_usage,json=percpuUsage" json:"percpu_usage,omitempty"` - UsageInKernelmode uint64 `protobuf:"varint,3,opt,name=usage_in_kernelmode,json=usageInKernelmode" json:"usage_in_kernelmode,omitempty"` - UsageInUsermode uint64 `protobuf:"varint,4,opt,name=usage_in_usermode,json=usageInUsermode" json:"usage_in_usermode,omitempty"` -} - -func (m *CpuUsage) Reset() { *m = CpuUsage{} } -func (m *CpuUsage) String() string { return proto.CompactTextString(m) } -func (*CpuUsage) ProtoMessage() {} -func (*CpuUsage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } - -type ThrottlingData struct { - Periods uint64 `protobuf:"varint,1,opt,name=periods" json:"periods,omitempty"` - ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods" json:"throttled_periods,omitempty"` - ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"` -} - -func (m *ThrottlingData) Reset() { *m = ThrottlingData{} } -func (m *ThrottlingData) String() string { return proto.CompactTextString(m) } -func (*ThrottlingData) ProtoMessage() {} -func (*ThrottlingData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } - -type CpuStats struct { - CpuUsage *CpuUsage `protobuf:"bytes,1,opt,name=cpu_usage,json=cpuUsage" json:"cpu_usage,omitempty"` - ThrottlingData *ThrottlingData `protobuf:"bytes,2,opt,name=throttling_data,json=throttlingData" json:"throttling_data,omitempty"` - SystemUsage uint64 `protobuf:"varint,3,opt,name=system_usage,json=systemUsage" json:"system_usage,omitempty"` -} - -func (m *CpuStats) Reset() { *m = CpuStats{} } -func (m *CpuStats) String() string { return proto.CompactTextString(m) } -func (*CpuStats) ProtoMessage() {} -func (*CpuStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } - -func (m *CpuStats) GetCpuUsage() *CpuUsage { - if m != nil { - return m.CpuUsage - } - return nil -} - -func (m *CpuStats) GetThrottlingData() *ThrottlingData { - if m != nil { - return m.ThrottlingData - } - return nil -} - -type PidsStats struct { - Current uint64 `protobuf:"varint,1,opt,name=current" json:"current,omitempty"` - Limit uint64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"` -} - -func (m *PidsStats) Reset() { *m = PidsStats{} } -func (m *PidsStats) String() string { return proto.CompactTextString(m) } -func (*PidsStats) ProtoMessage() {} -func (*PidsStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } - -type MemoryData struct { - Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` - MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"` - Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` - Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` -} - -func (m *MemoryData) Reset() { *m = MemoryData{} } -func (m *MemoryData) String() string { return proto.CompactTextString(m) } -func (*MemoryData) ProtoMessage() {} -func (*MemoryData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } - -type MemoryStats struct { - Cache uint64 `protobuf:"varint,1,opt,name=cache" json:"cache,omitempty"` - Usage *MemoryData `protobuf:"bytes,2,opt,name=usage" json:"usage,omitempty"` - SwapUsage *MemoryData `protobuf:"bytes,3,opt,name=swap_usage,json=swapUsage" json:"swap_usage,omitempty"` - KernelUsage *MemoryData 
`protobuf:"bytes,4,opt,name=kernel_usage,json=kernelUsage" json:"kernel_usage,omitempty"` - Stats map[string]uint64 `protobuf:"bytes,5,rep,name=stats" json:"stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` -} - -func (m *MemoryStats) Reset() { *m = MemoryStats{} } -func (m *MemoryStats) String() string { return proto.CompactTextString(m) } -func (*MemoryStats) ProtoMessage() {} -func (*MemoryStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } - -func (m *MemoryStats) GetUsage() *MemoryData { - if m != nil { - return m.Usage - } - return nil -} - -func (m *MemoryStats) GetSwapUsage() *MemoryData { - if m != nil { - return m.SwapUsage - } - return nil -} - -func (m *MemoryStats) GetKernelUsage() *MemoryData { - if m != nil { - return m.KernelUsage - } - return nil -} - -func (m *MemoryStats) GetStats() map[string]uint64 { - if m != nil { - return m.Stats - } - return nil -} - -type BlkioStatsEntry struct { - Major uint64 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` - Minor uint64 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` - Op string `protobuf:"bytes,3,opt,name=op" json:"op,omitempty"` - Value uint64 `protobuf:"varint,4,opt,name=value" json:"value,omitempty"` -} - -func (m *BlkioStatsEntry) Reset() { *m = BlkioStatsEntry{} } -func (m *BlkioStatsEntry) String() string { return proto.CompactTextString(m) } -func (*BlkioStatsEntry) ProtoMessage() {} -func (*BlkioStatsEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } - -type BlkioStats struct { - IoServiceBytesRecursive []*BlkioStatsEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive" json:"io_service_bytes_recursive,omitempty"` - IoServicedRecursive []*BlkioStatsEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive" json:"io_serviced_recursive,omitempty"` - IoQueuedRecursive []*BlkioStatsEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive" json:"io_queued_recursive,omitempty"` - IoServiceTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive" json:"io_service_time_recursive,omitempty"` - IoWaitTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive" json:"io_wait_time_recursive,omitempty"` - IoMergedRecursive []*BlkioStatsEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive" json:"io_merged_recursive,omitempty"` - IoTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive" json:"io_time_recursive,omitempty"` - SectorsRecursive []*BlkioStatsEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive" json:"sectors_recursive,omitempty"` -} - -func (m *BlkioStats) Reset() { *m = BlkioStats{} } -func (m *BlkioStats) String() string { return proto.CompactTextString(m) } -func (*BlkioStats) ProtoMessage() {} -func (*BlkioStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } - -func (m *BlkioStats) GetIoServiceBytesRecursive() []*BlkioStatsEntry { - if m != nil { - return m.IoServiceBytesRecursive - } - return nil -} - -func (m *BlkioStats) GetIoServicedRecursive() []*BlkioStatsEntry { - if m != nil { - return m.IoServicedRecursive - } - return nil -} - -func (m *BlkioStats) GetIoQueuedRecursive() []*BlkioStatsEntry { - if m != nil { - return m.IoQueuedRecursive - } - return nil -} - -func (m *BlkioStats) GetIoServiceTimeRecursive() 
[]*BlkioStatsEntry { - if m != nil { - return m.IoServiceTimeRecursive - } - return nil -} - -func (m *BlkioStats) GetIoWaitTimeRecursive() []*BlkioStatsEntry { - if m != nil { - return m.IoWaitTimeRecursive - } - return nil -} - -func (m *BlkioStats) GetIoMergedRecursive() []*BlkioStatsEntry { - if m != nil { - return m.IoMergedRecursive - } - return nil -} - -func (m *BlkioStats) GetIoTimeRecursive() []*BlkioStatsEntry { - if m != nil { - return m.IoTimeRecursive - } - return nil -} - -func (m *BlkioStats) GetSectorsRecursive() []*BlkioStatsEntry { - if m != nil { - return m.SectorsRecursive - } - return nil -} - -type HugetlbStats struct { - Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` - MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"` - Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` - Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` -} - -func (m *HugetlbStats) Reset() { *m = HugetlbStats{} } -func (m *HugetlbStats) String() string { return proto.CompactTextString(m) } -func (*HugetlbStats) ProtoMessage() {} -func (*HugetlbStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } - -type CgroupStats struct { - CpuStats *CpuStats `protobuf:"bytes,1,opt,name=cpu_stats,json=cpuStats" json:"cpu_stats,omitempty"` - MemoryStats *MemoryStats `protobuf:"bytes,2,opt,name=memory_stats,json=memoryStats" json:"memory_stats,omitempty"` - BlkioStats *BlkioStats `protobuf:"bytes,3,opt,name=blkio_stats,json=blkioStats" json:"blkio_stats,omitempty"` - HugetlbStats map[string]*HugetlbStats `protobuf:"bytes,4,rep,name=hugetlb_stats,json=hugetlbStats" json:"hugetlb_stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - PidsStats *PidsStats `protobuf:"bytes,5,opt,name=pids_stats,json=pidsStats" json:"pids_stats,omitempty"` -} - -func (m *CgroupStats) Reset() { *m = CgroupStats{} } -func (m *CgroupStats) String() string { return proto.CompactTextString(m) } -func (*CgroupStats) ProtoMessage() {} -func (*CgroupStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } - -func (m *CgroupStats) GetCpuStats() *CpuStats { - if m != nil { - return m.CpuStats - } - return nil -} - -func (m *CgroupStats) GetMemoryStats() *MemoryStats { - if m != nil { - return m.MemoryStats - } - return nil -} - -func (m *CgroupStats) GetBlkioStats() *BlkioStats { - if m != nil { - return m.BlkioStats - } - return nil -} - -func (m *CgroupStats) GetHugetlbStats() map[string]*HugetlbStats { - if m != nil { - return m.HugetlbStats - } - return nil -} - -func (m *CgroupStats) GetPidsStats() *PidsStats { - if m != nil { - return m.PidsStats - } - return nil -} - -type StatsResponse struct { - NetworkStats []*NetworkStats `protobuf:"bytes,1,rep,name=network_stats,json=networkStats" json:"network_stats,omitempty"` - CgroupStats *CgroupStats `protobuf:"bytes,2,opt,name=cgroup_stats,json=cgroupStats" json:"cgroup_stats,omitempty"` - Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp" json:"timestamp,omitempty"` -} - -func (m *StatsResponse) Reset() { *m = StatsResponse{} } -func (m *StatsResponse) String() string { return proto.CompactTextString(m) } -func (*StatsResponse) ProtoMessage() {} -func (*StatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } - -func (m *StatsResponse) GetNetworkStats() []*NetworkStats { - if m != nil { - return m.NetworkStats - } - return nil -} - -func (m *StatsResponse) 
GetCgroupStats() *CgroupStats { - if m != nil { - return m.CgroupStats - } - return nil -} - -type StatsRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` -} - -func (m *StatsRequest) Reset() { *m = StatsRequest{} } -func (m *StatsRequest) String() string { return proto.CompactTextString(m) } -func (*StatsRequest) ProtoMessage() {} -func (*StatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } - -func init() { - proto.RegisterType((*GetServerVersionRequest)(nil), "types.GetServerVersionRequest") - proto.RegisterType((*GetServerVersionResponse)(nil), "types.GetServerVersionResponse") - proto.RegisterType((*UpdateProcessRequest)(nil), "types.UpdateProcessRequest") - proto.RegisterType((*UpdateProcessResponse)(nil), "types.UpdateProcessResponse") - proto.RegisterType((*CreateContainerRequest)(nil), "types.CreateContainerRequest") - proto.RegisterType((*CreateContainerResponse)(nil), "types.CreateContainerResponse") - proto.RegisterType((*SignalRequest)(nil), "types.SignalRequest") - proto.RegisterType((*SignalResponse)(nil), "types.SignalResponse") - proto.RegisterType((*AddProcessRequest)(nil), "types.AddProcessRequest") - proto.RegisterType((*Rlimit)(nil), "types.Rlimit") - proto.RegisterType((*User)(nil), "types.User") - proto.RegisterType((*AddProcessResponse)(nil), "types.AddProcessResponse") - proto.RegisterType((*CreateCheckpointRequest)(nil), "types.CreateCheckpointRequest") - proto.RegisterType((*CreateCheckpointResponse)(nil), "types.CreateCheckpointResponse") - proto.RegisterType((*DeleteCheckpointRequest)(nil), "types.DeleteCheckpointRequest") - proto.RegisterType((*DeleteCheckpointResponse)(nil), "types.DeleteCheckpointResponse") - proto.RegisterType((*ListCheckpointRequest)(nil), "types.ListCheckpointRequest") - proto.RegisterType((*Checkpoint)(nil), "types.Checkpoint") - proto.RegisterType((*ListCheckpointResponse)(nil), "types.ListCheckpointResponse") - proto.RegisterType((*StateRequest)(nil), "types.StateRequest") - proto.RegisterType((*ContainerState)(nil), "types.ContainerState") - proto.RegisterType((*Process)(nil), "types.Process") - proto.RegisterType((*Container)(nil), "types.Container") - proto.RegisterType((*Machine)(nil), "types.Machine") - proto.RegisterType((*StateResponse)(nil), "types.StateResponse") - proto.RegisterType((*UpdateContainerRequest)(nil), "types.UpdateContainerRequest") - proto.RegisterType((*UpdateResource)(nil), "types.UpdateResource") - proto.RegisterType((*UpdateContainerResponse)(nil), "types.UpdateContainerResponse") - proto.RegisterType((*EventsRequest)(nil), "types.EventsRequest") - proto.RegisterType((*Event)(nil), "types.Event") - proto.RegisterType((*NetworkStats)(nil), "types.NetworkStats") - proto.RegisterType((*CpuUsage)(nil), "types.CpuUsage") - proto.RegisterType((*ThrottlingData)(nil), "types.ThrottlingData") - proto.RegisterType((*CpuStats)(nil), "types.CpuStats") - proto.RegisterType((*PidsStats)(nil), "types.PidsStats") - proto.RegisterType((*MemoryData)(nil), "types.MemoryData") - proto.RegisterType((*MemoryStats)(nil), "types.MemoryStats") - proto.RegisterType((*BlkioStatsEntry)(nil), "types.BlkioStatsEntry") - proto.RegisterType((*BlkioStats)(nil), "types.BlkioStats") - proto.RegisterType((*HugetlbStats)(nil), "types.HugetlbStats") - proto.RegisterType((*CgroupStats)(nil), "types.CgroupStats") - proto.RegisterType((*StatsResponse)(nil), "types.StatsResponse") - proto.RegisterType((*StatsRequest)(nil), "types.StatsRequest") -} - -// Reference imports to suppress errors if 
they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion2 - -// Client API for API service - -type APIClient interface { - GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) - CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) - UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) - Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) - UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) - AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) - CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) - DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) - ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) - State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) - Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) - Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) -} - -type aPIClient struct { - cc *grpc.ClientConn -} - -func NewAPIClient(cc *grpc.ClientConn) APIClient { - return &aPIClient{cc} -} - -func (c *aPIClient) GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) { - out := new(GetServerVersionResponse) - err := grpc.Invoke(ctx, "/types.API/GetServerVersion", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) { - out := new(CreateContainerResponse) - err := grpc.Invoke(ctx, "/types.API/CreateContainer", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) { - out := new(UpdateContainerResponse) - err := grpc.Invoke(ctx, "/types.API/UpdateContainer", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) { - out := new(SignalResponse) - err := grpc.Invoke(ctx, "/types.API/Signal", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) { - out := new(UpdateProcessResponse) - err := grpc.Invoke(ctx, "/types.API/UpdateProcess", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) { - out := new(AddProcessResponse) - err := grpc.Invoke(ctx, "/types.API/AddProcess", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) { - out := new(CreateCheckpointResponse) - err := grpc.Invoke(ctx, "/types.API/CreateCheckpoint", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) { - out := new(DeleteCheckpointResponse) - err := grpc.Invoke(ctx, "/types.API/DeleteCheckpoint", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) { - out := new(ListCheckpointResponse) - err := grpc.Invoke(ctx, "/types.API/ListCheckpoint", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) { - out := new(StateResponse) - err := grpc.Invoke(ctx, "/types.API/State", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) { - stream, err := grpc.NewClientStream(ctx, &_API_serviceDesc.Streams[0], c.cc, "/types.API/Events", opts...) - if err != nil { - return nil, err - } - x := &aPIEventsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type API_EventsClient interface { - Recv() (*Event, error) - grpc.ClientStream -} - -type aPIEventsClient struct { - grpc.ClientStream -} - -func (x *aPIEventsClient) Recv() (*Event, error) { - m := new(Event) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *aPIClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) { - out := new(StatsResponse) - err := grpc.Invoke(ctx, "/types.API/Stats", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for API service - -type APIServer interface { - GetServerVersion(context.Context, *GetServerVersionRequest) (*GetServerVersionResponse, error) - CreateContainer(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) - UpdateContainer(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) - Signal(context.Context, *SignalRequest) (*SignalResponse, error) - UpdateProcess(context.Context, *UpdateProcessRequest) (*UpdateProcessResponse, error) - AddProcess(context.Context, *AddProcessRequest) (*AddProcessResponse, error) - CreateCheckpoint(context.Context, *CreateCheckpointRequest) (*CreateCheckpointResponse, error) - DeleteCheckpoint(context.Context, *DeleteCheckpointRequest) (*DeleteCheckpointResponse, error) - ListCheckpoint(context.Context, *ListCheckpointRequest) (*ListCheckpointResponse, error) - State(context.Context, *StateRequest) (*StateResponse, error) - Events(*EventsRequest, API_EventsServer) error - Stats(context.Context, *StatsRequest) (*StatsResponse, error) -} - -func RegisterAPIServer(s *grpc.Server, srv APIServer) { - s.RegisterService(&_API_serviceDesc, srv) -} - -func _API_GetServerVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetServerVersionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).GetServerVersion(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/GetServerVersion", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).GetServerVersion(ctx, req.(*GetServerVersionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).CreateContainer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/CreateContainer", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).CreateContainer(ctx, req.(*CreateContainerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_UpdateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).UpdateContainer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/UpdateContainer", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).UpdateContainer(ctx, req.(*UpdateContainerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_Signal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SignalRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).Signal(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/Signal", - } - 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).Signal(ctx, req.(*SignalRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_UpdateProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateProcessRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).UpdateProcess(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/UpdateProcess", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).UpdateProcess(ctx, req.(*UpdateProcessRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddProcessRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).AddProcess(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/AddProcess", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).AddProcess(ctx, req.(*AddProcessRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_CreateCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateCheckpointRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).CreateCheckpoint(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/CreateCheckpoint", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).CreateCheckpoint(ctx, req.(*CreateCheckpointRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_DeleteCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteCheckpointRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).DeleteCheckpoint(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/DeleteCheckpoint", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).DeleteCheckpoint(ctx, req.(*DeleteCheckpointRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_ListCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListCheckpointRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).ListCheckpoint(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/ListCheckpoint", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).ListCheckpoint(ctx, req.(*ListCheckpointRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(StateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).State(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/State", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).State(ctx, req.(*StateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_Events_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(EventsRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(APIServer).Events(m, &aPIEventsServer{stream}) -} - -type API_EventsServer interface { - Send(*Event) error - grpc.ServerStream -} - -type aPIEventsServer struct { - grpc.ServerStream -} - -func (x *aPIEventsServer) Send(m *Event) error { - return x.ServerStream.SendMsg(m) -} - -func _API_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).Stats(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/Stats", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).Stats(ctx, req.(*StatsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _API_serviceDesc = grpc.ServiceDesc{ - ServiceName: "types.API", - HandlerType: (*APIServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetServerVersion", - Handler: _API_GetServerVersion_Handler, - }, - { - MethodName: "CreateContainer", - Handler: _API_CreateContainer_Handler, - }, - { - MethodName: "UpdateContainer", - Handler: _API_UpdateContainer_Handler, - }, - { - MethodName: "Signal", - Handler: _API_Signal_Handler, - }, - { - MethodName: "UpdateProcess", - Handler: _API_UpdateProcess_Handler, - }, - { - MethodName: "AddProcess", - Handler: _API_AddProcess_Handler, - }, - { - MethodName: "CreateCheckpoint", - Handler: _API_CreateCheckpoint_Handler, - }, - { - MethodName: "DeleteCheckpoint", - Handler: _API_DeleteCheckpoint_Handler, - }, - { - MethodName: "ListCheckpoint", - Handler: _API_ListCheckpoint_Handler, - }, - { - MethodName: "State", - Handler: _API_State_Handler, - }, - { - MethodName: "Stats", - Handler: _API_Stats_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Events", - Handler: _API_Events_Handler, - ServerStreams: true, - }, - }, -} - -var fileDescriptor0 = []byte{ - // 2361 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x59, 0xcd, 0x73, 0x1b, 0x4b, - 0x11, 0x8f, 0xbe, 0xad, 0xd6, 0x87, 0xed, 0x8d, 0x3f, 0x14, 0xbd, 0x97, 0xbc, 0xb0, 0xf5, 0x80, - 0x00, 0x0f, 0x13, 0x94, 0xf7, 0x8a, 0x14, 0x54, 0x51, 0x95, 0xd8, 0xe1, 0x11, 0x5e, 0x1c, 0x94, - 0xb5, 0xcd, 0x3b, 0xaa, 0xd6, 0xab, 0x89, 0xb4, 0x58, 0xda, 0xdd, 0xec, 0x8e, 0x6c, 0xf9, 0xc2, - 0x81, 0x03, 0xdc, 0x38, 0x53, 0x05, 0xc5, 0x85, 0x1b, 0x77, 0x0e, 0xfc, 0x05, 0x54, 0xf1, 0x87, - 0x70, 0xe3, 0xce, 0x91, 0x9e, 0x9e, 0xd9, 0xd9, 0x59, 0x7d, 0xd8, 0xe1, 0x40, 0x71, 0x79, 0x17, - 0xd5, 0xf4, 0x6f, 0x7a, 0xba, 0x7b, 0xba, 0x7b, 0x7a, 0x7a, 0x47, 0x50, 0x77, 0x23, 0xff, 0x20, - 0x8a, 0x43, 0x1e, 0x5a, 0x15, 0x7e, 0x1d, 0xb1, 0xc4, 0xbe, 0x07, 0xfb, 0x9f, 0x33, 0x7e, 0xc2, - 0xe2, 0x4b, 0x16, 0xff, 0x82, 0xc5, 0x89, 0x1f, 0x06, 0x0e, 0x7b, 0x37, 0x63, 0x09, 0xb7, 0xe7, - 0xd0, 0x59, 0x9e, 
0x4a, 0xa2, 0x30, 0x48, 0x98, 0xb5, 0x03, 0x95, 0xa9, 0xfb, 0xcb, 0x30, 0xee, - 0x14, 0x1e, 0x16, 0x1e, 0xb5, 0x1c, 0x49, 0x10, 0xea, 0x07, 0x88, 0x16, 0x15, 0x2a, 0x08, 0x81, - 0x46, 0x2e, 0xf7, 0xc6, 0x9d, 0x92, 0x44, 0x89, 0xb0, 0xba, 0xb0, 0x11, 0xb3, 0x4b, 0x5f, 0x48, - 0xed, 0x94, 0x71, 0xa2, 0xee, 0x68, 0xda, 0xfe, 0x4d, 0x01, 0x76, 0xce, 0xa2, 0xa1, 0xcb, 0x59, - 0x3f, 0x0e, 0x3d, 0x96, 0x24, 0xca, 0x24, 0xab, 0x0d, 0x45, 0x7f, 0x48, 0x3a, 0xeb, 0x0e, 0x8e, - 0xac, 0x2d, 0x28, 0x45, 0x08, 0x14, 0x09, 0x10, 0x43, 0xeb, 0x01, 0x80, 0x37, 0x09, 0x13, 0x76, - 0xc2, 0x87, 0x7e, 0x40, 0x1a, 0x37, 0x1c, 0x03, 0x11, 0xc6, 0x5c, 0xf9, 0x43, 0x3e, 0x26, 0x9d, - 0x68, 0x0c, 0x11, 0xd6, 0x1e, 0x54, 0xc7, 0xcc, 0x1f, 0x8d, 0x79, 0xa7, 0x42, 0xb0, 0xa2, 0xec, - 0x7d, 0xd8, 0x5d, 0xb0, 0x43, 0xee, 0xdf, 0xfe, 0x47, 0x11, 0xf6, 0x0e, 0x63, 0x86, 0x33, 0x87, - 0x61, 0xc0, 0x5d, 0x3f, 0x60, 0xf1, 0x3a, 0x1b, 0xd1, 0xa2, 0xf3, 0x59, 0x30, 0x9c, 0xb0, 0xbe, - 0x8b, 0x6a, 0xa5, 0xa9, 0x06, 0x42, 0x16, 0x8f, 0x99, 0x77, 0x11, 0x85, 0x7e, 0xc0, 0xc9, 0x62, - 0x9c, 0xcf, 0x10, 0x61, 0x71, 0x42, 0x9b, 0x91, 0x5e, 0x92, 0x84, 0xb0, 0x18, 0x07, 0xe1, 0x4c, - 0x5a, 0x5c, 0x77, 0x14, 0xa5, 0x70, 0x16, 0xc7, 0x9d, 0xaa, 0xc6, 0x91, 0x12, 0xf8, 0xc4, 0x3d, - 0x67, 0x93, 0xa4, 0x53, 0x7b, 0x58, 0x12, 0xb8, 0xa4, 0xac, 0x87, 0xd0, 0x08, 0xc2, 0xbe, 0x7f, - 0x19, 0x72, 0x27, 0x0c, 0x79, 0x67, 0x83, 0x1c, 0x66, 0x42, 0x56, 0x07, 0x6a, 0xf1, 0x2c, 0xe0, - 0xfe, 0x94, 0x75, 0xea, 0x24, 0x32, 0x25, 0xc5, 0x5a, 0x35, 0x7c, 0x16, 0x8f, 0x92, 0x0e, 0x90, - 0x60, 0x13, 0xb2, 0x3e, 0x86, 0x56, 0xb6, 0x93, 0x23, 0x3f, 0xee, 0x34, 0x48, 0x42, 0x1e, 0xb4, - 0x5f, 0xc2, 0xfe, 0x92, 0x2f, 0x55, 0x9e, 0x1d, 0x40, 0xdd, 0x4b, 0x41, 0xf2, 0x69, 0xa3, 0xb7, - 0x75, 0x40, 0x99, 0x7b, 0x90, 0x31, 0x67, 0x2c, 0x28, 0xaa, 0x75, 0xe2, 0x8f, 0x02, 0x77, 0xf2, - 0xfe, 0x19, 0x23, 0x3c, 0x46, 0x4b, 0x54, 0x7e, 0x2a, 0xca, 0xde, 0x82, 0x76, 0x2a, 0x4a, 0x05, - 0xfd, 0xaf, 0x25, 0xd8, 0x7e, 0x36, 0x1c, 0xde, 0x92, 0x93, 0x98, 0xd8, 0x9c, 0xc5, 0x98, 0xfa, - 0x28, 0xb1, 0x48, 0xee, 0xd4, 0xb4, 0xf5, 0x11, 0x94, 0x67, 0x09, 0xee, 0xa4, 0x44, 0x3b, 0x69, - 0xa8, 0x9d, 0x9c, 0x21, 0xe4, 0xd0, 0x84, 0x65, 0x41, 0xd9, 0x15, 0xbe, 0x2c, 0x93, 0x2f, 0x69, - 0x2c, 0x4c, 0x66, 0xc1, 0x25, 0xc6, 0x59, 0x40, 0x62, 0x28, 0x10, 0xef, 0x6a, 0xa8, 0x22, 0x2c, - 0x86, 0xe9, 0xb6, 0x6a, 0xd9, 0xb6, 0x74, 0xda, 0x6c, 0xac, 0x4e, 0x9b, 0xfa, 0x9a, 0xb4, 0x81, - 0x5c, 0xda, 0xd8, 0xd0, 0xf4, 0xdc, 0xc8, 0x3d, 0xf7, 0x27, 0x3e, 0xf7, 0x59, 0x82, 0xf1, 0x13, - 0x46, 0xe4, 0x30, 0xeb, 0x11, 0x6c, 0xba, 0x51, 0xe4, 0xc6, 0xd3, 0x30, 0x46, 0xd7, 0xbc, 0xf5, - 0x27, 0xac, 0xd3, 0x24, 0x21, 0x8b, 0xb0, 0x90, 0x96, 0xb0, 0x89, 0x1f, 0xcc, 0xe6, 0xaf, 0x44, - 0xf6, 0x75, 0x5a, 0xc4, 0x96, 0xc3, 0x84, 0xb4, 0x20, 0x7c, 0xcd, 0xae, 0xfa, 0xb1, 0x7f, 0x89, - 0x6b, 0x46, 0xa8, 0xb4, 0x4d, 0x5e, 0x5c, 0x84, 0xad, 0x6f, 0x62, 0x62, 0x4e, 0xfc, 0xa9, 0xcf, - 0x93, 0xce, 0x26, 0x9a, 0xd5, 0xe8, 0xb5, 0x94, 0x3f, 0x1d, 0x42, 0x9d, 0x74, 0xd6, 0x3e, 0x82, - 0xaa, 0x84, 0x84, 0x7b, 0x05, 0x8b, 0x8a, 0x16, 0x8d, 0x05, 0x96, 0x84, 0x6f, 0x39, 0xc5, 0xaa, - 0xec, 0xd0, 0x58, 0x60, 0x63, 0x37, 0x1e, 0x52, 0x9c, 0x10, 0x13, 0x63, 0xdb, 0x81, 0xb2, 0x08, - 0x94, 0x70, 0xf5, 0x4c, 0x05, 0xbc, 0xe5, 0x88, 0xa1, 0x40, 0x46, 0x2a, 0xa7, 0x10, 0xc1, 0xa1, - 0xf5, 0x0d, 0x68, 0xbb, 0xc3, 0x21, 0xba, 0x27, 0xc4, 0xa8, 0x7f, 0xee, 0x0f, 0x13, 0x94, 0x54, - 0xc2, 0xc9, 0x05, 0xd4, 0xde, 0x01, 0xcb, 0x4c, 0x28, 0x95, 0x67, 0xbf, 0x2e, 0xe8, 0x03, 0xa1, - 0xcf, 0xc9, 0xba, 0x6c, 0xfb, 0x7e, 0xae, 
0x7a, 0x14, 0x29, 0xaf, 0xb6, 0xd3, 0x13, 0x92, 0xad, - 0x36, 0x0b, 0xca, 0xd2, 0xa1, 0x2c, 0xad, 0x3a, 0x94, 0x5d, 0xe8, 0x2c, 0xdb, 0xa0, 0x0c, 0xf4, - 0x60, 0xff, 0x88, 0x4d, 0xd8, 0xfb, 0xd8, 0x87, 0x9e, 0x0c, 0x5c, 0x2c, 0x1d, 0xf2, 0xc0, 0xd1, - 0xf8, 0xfd, 0x0d, 0x58, 0x56, 0xa2, 0x0c, 0x38, 0x86, 0xdd, 0x57, 0x7e, 0xc2, 0x6f, 0x57, 0xbf, - 0xa4, 0xaa, 0xb8, 0x4a, 0xd5, 0xef, 0x0b, 0x00, 0x99, 0x2c, 0x6d, 0x73, 0xc1, 0xb0, 0x19, 0x31, - 0x36, 0xf7, 0xb9, 0x3a, 0xd1, 0x34, 0x16, 0x71, 0xe7, 0x5e, 0xa4, 0x2e, 0x19, 0x31, 0x14, 0x15, - 0x71, 0x16, 0xf8, 0xf3, 0x93, 0xd0, 0xbb, 0x60, 0x3c, 0xa1, 0x8a, 0x8d, 0xd5, 0xd4, 0x80, 0xe8, - 0x58, 0x8e, 0xd9, 0x64, 0x42, 0x65, 0x7b, 0xc3, 0x91, 0x84, 0xa8, 0xb1, 0x6c, 0x1a, 0xf1, 0xeb, - 0xd7, 0x27, 0x78, 0xa8, 0xc5, 0x09, 0x4b, 0x49, 0xdc, 0xe9, 0xde, 0xe2, 0x4e, 0x55, 0x69, 0x7c, - 0x02, 0x8d, 0x6c, 0x17, 0x09, 0x1a, 0x5b, 0x5a, 0x1d, 0x7a, 0x93, 0xcb, 0x7e, 0x00, 0xcd, 0x13, - 0x8e, 0x41, 0x5d, 0xe3, 0x2f, 0xfb, 0x11, 0xb4, 0x75, 0x5d, 0x25, 0x46, 0x59, 0x19, 0x5c, 0x3e, - 0x4b, 0x14, 0x97, 0xa2, 0xec, 0xbf, 0x95, 0xa0, 0xa6, 0x12, 0x37, 0xad, 0x3e, 0x85, 0xac, 0xfa, - 0xfc, 0x5f, 0x8a, 0xe0, 0x87, 0x50, 0x4f, 0xae, 0x13, 0xce, 0xa6, 0x7d, 0x55, 0x0a, 0x5b, 0x4e, - 0x06, 0x7c, 0x55, 0x10, 0xb3, 0x82, 0xf8, 0xf7, 0x02, 0xd4, 0x75, 0x98, 0xff, 0xeb, 0x86, 0xe5, - 0x13, 0xa8, 0x47, 0x32, 0xf0, 0x4c, 0xd6, 0xb5, 0x46, 0xaf, 0xad, 0x14, 0xa5, 0x95, 0x2c, 0x63, - 0x30, 0xf2, 0xa7, 0x6c, 0xe6, 0x8f, 0xd1, 0x90, 0x54, 0x72, 0x0d, 0x09, 0x06, 0x3f, 0x12, 0x05, - 0xb3, 0x4a, 0x05, 0x93, 0xc6, 0x66, 0x0b, 0x52, 0xcb, 0xb5, 0x20, 0xf6, 0x67, 0x50, 0x3b, 0x76, - 0xbd, 0x31, 0xee, 0x43, 0x2c, 0xf4, 0x22, 0x95, 0xa6, 0xb8, 0x50, 0x8c, 0x85, 0x92, 0x29, 0x43, - 0x7f, 0x5f, 0xab, 0xea, 0xae, 0x28, 0xfb, 0x02, 0xdb, 0x04, 0x79, 0x0c, 0xd4, 0x61, 0x7a, 0x8c, - 0x65, 0x34, 0x75, 0x48, 0x7a, 0x96, 0x96, 0x1b, 0x0d, 0x83, 0x07, 0xc3, 0x52, 0x9b, 0x4a, 0xcd, - 0xaa, 0xea, 0xa6, 0x3e, 0x50, 0xf6, 0x38, 0xe9, 0xb4, 0xfd, 0xdb, 0x02, 0xec, 0xc9, 0x2e, 0xf2, - 0xd6, 0x5e, 0x71, 0x75, 0x77, 0x22, 0xdd, 0x57, 0xca, 0xb9, 0xef, 0x09, 0xd4, 0x63, 0x96, 0x84, - 0xb3, 0x18, 0xdd, 0x4c, 0x9e, 0x6d, 0xf4, 0x76, 0xd3, 0x93, 0x44, 0xba, 0x1c, 0x35, 0xeb, 0x64, - 0x7c, 0xf6, 0x1f, 0x4b, 0xd0, 0xce, 0xcf, 0x8a, 0x8a, 0x75, 0x3e, 0xb9, 0xf0, 0xc3, 0x2f, 0x65, - 0xfb, 0x5b, 0x20, 0x37, 0x99, 0x90, 0x38, 0x55, 0xe8, 0xcb, 0x13, 0xbc, 0x03, 0x51, 0x93, 0x74, - 0x63, 0x06, 0xa8, 0xd9, 0x3e, 0x8b, 0xfd, 0x30, 0xbd, 0x2e, 0x33, 0x40, 0x94, 0x01, 0x24, 0xde, - 0xcc, 0x42, 0xee, 0x92, 0x91, 0x65, 0x47, 0xd3, 0xd4, 0xf7, 0x62, 0x8c, 0x18, 0x3f, 0x14, 0x51, - 0xab, 0xa8, 0xbe, 0x57, 0x23, 0xd9, 0xfc, 0x31, 0x9b, 0x26, 0xea, 0x98, 0x1b, 0x88, 0xb0, 0x5c, - 0x46, 0xf3, 0x95, 0x48, 0x6a, 0x4a, 0x0c, 0xb4, 0xdc, 0x80, 0x84, 0x04, 0x49, 0x9e, 0x5c, 0xb9, - 0x11, 0x1d, 0xfb, 0xb2, 0x63, 0x20, 0x98, 0xc8, 0xdb, 0x92, 0x42, 0x6f, 0xe0, 0x57, 0x8e, 0x2b, - 0x2e, 0x66, 0x2a, 0x03, 0x65, 0x67, 0x79, 0x42, 0x70, 0x5f, 0xb0, 0x38, 0x60, 0x93, 0x63, 0x43, - 0x2b, 0x48, 0xee, 0xa5, 0x09, 0xab, 0x07, 0x3b, 0x12, 0x3c, 0x3d, 0xec, 0x9b, 0x0b, 0x1a, 0xb4, - 0x60, 0xe5, 0x9c, 0xf8, 0x16, 0x5b, 0xca, 0x13, 0x75, 0xe1, 0x7d, 0x17, 0x5a, 0x2f, 0x2e, 0x19, - 0x56, 0xf0, 0x34, 0x73, 0xd0, 0xef, 0xe2, 0x00, 0x60, 0x36, 0x4c, 0x23, 0x15, 0xb5, 0x0c, 0xb0, - 0x13, 0xa8, 0x10, 0xfb, 0xca, 0x86, 0x47, 0x26, 0x5d, 0x51, 0x27, 0x5d, 0x3e, 0xc5, 0x5a, 0x3a, - 0xc5, 0x54, 0x32, 0x96, 0xb3, 0x64, 0xcc, 0x29, 0xad, 0x2c, 0x2a, 0xfd, 0x5d, 0x11, 0x9a, 0xaf, - 0x19, 0xbf, 0x0a, 0xe3, 0x0b, 0x71, 0xb8, 0x92, 0x95, 0xf7, 0xe8, 
0x3d, 0xfc, 0xec, 0x9b, 0x0f, - 0xce, 0xaf, 0xb9, 0x4e, 0xa6, 0x5a, 0x3c, 0x7f, 0x2e, 0x48, 0xeb, 0x3e, 0x00, 0x4e, 0xf5, 0x5d, - 0x79, 0x77, 0xaa, 0x5c, 0x8a, 0xe7, 0x0a, 0xb0, 0x3e, 0x80, 0xba, 0x33, 0x1f, 0x60, 0x0d, 0x0e, - 0xe3, 0x24, 0x4d, 0xa6, 0x78, 0xfe, 0x82, 0x68, 0xb1, 0x16, 0x27, 0x87, 0x71, 0x18, 0x45, 0x6c, - 0x98, 0x9a, 0x16, 0xcf, 0x8f, 0x24, 0x20, 0xb4, 0x9e, 0xa6, 0x5a, 0xab, 0x52, 0x2b, 0xcf, 0xb4, - 0xe2, 0x54, 0xa4, 0xb4, 0xd6, 0xd4, 0xa6, 0x4c, 0xad, 0xa7, 0x5a, 0xab, 0x4c, 0xa1, 0x0d, 0x6e, - 0x68, 0x3d, 0xcd, 0xb4, 0xd6, 0xd3, 0xb5, 0x4a, 0xab, 0xfd, 0x97, 0x02, 0x6c, 0x60, 0x2a, 0x9f, - 0x25, 0xee, 0x88, 0xe1, 0xad, 0xd7, 0xe0, 0x98, 0xf6, 0x93, 0xc1, 0x4c, 0x90, 0x2a, 0x64, 0x40, - 0x90, 0x64, 0xf8, 0x1a, 0x34, 0x23, 0x16, 0x63, 0x82, 0x2b, 0x8e, 0x22, 0x16, 0x21, 0x4c, 0x68, - 0x89, 0x49, 0x96, 0x03, 0xb8, 0x4b, 0x73, 0x03, 0x3f, 0x18, 0xc8, 0x0c, 0x9a, 0x86, 0x43, 0xa6, - 0x5c, 0xb5, 0x4d, 0x53, 0x2f, 0x83, 0x2f, 0xf4, 0x84, 0xf5, 0x6d, 0xd8, 0xd6, 0xfc, 0xe2, 0x66, - 0x25, 0x6e, 0xe9, 0xba, 0x4d, 0xc5, 0x7d, 0xa6, 0x60, 0xfb, 0x57, 0xd0, 0x3e, 0x1d, 0xc7, 0x21, - 0xe7, 0x78, 0xf5, 0x8c, 0x8e, 0x5c, 0x3c, 0xa0, 0x58, 0x75, 0x23, 0x3a, 0xc6, 0x89, 0xb2, 0x36, - 0x25, 0xad, 0xef, 0xc0, 0x36, 0x97, 0xbc, 0x6c, 0x38, 0x48, 0x79, 0x64, 0x34, 0xb7, 0xf4, 0x44, - 0x5f, 0x31, 0x7f, 0x1d, 0xda, 0x19, 0x33, 0xd5, 0x70, 0x69, 0x6f, 0x4b, 0xa3, 0xa7, 0xa2, 0x92, - 0xff, 0x41, 0x3a, 0x4b, 0x66, 0xce, 0x27, 0x54, 0x55, 0x0c, 0x57, 0x35, 0x7a, 0x9b, 0x69, 0x35, - 0x56, 0xce, 0xa0, 0x4a, 0x22, 0xdd, 0xf2, 0x63, 0xd8, 0xe4, 0xda, 0xf4, 0x01, 0x1e, 0x20, 0x57, - 0x95, 0xe4, 0xb4, 0x22, 0xe6, 0x37, 0xe6, 0xb4, 0x79, 0x7e, 0xa3, 0xe8, 0x79, 0xd9, 0x26, 0x28, - 0x85, 0xd2, 0xbe, 0x86, 0xc4, 0x48, 0x85, 0xfd, 0x23, 0xa8, 0x63, 0x0f, 0x91, 0x48, 0xeb, 0xd0, - 0x31, 0xde, 0x2c, 0x8e, 0xf1, 0x7c, 0xa5, 0x8e, 0x51, 0xa4, 0xe8, 0x31, 0xe8, 0x8a, 0x55, 0xce, - 0x90, 0x84, 0x1d, 0x02, 0xc8, 0x63, 0x4e, 0xda, 0x90, 0xc7, 0x4c, 0x01, 0x49, 0x88, 0x3c, 0x9b, - 0xba, 0x73, 0x1d, 0x7a, 0xca, 0x33, 0x04, 0xe4, 0x06, 0x51, 0xe1, 0x5b, 0xd7, 0x9f, 0x78, 0xea, - 0x7d, 0x00, 0x15, 0x2a, 0x32, 0x53, 0x58, 0x36, 0x15, 0xfe, 0xb9, 0x08, 0x0d, 0xa9, 0x51, 0x1a, - 0x8c, 0x5c, 0x1e, 0x5e, 0x46, 0x5a, 0x25, 0x11, 0xd8, 0x2e, 0x54, 0x32, 0x75, 0x59, 0xeb, 0x98, - 0x99, 0x9a, 0xda, 0x86, 0x97, 0x63, 0x82, 0xf5, 0xd2, 0xf0, 0xce, 0x4a, 0xee, 0xba, 0x60, 0x92, - 0x06, 0x7f, 0x0a, 0x4d, 0x99, 0x9f, 0x6a, 0x4d, 0x79, 0xdd, 0x9a, 0x86, 0x64, 0x93, 0xab, 0x9e, - 0x88, 0x0e, 0x0d, 0xed, 0xa5, 0x8e, 0xa0, 0xd1, 0xbb, 0x9f, 0x63, 0xa7, 0x9d, 0x1c, 0xd0, 0xef, - 0x8b, 0x80, 0x63, 0x69, 0x96, 0xbc, 0xdd, 0xa7, 0x00, 0x19, 0x28, 0x6a, 0xd6, 0x05, 0xbb, 0x4e, - 0x3b, 0x51, 0x1c, 0x8a, 0xbd, 0x5f, 0xba, 0x93, 0x59, 0xea, 0x54, 0x49, 0xfc, 0xb0, 0xf8, 0xb4, - 0x80, 0x5f, 0x31, 0x9b, 0xcf, 0xc5, 0x3d, 0x67, 0x2c, 0xcf, 0x3d, 0x6b, 0x95, 0x57, 0x3e, 0x6b, - 0x95, 0xd3, 0x67, 0x2d, 0x2c, 0xa3, 0x61, 0xa4, 0x6e, 0x65, 0x1c, 0x65, 0x8a, 0xca, 0x86, 0x22, - 0xfb, 0x9f, 0x65, 0x80, 0x4c, 0x8b, 0x75, 0x02, 0x5d, 0x3f, 0x1c, 0x88, 0x4b, 0xc5, 0xf7, 0x98, - 0x2c, 0x48, 0x83, 0x98, 0x61, 0xfa, 0x24, 0xfe, 0x25, 0x53, 0x7d, 0xc7, 0x9e, 0xda, 0xf7, 0x82, - 0x71, 0xce, 0x3e, 0x52, 0x72, 0x21, 0x55, 0x2e, 0x27, 0x5d, 0x66, 0xfd, 0x0c, 0x76, 0x33, 0xa1, - 0x43, 0x43, 0x5e, 0xf1, 0x46, 0x79, 0x77, 0xb5, 0xbc, 0x61, 0x26, 0xeb, 0x27, 0x80, 0xf0, 0x00, - 0xef, 0x98, 0x59, 0x4e, 0x52, 0xe9, 0x46, 0x49, 0xdb, 0x7e, 0xf8, 0x86, 0x56, 0x64, 0x72, 0xde, - 0xc0, 0x3d, 0x63, 0xa3, 0xe2, 0xd8, 0x1b, 0xd2, 0xca, 0x37, 0x4a, 0xdb, 0xd3, 0x76, 0x89, 
0xc2, - 0x90, 0x89, 0xfc, 0x02, 0x70, 0x66, 0x70, 0xe5, 0xfa, 0x7c, 0x51, 0x5e, 0xe5, 0xb6, 0x7d, 0x7e, - 0x89, 0x8b, 0xf2, 0xc2, 0xe4, 0x3e, 0xa7, 0x2c, 0x1e, 0xe5, 0xf6, 0x59, 0xbd, 0x6d, 0x9f, 0xc7, - 0xb4, 0x22, 0x93, 0xf3, 0x1c, 0x10, 0x5c, 0xb4, 0xa7, 0x76, 0xa3, 0x94, 0x4d, 0x3f, 0xcc, 0xdb, - 0x72, 0x08, 0xdb, 0x09, 0xf3, 0x38, 0xde, 0x28, 0x86, 0x8c, 0x8d, 0x1b, 0x65, 0x6c, 0xa9, 0x05, - 0x5a, 0x88, 0xfd, 0x0e, 0x9a, 0x3f, 0x9d, 0x8d, 0x18, 0x9f, 0x9c, 0xeb, 0x33, 0xff, 0xbf, 0x2e, - 0x33, 0xff, 0xc6, 0x32, 0x73, 0x38, 0x8a, 0xc3, 0x59, 0x94, 0xab, 0xda, 0xf2, 0x0c, 0x2f, 0x55, - 0x6d, 0xe2, 0xa1, 0xaa, 0x2d, 0xb9, 0x3f, 0x83, 0xa6, 0x6c, 0xb2, 0xd4, 0x02, 0x59, 0x85, 0xac, - 0xe5, 0x43, 0x9f, 0x36, 0x75, 0x72, 0x59, 0x4f, 0x35, 0xac, 0x6a, 0x55, 0xbe, 0x1a, 0x65, 0x6e, - 0xc2, 0x2f, 0x96, 0xec, 0xd4, 0xbd, 0x84, 0xd6, 0x58, 0xfa, 0x46, 0xad, 0x92, 0x09, 0xf8, 0x71, - 0x6a, 0x5c, 0xb6, 0x87, 0x03, 0xd3, 0x87, 0xd2, 0xd5, 0xcd, 0xb1, 0xe9, 0xd6, 0xef, 0x01, 0x88, - 0x4f, 0x92, 0x41, 0x5a, 0xa8, 0xcc, 0x17, 0x49, 0x7d, 0x43, 0xe0, 0xf7, 0x4f, 0x3a, 0xec, 0x9e, - 0xc2, 0xf6, 0x92, 0xcc, 0x15, 0x65, 0xea, 0x5b, 0x66, 0x99, 0x6a, 0xf4, 0xee, 0x2a, 0x91, 0xe6, - 0x52, 0xb3, 0x76, 0xfd, 0xa9, 0x20, 0xbf, 0x60, 0xf4, 0xa3, 0x91, 0xf5, 0x14, 0x5a, 0x81, 0x6c, - 0xbe, 0x74, 0x00, 0x4a, 0x86, 0x20, 0xb3, 0x31, 0x73, 0x9a, 0x81, 0xd9, 0xa6, 0x61, 0x20, 0x3c, - 0xf2, 0xc0, 0xca, 0x40, 0x18, 0xce, 0x71, 0x1a, 0x9e, 0x11, 0xed, 0x5c, 0x33, 0x58, 0x5a, 0x6c, - 0x06, 0xd5, 0x43, 0xc3, 0xba, 0x57, 0xd2, 0xde, 0xbf, 0xaa, 0x50, 0x7a, 0xd6, 0x7f, 0x69, 0x9d, - 0xc1, 0xd6, 0xe2, 0x9f, 0x0c, 0xd6, 0x03, 0xa5, 0x7a, 0xcd, 0x1f, 0x13, 0xdd, 0x8f, 0xd6, 0xce, - 0xab, 0x6e, 0xf9, 0x8e, 0xe5, 0xc0, 0xe6, 0xc2, 0x93, 0xb2, 0x95, 0x5e, 0x27, 0xab, 0x9f, 0xed, - 0xbb, 0x0f, 0xd6, 0x4d, 0x9b, 0x32, 0x17, 0xda, 0x73, 0x2d, 0x73, 0xf5, 0xe7, 0x9d, 0x96, 0xb9, - 0xae, 0xab, 0xbf, 0x63, 0xfd, 0x00, 0xaa, 0xf2, 0x91, 0xd9, 0xda, 0x51, 0xbc, 0xb9, 0xe7, 0xeb, - 0xee, 0xee, 0x02, 0xaa, 0x17, 0xbe, 0x82, 0x56, 0xee, 0x9f, 0x09, 0xeb, 0x83, 0x9c, 0xae, 0xfc, - 0x1b, 0x75, 0xf7, 0xc3, 0xd5, 0x93, 0x5a, 0xda, 0x21, 0x40, 0xf6, 0x0e, 0x69, 0x75, 0x14, 0xf7, - 0xd2, 0x5b, 0x77, 0xf7, 0xde, 0x8a, 0x19, 0x2d, 0x04, 0x43, 0xb9, 0xf8, 0x62, 0x68, 0x2d, 0x78, - 0x75, 0xf1, 0xbd, 0x4e, 0x87, 0x72, 0xed, 0x53, 0x23, 0x89, 0x5d, 0x7c, 0x07, 0xd4, 0x62, 0xd7, - 0xbc, 0x42, 0x6a, 0xb1, 0x6b, 0x1f, 0x10, 0xef, 0x58, 0x3f, 0x87, 0x76, 0xfe, 0x61, 0xcd, 0x4a, - 0x9d, 0xb4, 0xf2, 0x65, 0xb1, 0x7b, 0x7f, 0xcd, 0xac, 0x16, 0xf8, 0x29, 0x54, 0xe4, 0x8b, 0x59, - 0x7a, 0xe4, 0xcc, 0x87, 0xb6, 0xee, 0x4e, 0x1e, 0xd4, 0xab, 0x1e, 0x43, 0x55, 0x7e, 0xd8, 0xe9, - 0x04, 0xc8, 0x7d, 0xe7, 0x75, 0x9b, 0x26, 0x6a, 0xdf, 0x79, 0x5c, 0x48, 0xf5, 0x24, 0x39, 0x3d, - 0xc9, 0x2a, 0x3d, 0x46, 0x70, 0xce, 0xab, 0xf4, 0xaf, 0xdf, 0x93, 0xff, 0x04, 0x00, 0x00, 0xff, - 0xff, 0xbf, 0xea, 0xc8, 0x11, 0x02, 0x1c, 0x00, 0x00, -} diff --git a/vendor/src/github.com/docker/containerd/api/grpc/types/api.proto b/vendor/src/github.com/docker/containerd/api/grpc/types/api.proto deleted file mode 100644 index 478f990e7..000000000 --- a/vendor/src/github.com/docker/containerd/api/grpc/types/api.proto +++ /dev/null @@ -1,313 +0,0 @@ -syntax = "proto3"; - -package types; - -service API { - rpc GetServerVersion(GetServerVersionRequest) returns (GetServerVersionResponse) {} - rpc CreateContainer(CreateContainerRequest) returns (CreateContainerResponse) {} - rpc UpdateContainer(UpdateContainerRequest) returns (UpdateContainerResponse) {} - rpc Signal(SignalRequest) returns 
(SignalResponse) {} - rpc UpdateProcess(UpdateProcessRequest) returns (UpdateProcessResponse) {} - rpc AddProcess(AddProcessRequest) returns (AddProcessResponse) {} - rpc CreateCheckpoint(CreateCheckpointRequest) returns (CreateCheckpointResponse) {} - rpc DeleteCheckpoint(DeleteCheckpointRequest) returns (DeleteCheckpointResponse) {} - rpc ListCheckpoint(ListCheckpointRequest) returns (ListCheckpointResponse) {} - rpc State(StateRequest) returns (StateResponse) {} - rpc Events(EventsRequest) returns (stream Event) {} - rpc Stats(StatsRequest) returns (StatsResponse) {} -} - -message GetServerVersionRequest { -} - -message GetServerVersionResponse { - uint32 major = 1; - uint32 minor = 2; - uint32 patch = 3; - string revision = 4; -} - -message UpdateProcessRequest { - string id = 1; - string pid = 2; - bool closeStdin = 3; // Close stdin of the container - uint32 width = 4; - uint32 height = 5; -} - -message UpdateProcessResponse { -} - -message CreateContainerRequest { - string id = 1; // ID of container - string bundlePath = 2; // path to OCI bundle - string checkpoint = 3; // checkpoint name if you want to create an immediate checkpoint (optional) - string stdin = 4; // path to the file where stdin will be read (optional) - string stdout = 5; // path to file where stdout will be written (optional) - string stderr = 6; // path to file where stderr will be written (optional) - repeated string labels = 7; - bool noPivotRoot = 8; - string runtime = 9; - repeated string runtimeArgs = 10; - string checkpointDir = 11; // Directory where checkpoints are stored -} - -message CreateContainerResponse { - Container container = 1; -} - -message SignalRequest { - string id = 1; // ID of container - string pid = 2; // PID of process inside container - uint32 signal = 3; // Signal which will be sent; you can find values in "man 7 signal" -} - -message SignalResponse { -} - -message AddProcessRequest { - string id = 1; // ID of container - bool terminal = 2; // Use tty for container stdio - User user = 3; // User under which process will be run - repeated string args = 4; // Arguments for process, first is binary path itself - repeated string env = 5; // List of environment variables for process - string cwd = 6; // Working directory of process - string pid = 7; // Process ID - string stdin = 8; // path to the file where stdin will be read (optional) - string stdout = 9; // path to file where stdout will be written (optional) - string stderr = 10; // path to file where stderr will be written (optional) - repeated string capabilities = 11; - string apparmorProfile = 12; - string selinuxLabel = 13; - bool noNewPrivileges = 14; - repeated Rlimit rlimits = 15; -} - -message Rlimit { - string type = 1; - uint64 soft = 2; - uint64 hard = 3; -} - -message User { - uint32 uid = 1; // UID of user - uint32 gid = 2; // GID of user - repeated uint32 additionalGids = 3; // Additional groups to which user will be added -} - -message AddProcessResponse { -} - -message CreateCheckpointRequest { - string id = 1; // ID of container - Checkpoint checkpoint = 2; // Checkpoint configuration - string checkpointDir = 3; // Directory where checkpoints are stored -} - -message CreateCheckpointResponse { -} - -message DeleteCheckpointRequest { - string id = 1; // ID of container - string name = 2; // Name of checkpoint - string checkpointDir = 3; // Directory where checkpoints are stored -} - -message DeleteCheckpointResponse { -} - -message ListCheckpointRequest { - string id = 1; // ID of container - string checkpointDir = 2; //
Directory where checkpoints are stored -} - -message Checkpoint { - string name = 1; // Name of checkpoint - bool exit = 2; // checkpoint configuration: should container exit on checkpoint or not - bool tcp = 3; // allow open tcp connections - bool unixSockets = 4; // allow external unix sockets - bool shell = 5; // allow shell-jobs - repeated string emptyNS = 6; -} - -message ListCheckpointResponse { - repeated Checkpoint checkpoints = 1; // List of checkpoints -} - -message StateRequest { - string id = 1; // container id for a single container -} - -message ContainerState { - string status = 1; -} - -message Process { - string pid = 1; - bool terminal = 2; // Use tty for container stdio - User user = 3; // User under which process will be run - repeated string args = 4; // Arguments for process, first is binary path itself - repeated string env = 5; // List of environment variables for process - string cwd = 6; // Working directory of process - uint32 systemPid = 7; - string stdin = 8; // path to the file where stdin will be read (optional) - string stdout = 9; // path to file where stdout will be written (optional) - string stderr = 10; // path to file where stderr will be written (optional) - repeated string capabilities = 11; - string apparmorProfile = 12; - string selinuxLabel = 13; - bool noNewPrivileges = 14; - repeated Rlimit rlimits = 15; -} - -message Container { - string id = 1; // ID of container - string bundlePath = 2; // Path to OCI bundle - repeated Process processes = 3; // List of processes which run in container - string status = 4; // Container status ("running", "paused", etc.) - repeated string labels = 5; - repeated uint32 pids = 6; - string runtime = 7; // runtime used to execute the container -} - -// Machine is information about machine on which containerd is run -message Machine { - uint32 cpus = 1; // number of cpus - uint64 memory = 2; // amount of memory -} - -// StateResponse is information about containerd daemon -message StateResponse { - repeated Container containers = 1; - Machine machine = 2; -} - -message UpdateContainerRequest { - string id = 1; // ID of container - string pid = 2; - string status = 3; // Status to which containerd will try to change - UpdateResource resources = 4; -} - -message UpdateResource { - uint64 blkioWeight = 1; - uint64 cpuShares = 2; - uint64 cpuPeriod = 3; - uint64 cpuQuota = 4; - string cpusetCpus = 5; - string cpusetMems = 6; - uint64 memoryLimit = 7; - uint64 memorySwap = 8; - uint64 memoryReservation = 9; - uint64 kernelMemoryLimit = 10; - uint64 kernelTCPMemoryLimit = 11; -} - -message UpdateContainerResponse { -} - -message EventsRequest { - uint64 timestamp = 1; -} - -message Event { - string type = 1; - string id = 2; - uint32 status = 3; - string pid = 4; - uint64 timestamp = 5; -} - -message NetworkStats { - string name = 1; // name of network interface - uint64 rx_bytes = 2; - uint64 rx_Packets = 3; - uint64 Rx_errors = 4; - uint64 Rx_dropped = 5; - uint64 Tx_bytes = 6; - uint64 Tx_packets = 7; - uint64 Tx_errors = 8; - uint64 Tx_dropped = 9; -} - -message CpuUsage { - uint64 total_usage = 1; - repeated uint64 percpu_usage = 2; - uint64 usage_in_kernelmode = 3; - uint64 usage_in_usermode = 4; -} - -message ThrottlingData { - uint64 periods = 1; - uint64 throttled_periods = 2; - uint64 throttled_time = 3; -} - -message CpuStats { - CpuUsage cpu_usage = 1; - ThrottlingData throttling_data = 2; - uint64 system_usage = 3; -} - -message PidsStats { - uint64 current = 1; - uint64 limit = 2; -} - -message MemoryData { -
uint64 usage = 1; - uint64 max_usage = 2; - uint64 failcnt = 3; - uint64 limit = 4; -} - -message MemoryStats { - uint64 cache = 1; - MemoryData usage = 2; - MemoryData swap_usage = 3; - MemoryData kernel_usage = 4; - map<string, uint64> stats = 5; -} - -message BlkioStatsEntry { - uint64 major = 1; - uint64 minor = 2; - string op = 3; - uint64 value = 4; -} - -message BlkioStats { - repeated BlkioStatsEntry io_service_bytes_recursive = 1; // number of bytes transferred to and from the block device - repeated BlkioStatsEntry io_serviced_recursive = 2; - repeated BlkioStatsEntry io_queued_recursive = 3; - repeated BlkioStatsEntry io_service_time_recursive = 4; - repeated BlkioStatsEntry io_wait_time_recursive = 5; - repeated BlkioStatsEntry io_merged_recursive = 6; - repeated BlkioStatsEntry io_time_recursive = 7; - repeated BlkioStatsEntry sectors_recursive = 8; -} - -message HugetlbStats { - uint64 usage = 1; - uint64 max_usage = 2; - uint64 failcnt = 3; - uint64 limit = 4; -} - -message CgroupStats { - CpuStats cpu_stats = 1; - MemoryStats memory_stats = 2; - BlkioStats blkio_stats = 3; - map<string, HugetlbStats> hugetlb_stats = 4; // the map is in the format "size of hugepage: stats of the hugepage" - PidsStats pids_stats = 5; -} - -message StatsResponse { - repeated NetworkStats network_stats = 1; - CgroupStats cgroup_stats = 2; - uint64 timestamp = 3; -}; - -message StatsRequest { - string id = 1; -} diff --git a/vendor/src/github.com/docker/distribution/.gitignore b/vendor/src/github.com/docker/distribution/.gitignore deleted file mode 100644 index 1c3ae0a77..000000000 --- a/vendor/src/github.com/docker/distribution/.gitignore +++ /dev/null @@ -1,37 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# never checkin from the bin file (for now) -bin/* - -# Test key files -*.pem - -# Cover profiles -*.out - -# Editor/IDE specific files.
-*.sublime-project -*.sublime-workspace diff --git a/vendor/src/github.com/docker/distribution/.mailmap b/vendor/src/github.com/docker/distribution/.mailmap deleted file mode 100644 index d99106019..000000000 --- a/vendor/src/github.com/docker/distribution/.mailmap +++ /dev/null @@ -1,18 +0,0 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland -Brian Bland Brian Bland -Josh Hawn Josh Hawn -Richard Scothern Richard -Richard Scothern Richard Scothern -Andrew Meredith Andrew Meredith -harche harche -Jessie Frazelle -Sharif Nassar Sharif Nassar -Sven Dowideit Sven Dowideit -Vincent Giersch Vincent Giersch -davidli davidli -Omer Cohen Omer Cohen -Eric Yang Eric Yang -Nikita Tarasov Nikita diff --git a/vendor/src/github.com/docker/distribution/AUTHORS b/vendor/src/github.com/docker/distribution/AUTHORS deleted file mode 100644 index 9e80e062b..000000000 --- a/vendor/src/github.com/docker/distribution/AUTHORS +++ /dev/null @@ -1,147 +0,0 @@ -Aaron Lehmann -Aaron Schlesinger -Aaron Vinson -Adam Enger -Adrian Mouat -Ahmet Alp Balkan -Alex Chan -Alex Elman -Alexey Gladkov -allencloud -amitshukla -Amy Lindburg -Andrew Hsu -Andrew Meredith -Andrew T Nguyen -Andrey Kostov -Andy Goldstein -Anis Elleuch -Anton Tiurin -Antonio Mercado -Antonio Murdaca -Arien Holthuizen -Arnaud Porterie -Arthur Baars -Asuka Suzuki -Avi Miller -Ayose Cazorla -BadZen -Ben Firshman -bin liu -Brian Bland -burnettk -Carson A -Chris Dillon -cyli -Daisuke Fujita -Daniel Huhn -Darren Shepherd -Dave Trombley -Dave Tucker -David Lawrence -David Verhasselt -David Xia -davidli -Dejan Golja -Derek McGowan -Diogo Mónica -DJ Enriquez -Donald Huang -Doug Davis -Eric Yang -Fabio Huser -farmerworking -Felix Yan -Florentin Raud -Frederick F. Kautz IV -gabriell nascimento -Gleb Schukin -harche -Henri Gomez -Hu Keping -Hua Wang -HuKeping -Ian Babrou -igayoso -Jack Griffin -Jason Freidman -Jeff Nickoloff -Jessie Frazelle -jhaohai -Jianqing Wang -John Starks -Jon Johnson -Jon Poler -Jonathan Boulle -Jordan Liggitt -Josh Hawn -Julien Fernandez -Ke Xu -Keerthan Mala -Kelsey Hightower -Kenneth Lim -Kenny Leung -Li Yi -Liu Hua -liuchang0812 -Louis Kottmann -Luke Carpenter -Mary Anthony -Matt Bentley -Matt Duch -Matt Moore -Matt Robenolt -Michael Prokop -Michal Minar -Miquel Sabaté -Morgan Bauer -moxiegirl -Nathan Sullivan -nevermosby -Nghia Tran -Nikita Tarasov -Nuutti Kotivuori -Oilbeater -Olivier Gambier -Olivier Jacques -Omer Cohen -Patrick Devine -Phil Estes -Philip Misiowiec -Richard Scothern -Rodolfo Carvalho -Rusty Conover -Sean Boran -Sebastiaan van Stijn -Serge Dubrouski -Sharif Nassar -Shawn Falkner-Horine -Shreyas Karnik -Simon Thulbourn -Spencer Rinehart -Stefan Majewsky -Stefan Weil -Stephen J Day -Sungho Moon -Sven Dowideit -Sylvain Baubeau -Ted Reed -tgic -Thomas Sjögren -Tianon Gravi -Tibor Vass -Tonis Tiigi -Tony Holdstock-Brown -Trevor Pounds -Troels Thomsen -Vincent Batts -Vincent Demeester -Vincent Giersch -W. Trevor King -weiyuan.yl -xg.song -xiekeyang -Yann ROBERT -yuzou -zhouhaibing089 -姜继忠 diff --git a/vendor/src/github.com/docker/distribution/BUILDING.md b/vendor/src/github.com/docker/distribution/BUILDING.md deleted file mode 100644 index d9577022b..000000000 --- a/vendor/src/github.com/docker/distribution/BUILDING.md +++ /dev/null @@ -1,119 +0,0 @@ - -# Building the registry source - -## Use-case - -This is useful if you intend to actively work on the registry. 
- -### Alternatives - -Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). - -People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. - -OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md). - -### Gotchas - -You are expected to know your way around with go & git. - -If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. - -## Build the development environment - -The first prerequisite of properly building distribution targets is to have a Go -development environment set up. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. - -If a Go development environment is set up, one can use `go get` to install the -`registry` command from the current latest: - - go get github.com/docker/distribution/cmd/registry - -The above will install the source repository into the `GOPATH`. - -Now create the directory for the registry data (this might require you to set permissions properly) - - mkdir -p /var/lib/registry - -... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data in another location. - -The `registry` -binary can then be run with the following: - - $ $GOPATH/bin/registry --version - $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown - -> __NOTE:__ While you do not need to use `go get` to check out the distribution -> project, for these build instructions to work, the project must be checked -> out in the correct location in the `GOPATH`. This should almost always be -> `$GOPATH/src/github.com/docker/distribution`. - -The registry can be run with the default config using the following -incantation: - - $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml - INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] debug server listening localhost:5001 - -If it is working, one should see the above log messages. - -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - - go get github.com/tools/godep github.com/golang/lint/golint - -**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - - $ make - + clean - + fmt - + vet - + lint - + build - github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/Sirupsen/logrus - github.com/docker/libtrust - ...
- github.com/yvasiyarov/gorelic - github.com/docker/distribution/registry/handlers - github.com/docker/distribution/cmd/registry - + test - ... - ok github.com/docker/distribution/digest 7.875s - ok github.com/docker/distribution/manifest 0.028s - ok github.com/docker/distribution/notifications 17.322s - ? github.com/docker/distribution/registry [no test files] - ok github.com/docker/distribution/registry/api/v2 0.101s - ? github.com/docker/distribution/registry/auth [no test files] - ok github.com/docker/distribution/registry/auth/silly 0.011s - ... - + /Users/sday/go/src/github.com/docker/distribution/bin/registry - + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template - + binaries - -The above provides a repeatable build using the contents of the vendored -Godeps directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - - $ ./bin/registry -version - ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m - -### Optional build tags - -Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `DOCKER_BUILDTAGS`. diff --git a/vendor/src/github.com/docker/distribution/CHANGELOG.md b/vendor/src/github.com/docker/distribution/CHANGELOG.md deleted file mode 100644 index 3445c090c..000000000 --- a/vendor/src/github.com/docker/distribution/CHANGELOG.md +++ /dev/null @@ -1,35 +0,0 @@ -# Changelog - -## 2.5.0 (2016-06-14) - -### Storage -- Ensure uploads directory is cleaned after upload is committed -- Add ability to cap concurrent operations in filesystem driver -- S3: Add 'us-gov-west-1' to the valid region list -- Swift: Handle ceph not returning Last-Modified header for HEAD requests -- Add redirect middleware - -### Registry -- Add support for blobAccessController middleware -- Add support for layers from foreign sources -- Remove signature store -- Add support for Let's Encrypt -- Correct yaml key names in configuration - -### Client -- Add option to get content digest from manifest get - -### Spec -- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported -- Clarify API documentation around catalog fetch behavior - -### API -- Support returning HTTP 429 (Too Many Requests) - -### Documentation -- Update auth documentation examples to show "expires in" as int - -### Docker Image -- Use Alpine Linux as base image - - diff --git a/vendor/src/github.com/docker/distribution/CONTRIBUTING.md b/vendor/src/github.com/docker/distribution/CONTRIBUTING.md deleted file mode 100644 index 7cc7aedff..000000000 --- a/vendor/src/github.com/docker/distribution/CONTRIBUTING.md +++ /dev/null @@ -1,140 +0,0 @@ -# Contributing to the registry - -## Before reporting an issue... - -### If your problem is with... - - - automated builds - - your account on the [Docker Hub](https://hub.docker.com/) - - any other [Docker Hub](https://hub.docker.com/) issue - -Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) - -### If you...
- - need help setting up your registry - - can't figure out something - - are not sure what's going on or what your problem is - -Then please do not open an issue here yet - you should first try one of the following support forums: - - - irc: #docker-distribution on freenode - - mailing-list: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution - -## Reporting an issue properly - -By following these simple rules you will get better and faster feedback on your issue. - - - search the bugtracker for an already reported issue - -### If you found an issue that describes your problem: - - - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - - please refrain from adding "same thing here" or "+1" comments - - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - - comment if you have some new, technical and relevant information to add to the case - - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. - -### If you have not found an existing issue that describes your problem: - - 1. create a new issue, with a succinct title that describes your issue: - - bad title: "It doesn't work with my docker" - - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" - 2. copy the output of: - - `docker version` - - `docker info` - - `docker exec registry -version` - 3. copy the command line you used to launch your Registry - 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) - 5. reproduce your problem and get your docker daemon logs showing the error - 6. if relevant, copy your registry logs that show the error - 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) - 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry - -## Contributing a patch for a known bug, or a small correction - -You should follow the basic GitHub workflow: - - 1. fork - 2. commit a change - 3. make sure the tests pass - 4. PR - -Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: - - - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` - - sign your commits using `-s`: `git commit -s -m "My commit"` - -Some simple rules to ensure quick merge: - - - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) - - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - - if you need to amend your PR following comments, please squash instead of adding more commits - -## Contributing new features - -You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. - -If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. -If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.
- -Then you should submit your implementation, clearly linking to the issue (and possible proposal). - -Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. - -It's mandatory to: - - - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) - - address maintainers' comments and modify your submission accordingly - - write tests for any new code - -Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. - -Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go -community. While some of these standards may seem arbitrary, they somehow seem -to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these -guidelines. We are not looking for a massive PR that fixes this, since that -goes against the spirit of the guidelines. All new contributions should make a -best effort to clean up and make the code base better than they found it. -Obviously, apply your best judgement. Remember, the goal here is to make the -code base easier for humans to navigate and understand. Always keep that in -mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2. All code should pass the default levels of - [`golint`](https://github.com/golang/lint). -3. All code should follow the guidelines covered in [Effective - Go](http://golang.org/doc/effective_go.html) and [Go Code Review - Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. Comment the code. Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare - expectations, caveats and anything else that may be important. If a type - gets exported, having the comments already there will ensure it's ready. -6. Variable name length should be proportional to its context and no longer. - `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. - In practice, short methods will have short variable names and globals will - have longer names. -7. No underscores in package names. If you need a compound name, step back, - and re-examine why you need a compound name. If you still think you need a - compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to - warrant its own package, it has not been written generally enough to be a - part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be - required. No, we don't need another unit testing framework. Assertion - packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just - guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend -reading through [Effective Go](http://golang.org/doc/effective_go.html). The -[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the -kool-aid is a lot easier than going thirsty.
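The documentation rules above are easiest to absorb from a small example. The sketch below is hypothetical (the `blobmath` package and its names are invented purely for illustration, not part of the code base), but it is gofmt-clean and documents every declaration in the way rules 4 and 5 ask for:

```go
// Package blobmath is a hypothetical example used only to illustrate the
// documentation rules above; it is not part of the registry code base.
package blobmath

// MaxChunkSize is the largest chunk, in bytes, that Split will produce.
// It is a variable rather than a constant so that tests can lower it.
var MaxChunkSize = 1 << 20

// Split divides p into chunks of at most MaxChunkSize bytes. The final
// chunk may be shorter. Split never returns an empty chunk; splitting an
// empty slice yields a nil result.
func Split(p []byte) [][]byte {
	var chunks [][]byte
	for len(p) > 0 {
		n := len(p)
		if n > MaxChunkSize {
			n = MaxChunkSize
		}
		chunks = append(chunks, p[:n])
		p = p[n:]
	}
	return chunks
}
```

Note how the comments carry the caveats (empty input, final chunk length) rather than restating the signature; that is the spirit of rule 5.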
diff --git a/vendor/src/github.com/docker/distribution/Dockerfile b/vendor/src/github.com/docker/distribution/Dockerfile deleted file mode 100644 index fa9cd4627..000000000 --- a/vendor/src/github.com/docker/distribution/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM golang:1.6-alpine - -ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution -ENV DOCKER_BUILDTAGS include_oss include_gcs - -WORKDIR $DISTRIBUTION_DIR -COPY . $DISTRIBUTION_DIR -COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml - -RUN set -ex \ - && apk add --no-cache make git - -RUN make PREFIX=/go clean binaries - -VOLUME ["/var/lib/registry"] -EXPOSE 5000 -ENTRYPOINT ["registry"] -CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/src/github.com/docker/distribution/LICENSE b/vendor/src/github.com/docker/distribution/LICENSE deleted file mode 100644 index e06d20818..000000000 --- a/vendor/src/github.com/docker/distribution/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/src/github.com/docker/distribution/MAINTAINERS b/vendor/src/github.com/docker/distribution/MAINTAINERS deleted file mode 100644 index bda400150..000000000 --- a/vendor/src/github.com/docker/distribution/MAINTAINERS +++ /dev/null @@ -1,58 +0,0 @@ -# Distribution maintainers file -# -# This file describes who runs the docker/distribution project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "aaronlehmann", - "dmcgowan", - "dmp42", - "richardscothern", - "shykes", - "stevvooe", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - - [people.dmcgowan] - Name = "Derek McGowan" - Email = "derek@mcgstyle.net" - GitHub = "dmcgowan" - - [people.dmp42] - Name = "Olivier Gambier" - Email = "olivier@docker.com" - GitHub = "dmp42" - - [people.richardscothern] - Name = "Richard Scothern" - Email = "richard.scothern@gmail.com" - GitHub = "richardscothern" - - [people.shykes] - Name = "Solomon Hykes" - Email = "solomon@docker.com" - GitHub = "shykes" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" diff --git a/vendor/src/github.com/docker/distribution/Makefile b/vendor/src/github.com/docker/distribution/Makefile deleted file mode 100644 index a0602d0b2..000000000 --- a/vendor/src/github.com/docker/distribution/Makefile +++ /dev/null @@ -1,106 +0,0 @@ -# Set an output prefix, which is the local directory if not specified -PREFIX?=$(shell pwd) - - -# Used to populate version variable in main package. 
-VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) - -# Allow turning off function inlining and variable registerization -ifeq (${DISABLE_OPTIMIZATION},true) - GO_GCFLAGS=-gcflags "-N -l" - VERSION:="$(VERSION)-noopt" -endif - -GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" - -.PHONY: clean all fmt vet lint build test binaries -.DEFAULT: all -all: fmt vet lint build test binaries - -AUTHORS: .mailmap .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ - -# This only needs to be generated by hand when cutting full releases. -version/version.go: - ./version/version.sh > $@ - -# Required for go 1.5 to build -GO15VENDOREXPERIMENT := 1 - -# Package list -PKGS := $(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/) - -# Resolving binary dependencies for specific targets -GOLINT := $(shell which golint || echo '') -GODEP := $(shell which godep || echo '') - -${PREFIX}/bin/registry: $(wildcard **/*.go) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry - -${PREFIX}/bin/digest: $(wildcard **/*.go) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest - -${PREFIX}/bin/registry-api-descriptor-template: $(wildcard **/*.go) - @echo "+ $@" - @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template - -docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template - ./bin/registry-api-descriptor-template $< > $@ - -vet: - @echo "+ $@" - @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -fmt: - @echo "+ $@" - @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ - (echo >&2 "+ please format Go code with 'gofmt -s'" && false) - -lint: - @echo "+ $@" - $(if $(GOLINT), , \ - $(error Please install golint: `go get -u github.com/golang/lint/golint`)) - @test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" - -build: - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) - -test: - @echo "+ $@" - @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -test-full: - @echo "+ $@" - @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template - @echo "+ $@" - -clean: - @echo "+ $@" - @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" - -dep-save: - @echo "+ $@" - $(if $(GODEP), , \ - $(error Please install godep: go get github.com/tools/godep)) - @$(GODEP) save $(PKGS) - -dep-restore: - @echo "+ $@" - $(if $(GODEP), , \ - $(error Please install godep: go get github.com/tools/godep)) - @$(GODEP) restore -v - -dep-validate: dep-restore - @echo "+ $@" - @rm -Rf .vendor.bak - @mv vendor .vendor.bak - @rm -Rf Godeps - @$(GODEP) save ./... - @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ - (echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match with what you have in vendor" && false) - @rm -Rf .vendor.bak diff --git a/vendor/src/github.com/docker/distribution/README.md b/vendor/src/github.com/docker/distribution/README.md deleted file mode 100644 index d35bcb682..000000000 --- a/vendor/src/github.com/docker/distribution/README.md +++ /dev/null @@ -1,131 +0,0 @@ -# Distribution - -The Docker toolset to pack, ship, store, and deliver content. 
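The Makefile above injects the build version through the linker (`GO_LDFLAGS` with `-X ...Version=$(VERSION)`). A minimal sketch of the Go side of that pattern, with an illustrative package path and default value, might look like this:

```go
// Package version is a minimal sketch of the pattern the Makefile above
// relies on: a plain string variable that the linker overwrites at build
// time, e.g.
//
//	go build -ldflags "-X <module path>/version.Version=v2.5.0"
package version

// Version reports the build version. The default marks binaries that were
// built without the linker flag, mirroring the "+unknown" suffix seen in
// the registry's --version output.
var Version = "v0.0.0+unknown"
```

Because the variable is set at link time, no code generation step is needed for ordinary builds; the checked-in default only matters for `go build` invocations that bypass the Makefile.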
- -This repository's main product is the Docker Registry 2.0 implementation -for storing and distributing Docker images. It supersedes the -[docker/docker-registry](https://github.com/docker/docker-registry) -project with a new API design, focused on security and performance. - - - -[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) -[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) - -This repository contains the following components: - -|**Component** |Description | |--------------------|-----------------------------------------------------------------------------------------------------| | **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | | **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | | **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | - -### How does this integrate with Docker engine? - -This project should provide an implementation of the V2 API for use in the [Docker -core project](https://github.com/docker/docker). The API should be embeddable -and simplify the process of securely pulling and pushing content from `docker` -daemons. - -### What are the long term goals of the Distribution project? - -The _Distribution_ project has the further long term goal of providing a -secure tool chain for distributing content. The specifications, APIs and tools -should be as useful with Docker as they are without. - -Our goal is to design a professional grade and extensible content distribution -system that allows users to: - -* Enjoy an efficient, secured and reliable way to store, manage, package and - exchange content -* Hack/roll their own on top of healthy open-source components -* Implement their own home-made solution on top of good specs and a solid - extension mechanism. - -## More about Registry 2.0 - -The new registry implementation provides the following benefits: - -- faster push and pull -- new, more efficient implementation -- simplified deployment -- pluggable storage backend -- webhook notifications - -For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). - -### Who needs to deploy a registry? - -By default, Docker users pull images from Docker's public registry instance. -[Installing Docker](https://docs.docker.com/engine/installation/) gives users this -ability. Users can also push images to a repository on Docker's public registry, -if they have a [Docker Hub](https://hub.docker.com/) account. - -For some users and even companies, this default behavior is sufficient. For -others, it is not. - -For example, users with their own software products may want to maintain a -registry for private, company images. Also, you may wish to deploy your own -image repository for images used in testing or continuous integration.
For these -use cases and others, [deploying your own registry instance](docs/deploying.md) -may be the better choice. - -### Migration to Registry 2.0 - -For those who have previously deployed their own registry based on the Registry -1.0 implementation and wish to deploy a Registry 2.0 while retaining images, -data migration is required. A tool to assist with migration efforts has been -created. For more information see [docker/migrator] -(https://github.com/docker/migrator). - -## Contribute - -Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute -issues, fixes, and patches to this project. If you are contributing code, see -the instructions for [building a development environment](docs/recipes/building.md). - -## Support - -If any issues are encountered while using the _Distribution_ project, several -avenues are available for support: - - - - - - - - - - - - - - - - - - -
- IRC: #docker-distribution on FreeNode
- Issue Tracker: github.com/docker/distribution/issues
- Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
- Mailing List: docker@dockerproject.org
## License - -This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/src/github.com/docker/distribution/ROADMAP.md b/vendor/src/github.com/docker/distribution/ROADMAP.md deleted file mode 100644 index 701127afe..000000000 --- a/vendor/src/github.com/docker/distribution/ROADMAP.md +++ /dev/null @@ -1,267 +0,0 @@ -# Roadmap - -The Distribution Project consists of several components, some of which are -still being defined. This document defines the high-level goals of the -project, identifies the current components, and defines the release -relationship to the Docker Platform. - -* [Distribution Goals](#distribution-goals) -* [Distribution Components](#distribution-components) -* [Project Planning](#project-planning): release relationship to the Docker Platform. - -This road map is a living document, providing an overview of the goals and -considerations made with respect to the future of the project. - -## Distribution Goals - -- Replace the existing [docker registry](github.com/docker/docker-registry) - implementation as the primary implementation. -- Replace the existing push and pull code in the docker engine with the - distribution package. -- Define a strong data model for distributing docker images -- Provide a flexible distribution tool kit for use in the docker platform -- Unlock new distribution models - -## Distribution Components - -Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming -features and bugfixes for a component will be added to the relevant milestone. If a feature or -bugfix is not part of a milestone, it is currently unscheduled for -implementation. - -* [Registry](#registry) -* [Distribution Package](#distribution-package) - -*** - -### Registry - -The new Docker registry is the main portion of the distribution repository. -Registry 2.0 is the first release of the next-generation registry. This release -focused primarily on implementing the [new registry -API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), -with an emphasis on security and performance. - -Following from the Distribution project goals above, we have a set of goals -for registry v2 that we would like to follow in the design. New features -should be compared against these goals. - -#### Data Storage and Distribution First - -The registry's first goal is to provide a reliable, consistent storage -location for Docker images. The registry should only provide the minimal -amount of indexing required to fetch image data and no more. - -This means we should be selective in new features and API additions, including -those that may require expensive, ever-growing indexes. Requests should be -servable in "constant time". - -#### Content Addressability - -All data objects used in the registry API should be content addressable. -Content identifiers should be secure and verifiable. This provides a secure, -reliable base from which to build more advanced content distribution systems. - -#### Content Agnostic - -In the past, changes to the image format would require large changes in Docker -and the Registry. By decoupling the distribution and image format, we can -allow the formats to progress without having to coordinate between the two. -This means that we should be focused on decoupling Docker from the registry -just as much as decoupling the registry from Docker. Such an approach will -allow us to unlock new distribution models that haven't been possible before.
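The content-addressability goal above is concrete enough to sketch. A content address is simply an identifier derived from the bytes themselves, rendered in the `<algorithm>:<hex>` form the registry API uses; any party holding the bytes can recompute and verify it. The helper name below is illustrative (the project's own `digest` package is the real implementation):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// contentAddress returns a digest string in the "<algorithm>:<hex>" form
// used throughout the registry API. Because the identifier is derived
// solely from the bytes, the same content always yields the same address,
// and tampering is detectable by recomputation.
func contentAddress(blob []byte) string {
	sum := sha256.Sum256(blob)
	return fmt.Sprintf("sha256:%x", sum)
}

func main() {
	fmt.Println(contentAddress([]byte("hello, registry")))
}
```

This is also why deduplication across repositories (discussed under Deletes below) is possible: two manifests referencing the same bytes necessarily reference the same address.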
- -We can take this further by saying that the new registry should be content -agnostic. The registry provides a model of names, tags, manifests and content -addresses, and that model can be used to work with content. - -#### Simplicity - -The new registry should be closer to a microservice component than its -predecessor. This means it should have a narrower API and a low number of -service dependencies. It should be easy to deploy. - -This means that other solutions should be explored before changing the API or -adding extra dependencies. If functionality is required, can it be added as an -extension or companion service? - -#### Extensibility - -The registry should provide extension points to add functionality, keeping -the core scope narrow. - -Features like search, indexing, synchronization and registry explorers fall -into this category. No such feature should be added unless we've found it -impossible to do through an extension. - -#### Active Feature Discussions - -The following are feature discussions that are currently active. - -If you don't see your favorite, unimplemented feature, feel free to contact us -via IRC or the mailing list and we can talk about adding it. The goal here is -to make sure that new features go through a rigid design process before -landing in the registry. - -##### Proxying to other Registries - -A _pull-through caching_ mode exists for the registry, but is restricted from -within the docker client to only mirror the official Docker Hub. This functionality -can be expanded when image provenance has been specified and implemented in the -distribution project. - -##### Metadata storage - -Metadata for the registry is currently stored with the manifest and layer data on -the storage backend. While this is a big win for simplicity and reliably maintaining -state, it comes with the cost of consistency and high latency. The mutable registry -metadata operations should be abstracted behind an API which will allow ACID compliant -storage systems to handle metadata. - -##### Peer to Peer transfer - -Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit - -##### Indexing, Search and Discovery - -The original registry provided some implementation of search for use with -private registries. Support has been elided from V2 since we'd like to -decouple search functionality from the registry. This makes the registry -simpler to deploy, especially in use cases where search is not needed, and -lets us decouple the image format from the registry. - -There are explorations into using the catalog API and notification system to -build external indexes. The current line of thought is that we will define a -common search API to index and query docker images. Such a system could be run -as a companion to a registry or set of registries to power discovery. - -The main issue with search and discovery is that there are so many ways to -accomplish it. There are two aspects to this project. The first is deciding -how it will be done, including an API definition that can work with changing -data formats. The second is the process of integrating with `docker search`. -We expect that someone will attempt to address the problem with the existing tools -and propose it as a standard search API, or use it to inform a standardization -process. Once this has been explored, we will integrate with the docker client.
- -Please see the following for more detail: - -- https://github.com/docker/distribution/issues/206 - -##### Deletes - -> __NOTE:__ Deletes are a much asked-for feature. Before requesting this -feature or participating in discussion, we ask that you read this section in -full and understand the problems behind deletes. - -While, at first glance, implementing deletes seems simple, there are a number -of mitigating factors that make many solutions not ideal or even pathological in -the context of a registry. The following paragraphs discuss the background and -approaches that could be applied to arrive at a solution. - -The goal of deletes in any system is to remove unused or unneeded data. Only -data requested for deletion should be removed and no other data. Removing -unintended data is worse than _not_ removing data that was requested for -removal, but ideally both are supported. Generally, according to this rule, we -err on the side of holding data longer than needed, ensuring that it is only removed when -we can be certain that it can be removed. With the current behavior, we opt to -hold onto the data forever, ensuring that data cannot be incorrectly removed. - -To understand the problems with implementing deletes, one must understand the -data model. All registry data is stored in a filesystem layout, implemented on -a "storage driver", effectively a _virtual file system_ (VFS). The storage -system must assume that this VFS layer will be eventually consistent and has -poor read-after-write consistency, since this is the lowest common denominator -among the storage drivers. This is mitigated by writing values in -reverse-dependent order, but makes wider transactional operations unsafe. - -Layered on the VFS model is a content-addressable _directed, acyclic graph_ -(DAG) made up of blobs. Manifests reference layers. Tags reference manifests. -Since the same data can be referenced by multiple manifests, we only store -data once, even if it is in different repositories. Thus, we have a set of -blobs, referenced by tags and manifests. If we want to delete a blob we need -to be certain that it is no longer referenced by another manifest or tag. When -we delete a manifest, we can also try to delete the referenced blobs. Deciding -whether or not a blob has an active reference is the crux of the problem. - -Conceptually, deleting a manifest and its resources is quite simple. Just find -all the manifests, enumerate the referenced blobs and delete the blobs not in -that set. An astute observer will recognize this as a garbage collection -problem. As with garbage collection in programming languages, this is very -simple when one always has a consistent view. When one adds parallelism and an -inconsistent view of data, it becomes very challenging. - -A simple example can demonstrate this. Let's say we are deleting a manifest -_A_ in one process. We scan the manifest and decide that all the blobs are -ready for deletion. Concurrently, we have another process accepting a new -manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ -is accepted and all the blobs are considered present, so the operation -proceeds. The original process then deletes the referenced blobs, assuming -they were unreferenced. The manifest _B_, which we thought had all of its data -present, can no longer be served by the registry, since the dependent data has -been deleted. - -Deleting data from the registry safely requires some way to coordinate this -operation.
The following approaches are being considered: - -- _Reference Counting_ - Maintain a count of references to each blob. This is - challenging for a number of reasons: (1) maintaining a consistent consensus - of reference counts across a set of registries and (2) building the initial - list of reference counts for an existing registry. These challenges can be - met with a consensus protocol like Paxos or Raft in the first case and a - necessary but simple scan in the second case. -- _Lock the World GC_ - Halt all writes to the data store. Walk the data store - and find all blob references. Delete all unreferenced blobs. This approach - is very simple but requires disabling writes for a period of time while the - service reads all data. This is slow and expensive but very accurate and - effective. -- _Generational GC_ - Do something similar to the above but instead of blocking - writes, writes are sent to another storage backend while reads are broadcast - to the new and old backends. GC is then performed on the read-only portion. - Because writes land in the new backend, the data in the read-only section - can be safely deleted. The main drawbacks of this approach are complexity - and coordination. -- _Centralized Oracle_ - Using a centralized, transactional database, we can - know exactly which data is referenced at any given time. This avoids the - coordination problem by managing this data in a single location. We trade - off metadata scalability for simplicity and performance. This is a very good - option for most registry deployments. This would create a bottleneck for - registry metadata. However, metadata is generally not the main bottleneck - when serving images. - -Please let us know if other solutions exist that we have yet to enumerate. -Note that for any approach, implementation is a massive consideration. For -example, a mark-sweep based solution may seem simple, but the coordination -work involved can offset the extra work it might take to build a _Centralized -Oracle_. We'll accept proposals for any solution but please coordinate with us -before dropping code. - -At this time, we have traded off simplicity and ease of deployment for disk -space. Simplicity and ease of deployment tend to reduce developer involvement, -which is currently the most expensive resource in software engineering. Taking -on any solution for deletes will greatly affect these factors, trading off -very cheap disk space for a complex deployment and operational story. - -Please see the following issues for more detail: - -- https://github.com/docker/distribution/issues/422 -- https://github.com/docker/distribution/issues/461 -- https://github.com/docker/distribution/issues/462 - -### Distribution Package - -At its core, the Distribution Project is a set of Go packages that make up -Distribution Components. At this time, most of these packages make up the -Registry implementation. - -The package itself is considered unstable. If you're using it, please take care to vendor the version you depend on. - -For feature additions, please see the Registry section. In the future, we may break out a -separate Roadmap for distribution-specific features that apply to more than -just the registry. - -*** - -### Project Planning - -An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
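Assuming the consistent snapshot that the roadmap identifies as the hard part, the mark-and-sweep framing above reduces to a few lines. The sketch below is illustrative only (the types and names are not the registry's actual data structures): mark every blob reachable from a manifest, then report the rest as garbage.

```go
package main

import "fmt"

// Manifest references a set of blobs by digest. Registry is an illustrative
// stand-in for a frozen, consistent snapshot of the store.
type Manifest struct{ Blobs []string }

type Registry struct {
	Manifests map[string]Manifest
	Blobs     map[string][]byte
}

// sweep returns the digests of blobs referenced by no manifest. This is only
// safe against a frozen view of the registry: with concurrent writes, a blob
// may gain a reference after being marked unreachable, which is exactly the
// race the roadmap text describes.
func sweep(r Registry) []string {
	marked := map[string]bool{}
	for _, m := range r.Manifests {
		for _, d := range m.Blobs {
			marked[d] = true
		}
	}
	var garbage []string
	for d := range r.Blobs {
		if !marked[d] {
			garbage = append(garbage, d)
		}
	}
	return garbage
}

func main() {
	r := Registry{
		Manifests: map[string]Manifest{"A": {Blobs: []string{"sha256:aa"}}},
		Blobs:     map[string][]byte{"sha256:aa": nil, "sha256:bb": nil},
	}
	fmt.Println(sweep(r)) // [sha256:bb]
}
```

The simplicity of the sweep itself is the point: all of the engineering difficulty in the approaches listed above lies in obtaining or approximating that frozen view, not in the traversal.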
- diff --git a/vendor/src/github.com/docker/distribution/blobs.go b/vendor/src/github.com/docker/distribution/blobs.go deleted file mode 100644 index d12533011..000000000 --- a/vendor/src/github.com/docker/distribution/blobs.go +++ /dev/null @@ -1,245 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "io" - "net/http" - "time" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" -) - -var ( - // ErrBlobExists returned when blob already exists - ErrBlobExists = errors.New("blob exists") - - // ErrBlobDigestUnsupported when blob digest is an unsupported version. - ErrBlobDigestUnsupported = errors.New("unsupported blob digest") - - // ErrBlobUnknown when blob is not found. - ErrBlobUnknown = errors.New("unknown blob") - - // ErrBlobUploadUnknown returned when upload is not found. - ErrBlobUploadUnknown = errors.New("blob upload unknown") - - // ErrBlobInvalidLength returned when the blob has an expected length on - // commit, meaning mismatched with the descriptor or an invalid value. - ErrBlobInvalidLength = errors.New("blob invalid length") -) - -// ErrBlobInvalidDigest returned when digest check fails. -type ErrBlobInvalidDigest struct { - Digest digest.Digest - Reason error -} - -func (err ErrBlobInvalidDigest) Error() string { - return fmt.Sprintf("invalid digest for referenced layer: %v, %v", - err.Digest, err.Reason) -} - -// ErrBlobMounted returned when a blob is mounted from another repository -// instead of initiating an upload session. -type ErrBlobMounted struct { - From reference.Canonical - Descriptor Descriptor -} - -func (err ErrBlobMounted) Error() string { - return fmt.Sprintf("blob mounted from: %v to: %v", - err.From, err.Descriptor) -} - -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describes the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // URLs contains the source URLs of this content. - URLs []string `json:"urls,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. -} - -// Descriptor returns the descriptor, to make it satisfy the Describable -// interface. Note that implementations of Describable are generally objects -// which can be described, not simply descriptors; this exception is in place -// to make it more convenient to pass actual descriptors to functions that -// expect Describable objects. -func (d Descriptor) Descriptor() Descriptor { - return d -} - -// BlobStatter makes blob descriptors available by digest. The service may -// provide a descriptor of a different digest if the provided digest is not -// canonical. -type BlobStatter interface { - // Stat provides metadata about a blob identified by the digest. If the - // blob is unknown to the describer, ErrBlobUnknown will be returned.
- Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) -} - -// BlobDeleter enables deleting blobs from storage. -type BlobDeleter interface { - Delete(ctx context.Context, dgst digest.Digest) error -} - -// BlobEnumerator enables iterating over blobs from storage. -type BlobEnumerator interface { - Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error -} - -// BlobDescriptorService manages metadata about a blob by digest. Most -// implementations will not expose such an interface explicitly. Such mappings -// should be maintained by interacting with the BlobIngester. Hence, this is -// left off of BlobService and BlobStore. -type BlobDescriptorService interface { - BlobStatter - - // SetDescriptor assigns the descriptor to the digest. The provided digest and - // the digest in the descriptor must map to identical content but they may - // differ on their algorithm. The descriptor must have the canonical - // digest of the content and the digest algorithm must match the - // annotator's canonical algorithm. - // - // Such a facility can be used to map blobs between digest domains, with - // the restriction that the algorithm of the descriptor must match the - // canonical algorithm (i.e. sha256) of the annotator. - SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error - - // Clear enables descriptors to be unlinked - Clear(ctx context.Context, dgst digest.Digest) error -} - -// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. -type BlobDescriptorServiceFactory interface { - BlobAccessController(svc BlobDescriptorService) BlobDescriptorService -} - -// ReadSeekCloser is the primary reader type for blob data, combining -// io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// BlobProvider describes operations for getting blob data. -type BlobProvider interface { - // Get returns the entire blob identified by digest along with the descriptor. - Get(ctx context.Context, dgst digest.Digest) ([]byte, error) - - // Open provides a ReadSeekCloser to the blob identified by the provided - // descriptor. If the blob is not known to the service, an error will be - // returned. - Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) -} - -// BlobServer can serve blobs via http. -type BlobServer interface { - // ServeBlob attempts to serve the blob, identified by dgst, via http. The - // service may decide to redirect the client elsewhere or serve the data - // directly. - // - // This handler only issues successful responses, such as 2xx or 3xx, - // meaning it serves data or issues a redirect. If the blob is not - // available, an error will be returned and the caller may still issue a - // response. - // - // The implementation may serve the same blob from a different digest - // domain. The appropriate headers will be set for the blob, unless they - // have already been set by the caller. - ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error -} - -// BlobIngester ingests blob data. -type BlobIngester interface { - // Put inserts the content p into the blob service, returning a descriptor - // or an error. - Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) - - // Create allocates a new blob writer to add a blob to this service. The - // returned handle can be written to and later resumed using an opaque - // identifier.
With this approach, one can Close and Resume a BlobWriter - // multiple times until the BlobWriter is committed or cancelled. - Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) - - // Resume attempts to resume a write to a blob, identified by an id. - Resume(ctx context.Context, id string) (BlobWriter, error) -} - -// BlobCreateOption is a general extensible function argument for blob creation -// methods. A BlobIngester may choose to honor any or none of the given -// BlobCreateOptions, which can be specific to the implementation of the -// BlobIngester receiving them. -// TODO (brianbland): unify this with ManifestServiceOption in the future -type BlobCreateOption interface { - Apply(interface{}) error -} - -// BlobWriter provides a handle for inserting data into a blob store. -// Instances should be obtained from BlobWriteService.Writer and -// BlobWriteService.Resume. If supported by the store, a writer can be -// recovered with the id. -type BlobWriter interface { - io.WriteCloser - io.ReaderFrom - - // Size returns the number of bytes written to this blob. - Size() int64 - - // ID returns the identifier for this writer. The ID can be used with the - // Blob service to later resume the write. - ID() string - - // StartedAt returns the time this blob write was started. - StartedAt() time.Time - - // Commit completes the blob writer process. The content is verified - // against the provided provisional descriptor, which may result in an - // error. Depending on the implementation, written data may be validated - // against the provisional descriptor fields. If MediaType is not present, - // the implementation may reject the commit or assign "application/octet- - // stream" to the blob. The returned descriptor may have a different - // digest depending on the blob store, referred to as the canonical - // descriptor. - Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) - - // Cancel ends the blob write without storing any data and frees any - // associated resources. Any data written thus far will be lost. Cancel - // implementations should allow multiple calls even after a commit that - // result in a no-op. This allows use of Cancel in a defer statement, - // increasing the assurance that it is correctly called. - Cancel(ctx context.Context) error -} - -// BlobService combines the operations to access, read and write blobs. This -// can be used to describe remote blob services. -type BlobService interface { - BlobStatter - BlobProvider - BlobIngester -} - -// BlobStore represent the entire suite of blob related operations. Such an -// implementation can access, read, write, delete and serve blobs. -type BlobStore interface { - BlobService - BlobServer - BlobDeleter -} diff --git a/vendor/src/github.com/docker/distribution/circle.yml b/vendor/src/github.com/docker/distribution/circle.yml deleted file mode 100644 index 3d1ffd2f0..000000000 --- a/vendor/src/github.com/docker/distribution/circle.yml +++ /dev/null @@ -1,89 +0,0 @@ -# Pony-up! 
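The interfaces above compose into a complete upload flow: Create a writer, stream the content, then Commit against a provisional descriptor. A minimal sketch, assuming blobs is any BlobStore implementation; uploadBlob and content are illustrative names, not part of the package.

package blobupload

import (
	_ "crypto/sha256" // link the hash used by digest.FromBytes

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

func uploadBlob(ctx context.Context, blobs distribution.BlobStore, content []byte) (distribution.Descriptor, error) {
	// Create allocates a resumable writer; saving bw.ID() would allow the
	// upload to be picked up later with blobs.Resume.
	bw, err := blobs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	// Cancel after a successful Commit is documented as a no-op, so it is
	// safe to defer unconditionally.
	defer bw.Cancel(ctx)

	if _, err := bw.Write(content); err != nil {
		return distribution.Descriptor{}, err
	}

	// Commit verifies the written data against the provisional descriptor
	// and returns the canonical one, whose digest may differ.
	provisional := distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      int64(len(content)),
		Digest:    digest.FromBytes(content),
	}
	return bw.Commit(ctx, provisional)
}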
-machine: - pre: - # Install gvm - - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) - # Install codecov for coverage - - pip install --user codecov - - post: - # go - - gvm install go1.6 --prefer-binary --name=stable - - environment: - # Convenient shortcuts to "common" locations - CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME - BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME - # Trick circle brainflat "no absolute path" behavior - BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR - DOCKER_BUILDTAGS: "include_oss include_gcs" - # Workaround Circle parsing dumb bugs and/or YAML wonkyness - CIRCLE_PAIN: "mode: set" - - hosts: - # Not used yet - fancy: 127.0.0.1 - -dependencies: - pre: - # Copy the code to the gopath of all go versions - - > - gvm use stable && - mkdir -p "$(dirname $BASE_STABLE)" && - cp -R "$CHECKOUT" "$BASE_STABLE" - - override: - # Install dependencies for every copied clone/go version - - gvm use stable && go get github.com/tools/godep: - pwd: $BASE_STABLE - - post: - # For the stable go version, additionally install linting tools - - > - gvm use stable && - go get github.com/axw/gocov/gocov github.com/golang/lint/golint - -test: - pre: - # Output the go versions we are going to test - # - gvm use old && go version - - gvm use stable && go version - - # Ensure validation of dependencies - - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi: - pwd: $BASE_STABLE - - # First thing: build everything. This will catch compile errors, and it's - # also necessary for go vet to work properly (see #807). - - gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"): - pwd: $BASE_STABLE - - # FMT - - gvm use stable && make fmt: - pwd: $BASE_STABLE - - # VET - - gvm use stable && make vet: - pwd: $BASE_STABLE - - # LINT - - gvm use stable && make lint: - pwd: $BASE_STABLE - - override: - # Test stable, and report - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': - timeout: 600 - pwd: $BASE_STABLE - - post: - # Report to codecov - - bash <(curl -s https://codecov.io/bash): - pwd: $BASE_STABLE - - ## Notes - # Disabled the -race detector due to massive memory usage. - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" - # http://labix.org/gocheck diff --git a/vendor/src/github.com/docker/distribution/context/context.go b/vendor/src/github.com/docker/distribution/context/context.go deleted file mode 100644 index 23cbf5b54..000000000 --- a/vendor/src/github.com/docker/distribution/context/context.go +++ /dev/null @@ -1,85 +0,0 @@ -package context - -import ( - "sync" - - "github.com/docker/distribution/uuid" - "golang.org/x/net/context" -) - -// Context is a copy of Context from the golang.org/x/net/context package. -type Context interface { - context.Context -} - -// instanceContext is a context that provides only an instance id. It is -// provided as the main background context. 
-type instanceContext struct { - Context - id string // id of context, logged as "instance.id" - once sync.Once // once protect generation of the id -} - -func (ic *instanceContext) Value(key interface{}) interface{} { - if key == "instance.id" { - ic.once.Do(func() { - // We want to lazy initialize the UUID such that we don't - // call a random generator from the package initialization - // code. For various reasons random could not be available - // https://github.com/docker/distribution/issues/782 - ic.id = uuid.Generate().String() - }) - return ic.id - } - - return ic.Context.Value(key) -} - -var background = &instanceContext{ - Context: context.Background(), -} - -// Background returns a non-nil, empty Context. The background context -// provides a single key, "instance.id" that is globally unique to the -// process. -func Background() Context { - return background -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. Use context Values only for request-scoped data that transits processes -// and APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key, val interface{}) Context { - return context.WithValue(parent, key, val) -} - -// stringMapContext is a simple context implementation that checks a map for a -// key, falling back to a parent if not present. -type stringMapContext struct { - context.Context - m map[string]interface{} -} - -// WithValues returns a context that proxies lookups through a map. Only -// supports string keys. -func WithValues(ctx context.Context, m map[string]interface{}) context.Context { - mo := make(map[string]interface{}, len(m)) // make our own copy. - for k, v := range m { - mo[k] = v - } - - return stringMapContext{ - Context: ctx, - m: mo, - } -} - -func (smc stringMapContext) Value(key interface{}) interface{} { - if ks, ok := key.(string); ok { - if v, ok := smc.m[ks]; ok { - return v - } - } - - return smc.Context.Value(key) -} diff --git a/vendor/src/github.com/docker/distribution/context/doc.go b/vendor/src/github.com/docker/distribution/context/doc.go deleted file mode 100644 index 3b4ab8882..000000000 --- a/vendor/src/github.com/docker/distribution/context/doc.go +++ /dev/null @@ -1,89 +0,0 @@ -// Package context provides several utilities for working with -// golang.org/x/net/context in http requests. Primarily, the focus is on -// logging relevant request information but this package is not limited to -// that purpose. -// -// The easiest way to get started is to get the background context: -// -// ctx := context.Background() -// -// The returned context should be passed around your application and be the -// root of all other context instances. If the application has a version, this -// line should be called before anything else: -// -// ctx := context.WithVersion(context.Background(), version) -// -// The above will store the version in the context and will be available to -// the logger. -// -// Logging -// -// The most useful aspect of this package is GetLogger. This function takes -// any context.Context interface and returns the current logger from the -// context. Canonical usage looks like this: -// -// GetLogger(ctx).Infof("something interesting happened") -// -// GetLogger also takes optional key arguments. The keys will be looked up in -// the context and reported with the logger. 
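Putting the context pieces above together: Background carries a lazily generated "instance.id", and WithValues overlays a plain string-keyed map that falls back to its parent. A small sketch; the "component" key is illustrative.

package main

import (
	"fmt"

	"github.com/docker/distribution/context"
)

func main() {
	ctx := context.Background()
	fmt.Println(ctx.Value("instance.id")) // process-unique UUID, generated on first access

	ctx2 := context.WithValues(ctx, map[string]interface{}{"component": "example"})
	fmt.Println(ctx2.Value("component"))   // "example", from the map
	fmt.Println(ctx2.Value("instance.id")) // still resolved via the parent context
}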
The following example would -// return a logger that prints the version with each log message: -// -// ctx := context.Context(context.Background(), "version", version) -// GetLogger(ctx, "version").Infof("this log message has a version field") -// -// The above would print out a log message like this: -// -// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m -// -// When used with WithLogger, we gain the ability to decorate the context with -// loggers that have information from disparate parts of the call stack. -// Following from the version example, we can build a new context with the -// configured logger such that we always print the version field: -// -// ctx = WithLogger(ctx, GetLogger(ctx, "version")) -// -// Since the logger has been pushed to the context, we can now get the version -// field for free with our log messages. Future calls to GetLogger on the new -// context will have the version field: -// -// GetLogger(ctx).Infof("this log message has a version field") -// -// This becomes more powerful when we start stacking loggers. Let's say we -// have the version logger from above but also want a request id. Using the -// context above, in our request scoped function, we place another logger in -// the context: -// -// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context -// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id")) -// -// When GetLogger is called on the new context, "http.request.id" will be -// included as a logger field, along with the original "version" field: -// -// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m -// -// Note that this only affects the new context, the previous context, with the -// version field, can be used independently. Put another way, the new logger, -// added to the request context, is unique to that context and can have -// request scoped varaibles. -// -// HTTP Requests -// -// This package also contains several methods for working with http requests. -// The concepts are very similar to those described above. We simply place the -// request in the context using WithRequest. This makes the request variables -// available. GetRequestLogger can then be called to get request specific -// variables in a log line: -// -// ctx = WithRequest(ctx, req) -// GetRequestLogger(ctx).Infof("request variables") -// -// Like above, if we want to include the request data in all log messages in -// the context, we push the logger to a new context and use that one: -// -// ctx = WithLogger(ctx, GetRequestLogger(ctx)) -// -// The concept is fairly powerful and ensures that calls throughout the stack -// can be traced in log messages. Using the fields like "http.request.id", one -// can analyze call flow for a particular request with a simple grep of the -// logs. -package context diff --git a/vendor/src/github.com/docker/distribution/context/http.go b/vendor/src/github.com/docker/distribution/context/http.go deleted file mode 100644 index 2cb1d0417..000000000 --- a/vendor/src/github.com/docker/distribution/context/http.go +++ /dev/null @@ -1,364 +0,0 @@ -package context - -import ( - "errors" - "net" - "net/http" - "strings" - "sync" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/uuid" - "github.com/gorilla/mux" -) - -// Common errors used with this package. 
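The logger-stacking pattern the package documentation describes condenses to a couple of lines in practice. A sketch assuming ctx already carries a "version" value, as in the examples above; handle is an illustrative name.

package logexample

import "github.com/docker/distribution/context"

func handle(ctx context.Context) {
	// Resolve "version" from the context and bake it into the logger, so
	// every later GetLogger call on this context reports the field.
	ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version"))

	context.GetLogger(ctx).Infof("something interesting happened") // ... version=...
}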
-var ( - ErrNoRequestContext = errors.New("no http request in context") - ErrNoResponseWriterContext = errors.New("no http response in context") -) - -func parseIP(ipStr string) net.IP { - ip := net.ParseIP(ipStr) - if ip == nil { - log.Warnf("invalid remote IP address: %q", ipStr) - } - return ip -} - -// RemoteAddr extracts the remote address of the request, taking into -// account proxy headers. -func RemoteAddr(r *http.Request) string { - if prior := r.Header.Get("X-Forwarded-For"); prior != "" { - proxies := strings.Split(prior, ",") - if len(proxies) > 0 { - remoteAddr := strings.Trim(proxies[0], " ") - if parseIP(remoteAddr) != nil { - return remoteAddr - } - } - } - // X-Real-Ip is less supported, but worth checking in the - // absence of X-Forwarded-For - if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { - if parseIP(realIP) != nil { - return realIP - } - } - - return r.RemoteAddr -} - -// RemoteIP extracts the remote IP of the request, taking into -// account proxy headers. -func RemoteIP(r *http.Request) string { - addr := RemoteAddr(r) - - // Try parsing it as "IP:port" - if ip, _, err := net.SplitHostPort(addr); err == nil { - return ip - } - - return addr -} - -// WithRequest places the request on the context. The context of the request -// is assigned a unique id, available at "http.request.id". The request itself -// is available at "http.request". Other common attributes are available under -// the prefix "http.request.". If a request is already present on the context, -// this method will panic. -func WithRequest(ctx Context, r *http.Request) Context { - if ctx.Value("http.request") != nil { - // NOTE(stevvooe): This needs to be considered a programming error. It - // is unlikely that we'd want to have more than one request in - // context. - panic("only one request per context") - } - - return &httpRequestContext{ - Context: ctx, - startedAt: time.Now(), - id: uuid.Generate().String(), - r: r, - } -} - -// GetRequest returns the http request in the given context. Returns -// ErrNoRequestContext if the context does not have an http request associated -// with it. -func GetRequest(ctx Context) (*http.Request, error) { - if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { - return r, nil - } - return nil, ErrNoRequestContext -} - -// GetRequestID attempts to resolve the current request id, if possible. An -// error is return if it is not available on the context. -func GetRequestID(ctx Context) string { - return GetStringValue(ctx, "http.request.id") -} - -// WithResponseWriter returns a new context and response writer that makes -// interesting response statistics available within the context. -func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { - irw := instrumentedResponseWriter{ - ResponseWriter: w, - Context: ctx, - } - - if closeNotifier, ok := w.(http.CloseNotifier); ok { - irwCN := &instrumentedResponseWriterCN{ - instrumentedResponseWriter: irw, - CloseNotifier: closeNotifier, - } - - return irwCN, irwCN - } - - return &irw, &irw -} - -// GetResponseWriter returns the http.ResponseWriter from the provided -// context. If not present, ErrNoResponseWriterContext is returned. The -// returned instance provides instrumentation in the context. 
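The precedence RemoteAddr implements (first X-Forwarded-For entry, then X-Real-Ip, then r.RemoteAddr) is easy to see with a constructed request. The addresses and host below are illustrative.

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/context"
)

func main() {
	r, _ := http.NewRequest("GET", "http://registry.example/v2/", nil)
	r.RemoteAddr = "10.0.0.1:34567"
	r.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.2")

	fmt.Println(context.RemoteAddr(r)) // 203.0.113.7, the first proxy entry
	fmt.Println(context.RemoteIP(r))   // 203.0.113.7

	r.Header.Del("X-Forwarded-For")
	fmt.Println(context.RemoteIP(r)) // 10.0.0.1, parsed out of r.RemoteAddr
}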
-func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { - v := ctx.Value("http.response") - - rw, ok := v.(http.ResponseWriter) - if !ok || rw == nil { - return nil, ErrNoResponseWriterContext - } - - return rw, nil -} - -// getVarsFromRequest let's us change request vars implementation for testing -// and maybe future changes. -var getVarsFromRequest = mux.Vars - -// WithVars extracts gorilla/mux vars and makes them available on the returned -// context. Variables are available at keys with the prefix "vars.". For -// example, if looking for the variable "name", it can be accessed as -// "vars.name". Implementations that are accessing values need not know that -// the underlying context is implemented with gorilla/mux vars. -func WithVars(ctx Context, r *http.Request) Context { - return &muxVarsContext{ - Context: ctx, - vars: getVarsFromRequest(r), - } -} - -// GetRequestLogger returns a logger that contains fields from the request in -// the current context. If the request is not available in the context, no -// fields will display. Request loggers can safely be pushed onto the context. -func GetRequestLogger(ctx Context) Logger { - return GetLogger(ctx, - "http.request.id", - "http.request.method", - "http.request.host", - "http.request.uri", - "http.request.referer", - "http.request.useragent", - "http.request.remoteaddr", - "http.request.contenttype") -} - -// GetResponseLogger reads the current response stats and builds a logger. -// Because the values are read at call time, pushing a logger returned from -// this function on the context will lead to missing or invalid data. Only -// call this at the end of a request, after the response has been written. -func GetResponseLogger(ctx Context) Logger { - l := getLogrusLogger(ctx, - "http.response.written", - "http.response.status", - "http.response.contenttype") - - duration := Since(ctx, "http.request.startedat") - - if duration > 0 { - l = l.WithField("http.response.duration", duration.String()) - } - - return l -} - -// httpRequestContext makes information about a request available to context. -type httpRequestContext struct { - Context - - startedAt time.Time - id string - r *http.Request -} - -// Value returns a keyed element of the request for use in the context. To get -// the request itself, query "request". For other components, access them as -// "request.". 
For example, r.RequestURI -func (ctx *httpRequestContext) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.request" { - return ctx.r - } - - if !strings.HasPrefix(keyStr, "http.request.") { - goto fallback - } - - parts := strings.Split(keyStr, ".") - - if len(parts) != 3 { - goto fallback - } - - switch parts[2] { - case "uri": - return ctx.r.RequestURI - case "remoteaddr": - return RemoteAddr(ctx.r) - case "method": - return ctx.r.Method - case "host": - return ctx.r.Host - case "referer": - referer := ctx.r.Referer() - if referer != "" { - return referer - } - case "useragent": - return ctx.r.UserAgent() - case "id": - return ctx.id - case "startedat": - return ctx.startedAt - case "contenttype": - ct := ctx.r.Header.Get("Content-Type") - if ct != "" { - return ct - } - } - } - -fallback: - return ctx.Context.Value(key) -} - -type muxVarsContext struct { - Context - vars map[string]string -} - -func (ctx *muxVarsContext) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "vars" { - return ctx.vars - } - - if strings.HasPrefix(keyStr, "vars.") { - keyStr = strings.TrimPrefix(keyStr, "vars.") - } - - if v, ok := ctx.vars[keyStr]; ok { - return v - } - } - - return ctx.Context.Value(key) -} - -// instrumentedResponseWriterCN provides response writer information in a -// context. It implements http.CloseNotifier so that users can detect -// early disconnects. -type instrumentedResponseWriterCN struct { - instrumentedResponseWriter - http.CloseNotifier -} - -// instrumentedResponseWriter provides response writer information in a -// context. This variant is only used in the case where CloseNotifier is not -// implemented by the parent ResponseWriter. -type instrumentedResponseWriter struct { - http.ResponseWriter - Context - - mu sync.Mutex - status int - written int64 -} - -func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { - n, err = irw.ResponseWriter.Write(p) - - irw.mu.Lock() - irw.written += int64(n) - - // Guess the likely status if not set. 
- if irw.status == 0 { - irw.status = http.StatusOK - } - - irw.mu.Unlock() - - return -} - -func (irw *instrumentedResponseWriter) WriteHeader(status int) { - irw.ResponseWriter.WriteHeader(status) - - irw.mu.Lock() - irw.status = status - irw.mu.Unlock() -} - -func (irw *instrumentedResponseWriter) Flush() { - if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { - flusher.Flush() - } -} - -func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.response" { - return irw - } - - if !strings.HasPrefix(keyStr, "http.response.") { - goto fallback - } - - parts := strings.Split(keyStr, ".") - - if len(parts) != 3 { - goto fallback - } - - irw.mu.Lock() - defer irw.mu.Unlock() - - switch parts[2] { - case "written": - return irw.written - case "status": - return irw.status - case "contenttype": - contentType := irw.Header().Get("Content-Type") - if contentType != "" { - return contentType - } - } - } - -fallback: - return irw.Context.Value(key) -} - -func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.response" { - return irw - } - } - - return irw.instrumentedResponseWriter.Value(key) -} diff --git a/vendor/src/github.com/docker/distribution/context/logger.go b/vendor/src/github.com/docker/distribution/context/logger.go deleted file mode 100644 index fbb6a0511..000000000 --- a/vendor/src/github.com/docker/distribution/context/logger.go +++ /dev/null @@ -1,116 +0,0 @@ -package context - -import ( - "fmt" - - "github.com/Sirupsen/logrus" - "runtime" -) - -// Logger provides a leveled-logging interface. -type Logger interface { - // standard logger methods - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) - - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - - Panic(args ...interface{}) - Panicf(format string, args ...interface{}) - Panicln(args ...interface{}) - - // Leveled methods, from logrus - Debug(args ...interface{}) - Debugf(format string, args ...interface{}) - Debugln(args ...interface{}) - - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Errorln(args ...interface{}) - - Info(args ...interface{}) - Infof(format string, args ...interface{}) - Infoln(args ...interface{}) - - Warn(args ...interface{}) - Warnf(format string, args ...interface{}) - Warnln(args ...interface{}) -} - -// WithLogger creates a new context with provided logger. -func WithLogger(ctx Context, logger Logger) Context { - return WithValue(ctx, "logger", logger) -} - -// GetLoggerWithField returns a logger instance with the specified field key -// and value without affecting the context. Extra specified keys will be -// resolved from the context. -func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { - return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) -} - -// GetLoggerWithFields returns a logger instance with the specified fields -// without affecting the context. Extra specified keys will be resolved from -// the context. -func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { - // must convert from interface{} -> interface{} to string -> interface{} for logrus. 
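Tying WithRequest and WithResponseWriter together gives the usual middleware shape for this package: wrap the writer, serve, then read the response stats. A sketch under that assumption; instrumented is an illustrative name and the wrapped handler is elided.

package httpexample

import (
	"net/http"

	"github.com/docker/distribution/context"
)

func instrumented(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// WithRequest panics if a request is already on the context, so
		// this must be the outermost wrapper.
		ctx := context.WithRequest(context.Background(), r)
		ctx, w = context.WithResponseWriter(ctx, w)

		h.ServeHTTP(w, r)

		// Per the doc comment above, response stats are read at call
		// time, so this only makes sense after the response is written.
		context.GetResponseLogger(ctx).Infof("request served")
	})
}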
- lfields := make(logrus.Fields, len(fields)) - for key, value := range fields { - lfields[fmt.Sprint(key)] = value - } - - return getLogrusLogger(ctx, keys...).WithFields(lfields) -} - -// GetLogger returns the logger from the current context, if present. If one -// or more keys are provided, they will be resolved on the context and -// included in the logger. While context.Value takes an interface, any key -// argument passed to GetLogger will be passed to fmt.Sprint when expanded as -// a logging key field. If context keys are integer constants, for example, -// its recommended that a String method is implemented. -func GetLogger(ctx Context, keys ...interface{}) Logger { - return getLogrusLogger(ctx, keys...) -} - -// GetLogrusLogger returns the logrus logger for the context. If one more keys -// are provided, they will be resolved on the context and included in the -// logger. Only use this function if specific logrus functionality is -// required. -func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { - var logger *logrus.Entry - - // Get a logger, if it is present. - loggerInterface := ctx.Value("logger") - if loggerInterface != nil { - if lgr, ok := loggerInterface.(*logrus.Entry); ok { - logger = lgr - } - } - - if logger == nil { - fields := logrus.Fields{} - - // Fill in the instance id, if we have it. - instanceID := ctx.Value("instance.id") - if instanceID != nil { - fields["instance.id"] = instanceID - } - - fields["go.version"] = runtime.Version() - // If no logger is found, just return the standard logger. - logger = logrus.StandardLogger().WithFields(fields) - } - - fields := logrus.Fields{} - for _, key := range keys { - v := ctx.Value(key) - if v != nil { - fields[fmt.Sprint(key)] = v - } - } - - return logger.WithFields(fields) -} diff --git a/vendor/src/github.com/docker/distribution/context/trace.go b/vendor/src/github.com/docker/distribution/context/trace.go deleted file mode 100644 index 721964a84..000000000 --- a/vendor/src/github.com/docker/distribution/context/trace.go +++ /dev/null @@ -1,104 +0,0 @@ -package context - -import ( - "runtime" - "time" - - "github.com/docker/distribution/uuid" -) - -// WithTrace allocates a traced timing span in a new context. This allows a -// caller to track the time between calling WithTrace and the returned done -// function. When the done function is called, a log message is emitted with a -// "trace.duration" field, corresponding to the elapsed time and a -// "trace.func" field, corresponding to the function that called WithTrace. -// -// The logging keys "trace.id" and "trace.parent.id" are provided to implement -// dapper-like tracing. This function should be complemented with a WithSpan -// method that could be used for tracing distributed RPC calls. -// -// The main benefit of this function is to post-process log messages or -// intercept them in a hook to provide timing data. Trace ids and parent ids -// can also be linked to provide call tracing, if so required. -// -// Here is an example of the usage: -// -// func timedOperation(ctx Context) { -// ctx, done := WithTrace(ctx) -// defer done("this will be the log message") -// // ... function body ... -// } -// -// If the function ran for roughly 1s, such a usage would emit a log message -// as follows: -// -// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... 
-// -// Notice that the function name is automatically resolved, along with the -// package and a trace id is emitted that can be linked with parent ids. -func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { - if ctx == nil { - ctx = Background() - } - - pc, file, line, _ := runtime.Caller(1) - f := runtime.FuncForPC(pc) - ctx = &traced{ - Context: ctx, - id: uuid.Generate().String(), - start: time.Now(), - parent: GetStringValue(ctx, "trace.id"), - fnname: f.Name(), - file: file, - line: line, - } - - return ctx, func(format string, a ...interface{}) { - GetLogger(ctx, - "trace.duration", - "trace.id", - "trace.parent.id", - "trace.func", - "trace.file", - "trace.line"). - Debugf(format, a...) - } -} - -// traced represents a context that is traced for function call timing. It -// also provides fast lookup for the various attributes that are available on -// the trace. -type traced struct { - Context - id string - parent string - start time.Time - fnname string - file string - line int -} - -func (ts *traced) Value(key interface{}) interface{} { - switch key { - case "trace.start": - return ts.start - case "trace.duration": - return time.Since(ts.start) - case "trace.id": - return ts.id - case "trace.parent.id": - if ts.parent == "" { - return nil // must return nil to signal no parent. - } - - return ts.parent - case "trace.func": - return ts.fnname - case "trace.file": - return ts.file - case "trace.line": - return ts.line - } - - return ts.Context.Value(key) -} diff --git a/vendor/src/github.com/docker/distribution/context/util.go b/vendor/src/github.com/docker/distribution/context/util.go deleted file mode 100644 index cb9ef52e3..000000000 --- a/vendor/src/github.com/docker/distribution/context/util.go +++ /dev/null @@ -1,24 +0,0 @@ -package context - -import ( - "time" -) - -// Since looks up key, which should be a time.Time, and returns the duration -// since that time. If the key is not found, the value returned will be zero. -// This is helpful when inferring metrics related to context execution times. -func Since(ctx Context, key interface{}) time.Duration { - if startedAt, ok := ctx.Value(key).(time.Time); ok { - return time.Since(startedAt) - } - return 0 -} - -// GetStringValue returns a string value from the context. The empty string -// will be returned if not found. -func GetStringValue(ctx Context, key interface{}) (value string) { - if valuev, ok := ctx.Value(key).(string); ok { - value = valuev - } - return value -} diff --git a/vendor/src/github.com/docker/distribution/context/version.go b/vendor/src/github.com/docker/distribution/context/version.go deleted file mode 100644 index 746cda02e..000000000 --- a/vendor/src/github.com/docker/distribution/context/version.go +++ /dev/null @@ -1,16 +0,0 @@ -package context - -// WithVersion stores the application version in the context. The new context -// gets a logger to ensure log messages are marked with the application -// version. -func WithVersion(ctx Context, version string) Context { - ctx = WithValue(ctx, "version", version) - // push a new logger onto the stack - return WithLogger(ctx, GetLogger(ctx, "version")) -} - -// GetVersion returns the application version from the context. An empty -// string may returned if the version was not set on the context. 
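WithTrace is used exactly as the doc comment above shows; a compact sketch with doWork standing in for the timed body.

package traceexample

import "github.com/docker/distribution/context"

func timedOperation(ctx context.Context) {
	ctx, done := context.WithTrace(ctx)
	defer done("operation complete") // logs trace.duration, trace.func, trace.id, ...

	doWork(ctx) // illustrative stand-in for the work being timed
}

func doWork(ctx context.Context) { /* elided */ }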
-func GetVersion(ctx Context) string { - return GetStringValue(ctx, "version") -} diff --git a/vendor/src/github.com/docker/distribution/coverpkg.sh b/vendor/src/github.com/docker/distribution/coverpkg.sh deleted file mode 100755 index 25d419ae8..000000000 --- a/vendor/src/github.com/docker/distribution/coverpkg.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# Given a subpackage and the containing package, figures out which packages -# need to be passed to `go test -coverpkg`: this includes all of the -# subpackage's dependencies within the containing package, as well as the -# subpackage itself. -DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" -echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/vendor/src/github.com/docker/distribution/digest/digest.go b/vendor/src/github.com/docker/distribution/digest/digest.go deleted file mode 100644 index 31d821bba..000000000 --- a/vendor/src/github.com/docker/distribution/digest/digest.go +++ /dev/null @@ -1,139 +0,0 @@ -package digest - -import ( - "fmt" - "hash" - "io" - "regexp" - "strings" -) - -const ( - // DigestSha256EmptyTar is the canonical sha256 digest of empty data - DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -) - -// Digest allows simple protection of hex formatted digest strings, prefixed -// by their algorithm. Strings of type Digest have some guarantee of being in -// the correct format and it provides quick access to the components of a -// digest string. -// -// The following is an example of the contents of Digest types: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// This allows to abstract the digest behind this type and work only in those -// terms. -type Digest string - -// NewDigest returns a Digest from alg and a hash.Hash object. -func NewDigest(alg Algorithm, h hash.Hash) Digest { - return NewDigestFromBytes(alg, h.Sum(nil)) -} - -// NewDigestFromBytes returns a new digest from the byte contents of p. -// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) -// functions. This is also useful for rebuilding digests from binary -// serializations. -func NewDigestFromBytes(alg Algorithm, p []byte) Digest { - return Digest(fmt.Sprintf("%s:%x", alg, p)) -} - -// NewDigestFromHex returns a Digest from alg and a the hex encoded digest. -func NewDigestFromHex(alg, hex string) Digest { - return Digest(fmt.Sprintf("%s:%s", alg, hex)) -} - -// DigestRegexp matches valid digest types. -var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) - -// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. -var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) - -var ( - // ErrDigestInvalidFormat returned when digest format invalid. - ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") - - // ErrDigestInvalidLength returned when digest has invalid length. - ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") - - // ErrDigestUnsupported returned when the digest algorithm is unsupported. - ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") -) - -// ParseDigest parses s and returns the validated digest object. An error will -// be returned if the format is invalid. 
-func ParseDigest(s string) (Digest, error) { - d := Digest(s) - - return d, d.Validate() -} - -// FromReader returns the most valid digest for the underlying content using -// the canonical digest algorithm. -func FromReader(rd io.Reader) (Digest, error) { - return Canonical.FromReader(rd) -} - -// FromBytes digests the input and returns a Digest. -func FromBytes(p []byte) Digest { - return Canonical.FromBytes(p) -} - -// Validate checks that the contents of d is a valid digest, returning an -// error if not. -func (d Digest) Validate() error { - s := string(d) - - if !DigestRegexpAnchored.MatchString(s) { - return ErrDigestInvalidFormat - } - - i := strings.Index(s, ":") - if i < 0 { - return ErrDigestInvalidFormat - } - - // case: "sha256:" with no hex. - if i+1 == len(s) { - return ErrDigestInvalidFormat - } - - switch algorithm := Algorithm(s[:i]); algorithm { - case SHA256, SHA384, SHA512: - if algorithm.Size()*2 != len(s[i+1:]) { - return ErrDigestInvalidLength - } - break - default: - return ErrDigestUnsupported - } - - return nil -} - -// Algorithm returns the algorithm portion of the digest. This will panic if -// the underlying digest is not in a valid format. -func (d Digest) Algorithm() Algorithm { - return Algorithm(d[:d.sepIndex()]) -} - -// Hex returns the hex digest portion of the digest. This will panic if the -// underlying digest is not in a valid format. -func (d Digest) Hex() string { - return string(d[d.sepIndex()+1:]) -} - -func (d Digest) String() string { - return string(d) -} - -func (d Digest) sepIndex() int { - i := strings.Index(string(d), ":") - - if i < 0 { - panic("could not find ':' in digest: " + d) - } - - return i -} diff --git a/vendor/src/github.com/docker/distribution/digest/digester.go b/vendor/src/github.com/docker/distribution/digest/digester.go deleted file mode 100644 index f3105a45b..000000000 --- a/vendor/src/github.com/docker/distribution/digest/digester.go +++ /dev/null @@ -1,155 +0,0 @@ -package digest - -import ( - "crypto" - "fmt" - "hash" - "io" -) - -// Algorithm identifies and implementation of a digester by an identifier. -// Note the that this defines both the hash algorithm used and the string -// encoding. -type Algorithm string - -// supported digest types -const ( - SHA256 Algorithm = "sha256" // sha256 with hex encoding - SHA384 Algorithm = "sha384" // sha384 with hex encoding - SHA512 Algorithm = "sha512" // sha512 with hex encoding - - // Canonical is the primary digest algorithm used with the distribution - // project. Other digests may be used but this one is the primary storage - // digest. - Canonical = SHA256 -) - -var ( - // TODO(stevvooe): Follow the pattern of the standard crypto package for - // registration of digests. Effectively, we are a registerable set and - // common symbol access. - - // algorithms maps values to hash.Hash implementations. Other algorithms - // may be available but they cannot be calculated by the digest package. - algorithms = map[Algorithm]crypto.Hash{ - SHA256: crypto.SHA256, - SHA384: crypto.SHA384, - SHA512: crypto.SHA512, - } -) - -// Available returns true if the digest type is available for use. If this -// returns false, New and Hash will return nil. -func (a Algorithm) Available() bool { - h, ok := algorithms[a] - if !ok { - return false - } - - // check availability of the hash, as well - return h.Available() -} - -func (a Algorithm) String() string { - return string(a) -} - -// Size returns number of bytes returned by the hash. 
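A round trip through ParseDigest and Validate, using the package's own DigestSha256EmptyTar constant; the malformed input is illustrative.

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	d, err := digest.ParseDigest(digest.DigestSha256EmptyTar)
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Algorithm()) // sha256
	fmt.Println(d.Hex()[:12])  // e3b0c44298fc

	// Well-formed but too short for sha256: caught by the length check.
	_, err = digest.ParseDigest("sha256:abc123")
	fmt.Println(err) // invalid checksum digest length
}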
-func (a Algorithm) Size() int { - h, ok := algorithms[a] - if !ok { - return 0 - } - return h.Size() -} - -// Set implemented to allow use of Algorithm as a command line flag. -func (a *Algorithm) Set(value string) error { - if value == "" { - *a = Canonical - } else { - // just do a type conversion, support is queried with Available. - *a = Algorithm(value) - } - - return nil -} - -// New returns a new digester for the specified algorithm. If the algorithm -// does not have a digester implementation, nil will be returned. This can be -// checked by calling Available before calling New. -func (a Algorithm) New() Digester { - return &digester{ - alg: a, - hash: a.Hash(), - } -} - -// Hash returns a new hash as used by the algorithm. If not available, the -// method will panic. Check Algorithm.Available() before calling. -func (a Algorithm) Hash() hash.Hash { - if !a.Available() { - // NOTE(stevvooe): A missing hash is usually a programming error that - // must be resolved at compile time. We don't import in the digest - // package to allow users to choose their hash implementation (such as - // when using stevvooe/resumable or a hardware accelerated package). - // - // Applications that may want to resolve the hash at runtime should - // call Algorithm.Available before call Algorithm.Hash(). - panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) - } - - return algorithms[a].New() -} - -// FromReader returns the digest of the reader using the algorithm. -func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { - digester := a.New() - - if _, err := io.Copy(digester.Hash(), rd); err != nil { - return "", err - } - - return digester.Digest(), nil -} - -// FromBytes digests the input and returns a Digest. -func (a Algorithm) FromBytes(p []byte) Digest { - digester := a.New() - - if _, err := digester.Hash().Write(p); err != nil { - // Writes to a Hash should never fail. None of the existing - // hash implementations in the stdlib or hashes vendored - // here can return errors from Write. Having a panic in this - // condition instead of having FromBytes return an error value - // avoids unnecessary error handling paths in all callers. - panic("write to hash function returned error: " + err.Error()) - } - - return digester.Digest() -} - -// TODO(stevvooe): Allow resolution of verifiers using the digest type and -// this registration system. - -// Digester calculates the digest of written data. Writes should go directly -// to the return value of Hash, while calling Digest will return the current -// value of the digest. -type Digester interface { - Hash() hash.Hash // provides direct access to underlying hash instance. - Digest() Digest -} - -// digester provides a simple digester definition that embeds a hasher. -type digester struct { - alg Algorithm - hash hash.Hash -} - -func (d *digester) Hash() hash.Hash { - return d.hash -} - -func (d *digester) Digest() Digest { - return NewDigest(d.alg, d.hash) -} diff --git a/vendor/src/github.com/docker/distribution/digest/doc.go b/vendor/src/github.com/docker/distribution/digest/doc.go deleted file mode 100644 index f64b0db32..000000000 --- a/vendor/src/github.com/docker/distribution/digest/doc.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package digest provides a generalized type to opaquely represent message -// digests and their operations within the registry. The Digest type is -// designed to serve as a flexible identifier in a content-addressable system. 
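Incremental digesting with a Digester is equivalent to FromBytes over the concatenated input. Note the blank crypto/sha256 import: as the Hash doc comment above says, linking the hash implementation is the caller's responsibility.

package main

import (
	_ "crypto/sha256" // make the Canonical (sha256) algorithm Available

	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	d := digest.Canonical.New()      // sha256-backed Digester
	d.Hash().Write([]byte("hello ")) // hash.Hash writes never fail
	d.Hash().Write([]byte("world"))

	fmt.Println(d.Digest() == digest.FromBytes([]byte("hello world"))) // true
}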
-// More importantly, it provides tools and wrappers to work with -// hash.Hash-based digests with little effort. -// -// Basics -// -// The format of a digest is simply a string with two parts, dubbed the -// "algorithm" and the "digest", separated by a colon: -// -// : -// -// An example of a sha256 digest representation follows: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// In this case, the string "sha256" is the algorithm and the hex bytes are -// the "digest". -// -// Because the Digest type is simply a string, once a valid Digest is -// obtained, comparisons are cheap, quick and simple to express with the -// standard equality operator. -// -// Verification -// -// The main benefit of using the Digest type is simple verification against a -// given digest. The Verifier interface, modeled after the stdlib hash.Hash -// interface, provides a common write sink for digest verification. After -// writing is complete, calling the Verifier.Verified method will indicate -// whether or not the stream of bytes matches the target digest. -// -// Missing Features -// -// In addition to the above, we intend to add the following features to this -// package: -// -// 1. A Digester type that supports write sink digest calculation. -// -// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. -// -package digest diff --git a/vendor/src/github.com/docker/distribution/digest/set.go b/vendor/src/github.com/docker/distribution/digest/set.go deleted file mode 100644 index 4b9313c1a..000000000 --- a/vendor/src/github.com/docker/distribution/digest/set.go +++ /dev/null @@ -1,245 +0,0 @@ -package digest - -import ( - "errors" - "sort" - "strings" - "sync" -) - -var ( - // ErrDigestNotFound is used when a matching digest - // could not be found in a set. - ErrDigestNotFound = errors.New("digest not found") - - // ErrDigestAmbiguous is used when multiple digests - // are found in a set. None of the matching digests - // should be considered valid matches. - ErrDigestAmbiguous = errors.New("ambiguous digest string") -) - -// Set is used to hold a unique set of digests which -// may be easily referenced by easily referenced by a string -// representation of the digest as well as short representation. -// The uniqueness of the short representation is based on other -// digests in the set. If digests are omitted from this set, -// collisions in a larger set may not be detected, therefore it -// is important to always do short representation lookups on -// the complete set of digests. To mitigate collisions, an -// appropriately long short code should be used. -type Set struct { - mutex sync.RWMutex - entries digestEntries -} - -// NewSet creates an empty set of digests -// which may have digests added. -func NewSet() *Set { - return &Set{ - entries: digestEntries{}, - } -} - -// checkShortMatch checks whether two digests match as either whole -// values or short values. This function does not test equality, -// rather whether the second value could match against the first -// value. 
-func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { - if len(hex) == len(shortHex) { - if hex != shortHex { - return false - } - if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - } else if !strings.HasPrefix(hex, shortHex) { - return false - } else if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - return true -} - -// Lookup looks for a digest matching the given string representation. -// If no digests could be found ErrDigestNotFound will be returned -// with an empty digest value. If multiple matches are found -// ErrDigestAmbiguous will be returned with an empty digest value. -func (dst *Set) Lookup(d string) (Digest, error) { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - if len(dst.entries) == 0 { - return "", ErrDigestNotFound - } - var ( - searchFunc func(int) bool - alg Algorithm - hex string - ) - dgst, err := ParseDigest(d) - if err == ErrDigestInvalidFormat { - hex = d - searchFunc = func(i int) bool { - return dst.entries[i].val >= d - } - } else { - hex = dgst.Hex() - alg = dgst.Algorithm() - searchFunc = func(i int) bool { - if dst.entries[i].val == hex { - return dst.entries[i].alg >= alg - } - return dst.entries[i].val >= hex - } - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { - return "", ErrDigestNotFound - } - if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { - return dst.entries[idx].digest, nil - } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { - return "", ErrDigestAmbiguous - } - - return dst.entries[idx].digest, nil -} - -// Add adds the given digest to the set. An error will be returned -// if the given digest is invalid. If the digest already exists in the -// set, this operation will be a no-op. -func (dst *Set) Add(d Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) { - dst.entries = append(dst.entries, entry) - return nil - } else if dst.entries[idx].digest == d { - return nil - } - - entries := append(dst.entries, nil) - copy(entries[idx+1:], entries[idx:len(entries)-1]) - entries[idx] = entry - dst.entries = entries - return nil -} - -// Remove removes the given digest from the set. An err will be -// returned if the given digest is invalid. If the digest does -// not exist in the set, this operation will be a no-op. 
-func (dst *Set) Remove(d Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - // Not found if idx is after or value at idx is not digest - if idx == len(dst.entries) || dst.entries[idx].digest != d { - return nil - } - - entries := dst.entries - copy(entries[idx:], entries[idx+1:]) - entries = entries[:len(entries)-1] - dst.entries = entries - - return nil -} - -// All returns all the digests in the set -func (dst *Set) All() []Digest { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - retValues := make([]Digest, len(dst.entries)) - for i := range dst.entries { - retValues[i] = dst.entries[i].digest - } - - return retValues -} - -// ShortCodeTable returns a map of Digest to unique short codes. The -// length represents the minimum value, the maximum length may be the -// entire value of digest if uniqueness cannot be achieved without the -// full value. This function will attempt to make short codes as short -// as possible to be unique. -func ShortCodeTable(dst *Set, length int) map[Digest]string { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - m := make(map[Digest]string, len(dst.entries)) - l := length - resetIdx := 0 - for i := 0; i < len(dst.entries); i++ { - var short string - extended := true - for extended { - extended = false - if len(dst.entries[i].val) <= l { - short = dst.entries[i].digest.String() - } else { - short = dst.entries[i].val[:l] - for j := i + 1; j < len(dst.entries); j++ { - if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { - if j > resetIdx { - resetIdx = j - } - extended = true - } else { - break - } - } - if extended { - l++ - } - } - } - m[dst.entries[i].digest] = short - if i >= resetIdx { - l = length - } - } - return m -} - -type digestEntry struct { - alg Algorithm - val string - digest Digest -} - -type digestEntries []*digestEntry - -func (d digestEntries) Len() int { - return len(d) -} - -func (d digestEntries) Less(i, j int) bool { - if d[i].val != d[j].val { - return d[i].val < d[j].val - } - return d[i].alg < d[j].alg -} - -func (d digestEntries) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} diff --git a/vendor/src/github.com/docker/distribution/digest/verifiers.go b/vendor/src/github.com/docker/distribution/digest/verifiers.go deleted file mode 100644 index 9af3be134..000000000 --- a/vendor/src/github.com/docker/distribution/digest/verifiers.go +++ /dev/null @@ -1,44 +0,0 @@ -package digest - -import ( - "hash" - "io" -) - -// Verifier presents a general verification interface to be used with message -// digests and other byte stream verifications. Users instantiate a Verifier -// from one of the various methods, write the data under test to it then check -// the result with the Verified method. -type Verifier interface { - io.Writer - - // Verified will return true if the content written to Verifier matches - // the digest. - Verified() bool -} - -// NewDigestVerifier returns a verifier that compares the written bytes -// against a passed in digest. 
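Short-prefix resolution with Set, seeded with the two well-known digests that appear elsewhere in this patch (the empty tar and its gzipped form).

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	s := digest.NewSet()
	s.Add(digest.DigestSha256EmptyTar)
	s.Add(digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"))

	d, err := s.Lookup("a3ed95") // unambiguous hex prefix in this set
	fmt.Println(d, err)          // sha256:a3ed95..., <nil>

	_, err = s.Lookup("ffff") // no entry starts with this prefix
	fmt.Println(err)          // digest not found
}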
-func NewDigestVerifier(d Digest) (Verifier, error) { - if err := d.Validate(); err != nil { - return nil, err - } - - return hashVerifier{ - hash: d.Algorithm().Hash(), - digest: d, - }, nil -} - -type hashVerifier struct { - digest Digest - hash hash.Hash -} - -func (hv hashVerifier) Write(p []byte) (n int, err error) { - return hv.hash.Write(p) -} - -func (hv hashVerifier) Verified() bool { - return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) -} diff --git a/vendor/src/github.com/docker/distribution/doc.go b/vendor/src/github.com/docker/distribution/doc.go deleted file mode 100644 index bdd8cb708..000000000 --- a/vendor/src/github.com/docker/distribution/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package distribution will define the interfaces for the components of -// docker distribution. The goal is to allow users to reliably package, ship -// and store content related to docker images. -// -// This is currently a work in progress. More details are available in the -// README.md. -package distribution diff --git a/vendor/src/github.com/docker/distribution/errors.go b/vendor/src/github.com/docker/distribution/errors.go deleted file mode 100644 index c20f28113..000000000 --- a/vendor/src/github.com/docker/distribution/errors.go +++ /dev/null @@ -1,115 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/distribution/digest" -) - -// ErrAccessDenied is returned when an access to a requested resource is -// denied. -var ErrAccessDenied = errors.New("access denied") - -// ErrManifestNotModified is returned when a conditional manifest GetByTag -// returns nil due to the client indicating it has the latest version -var ErrManifestNotModified = errors.New("manifest not modified") - -// ErrUnsupported is returned when an unimplemented or unsupported action is -// performed -var ErrUnsupported = errors.New("operation unsupported") - -// ErrTagUnknown is returned if the given tag is not known by the tag service -type ErrTagUnknown struct { - Tag string -} - -func (err ErrTagUnknown) Error() string { - return fmt.Sprintf("unknown tag=%s", err.Tag) -} - -// ErrRepositoryUnknown is returned if the named repository is not known by -// the registry. -type ErrRepositoryUnknown struct { - Name string -} - -func (err ErrRepositoryUnknown) Error() string { - return fmt.Sprintf("unknown repository name=%s", err.Name) -} - -// ErrRepositoryNameInvalid should be used to denote an invalid repository -// name. Reason may set, indicating the cause of invalidity. -type ErrRepositoryNameInvalid struct { - Name string - Reason error -} - -func (err ErrRepositoryNameInvalid) Error() string { - return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) -} - -// ErrManifestUnknown is returned if the manifest is not known by the -// registry. -type ErrManifestUnknown struct { - Name string - Tag string -} - -func (err ErrManifestUnknown) Error() string { - return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) -} - -// ErrManifestUnknownRevision is returned when a manifest cannot be found by -// revision within a repository. -type ErrManifestUnknownRevision struct { - Name string - Revision digest.Digest -} - -func (err ErrManifestUnknownRevision) Error() string { - return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) -} - -// ErrManifestUnverified is returned when the registry is unable to verify -// the manifest. 
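Stream verification follows the hash.Hash-style flow the Verifier doc describes: write the content through the verifier, then check Verified. The content is illustrative.

package main

import (
	"bytes"
	_ "crypto/sha256" // link the hash implementation behind sha256 digests
	"fmt"
	"io"

	"github.com/docker/distribution/digest"
)

func main() {
	content := []byte("some layer data")
	d := digest.FromBytes(content)

	v, err := digest.NewDigestVerifier(d)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(v, bytes.NewReader(content)); err != nil {
		panic(err)
	}
	fmt.Println(v.Verified()) // true: the stream matches the digest
}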
-type ErrManifestUnverified struct{} - -func (ErrManifestUnverified) Error() string { - return fmt.Sprintf("unverified manifest") -} - -// ErrManifestVerification provides a type to collect errors encountered -// during manifest verification. Currently, it accepts errors of all types, -// but it may be narrowed to those involving manifest verification. -type ErrManifestVerification []error - -func (errs ErrManifestVerification) Error() string { - var parts []string - for _, err := range errs { - parts = append(parts, err.Error()) - } - - return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) -} - -// ErrManifestBlobUnknown returned when a referenced blob cannot be found. -type ErrManifestBlobUnknown struct { - Digest digest.Digest -} - -func (err ErrManifestBlobUnknown) Error() string { - return fmt.Sprintf("unknown blob %v on manifest", err.Digest) -} - -// ErrManifestNameInvalid should be used to denote an invalid manifest -// name. Reason may set, indicating the cause of invalidity. -type ErrManifestNameInvalid struct { - Name string - Reason error -} - -func (err ErrManifestNameInvalid) Error() string { - return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) -} diff --git a/vendor/src/github.com/docker/distribution/manifest/doc.go b/vendor/src/github.com/docker/distribution/manifest/doc.go deleted file mode 100644 index 88367b0a0..000000000 --- a/vendor/src/github.com/docker/distribution/manifest/doc.go +++ /dev/null @@ -1 +0,0 @@ -package manifest diff --git a/vendor/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go deleted file mode 100644 index a2082ec02..000000000 --- a/vendor/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go +++ /dev/null @@ -1,155 +0,0 @@ -package manifestlist - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -// MediaTypeManifestList specifies the mediaType for manifest lists. -const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" - -// SchemaVersion provides a pre-initialized version structure for this -// packages version of the manifest. -var SchemaVersion = manifest.Versioned{ - SchemaVersion: 2, - MediaType: MediaTypeManifestList, -} - -func init() { - manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { - m := new(DeserializedManifestList) - err := m.UnmarshalJSON(b) - if err != nil { - return nil, distribution.Descriptor{}, err - } - - dgst := digest.FromBytes(b) - return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err - } - err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) - if err != nil { - panic(fmt.Sprintf("Unable to register manifest: %s", err)) - } -} - -// PlatformSpec specifies a platform where a particular image manifest is -// applicable. -type PlatformSpec struct { - // Architecture field specifies the CPU architecture, for example - // `amd64` or `ppc64`. - Architecture string `json:"architecture"` - - // OS specifies the operating system, for example `linux` or `windows`. - OS string `json:"os"` - - // OSVersion is an optional field specifying the operating system - // version, for example `10.0.10586`. 
- OSVersion string `json:"os.version,omitempty"` - - // OSFeatures is an optional field specifying an array of strings, - // each listing a required OS feature (for example on Windows `win32k`). - OSFeatures []string `json:"os.features,omitempty"` - - // Variant is an optional field specifying a variant of the CPU, for - // example `ppc64le` to specify a little-endian version of a PowerPC CPU. - Variant string `json:"variant,omitempty"` - - // Features is an optional field specifying an array of strings, each - // listing a required CPU feature (for example `sse4` or `aes`). - Features []string `json:"features,omitempty"` -} - -// A ManifestDescriptor references a platform-specific manifest. -type ManifestDescriptor struct { - distribution.Descriptor - - // Platform specifies which platform the manifest pointed to by the - // descriptor runs on. - Platform PlatformSpec `json:"platform"` -} - -// ManifestList references manifests for various platforms. -type ManifestList struct { - manifest.Versioned - - // Config references the image configuration as a blob. - Manifests []ManifestDescriptor `json:"manifests"` -} - -// References returnes the distribution descriptors for the referenced image -// manifests. -func (m ManifestList) References() []distribution.Descriptor { - dependencies := make([]distribution.Descriptor, len(m.Manifests)) - for i := range m.Manifests { - dependencies[i] = m.Manifests[i].Descriptor - } - - return dependencies -} - -// DeserializedManifestList wraps ManifestList with a copy of the original -// JSON. -type DeserializedManifestList struct { - ManifestList - - // canonical is the canonical byte representation of the Manifest. - canonical []byte -} - -// FromDescriptors takes a slice of descriptors, and returns a -// DeserializedManifestList which contains the resulting manifest list -// and its JSON representation. -func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { - m := ManifestList{ - Versioned: SchemaVersion, - } - - m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) - copy(m.Manifests, descriptors) - - deserialized := DeserializedManifestList{ - ManifestList: m, - } - - var err error - deserialized.canonical, err = json.MarshalIndent(&m, "", " ") - return &deserialized, err -} - -// UnmarshalJSON populates a new ManifestList struct from JSON data. -func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { - m.canonical = make([]byte, len(b), len(b)) - // store manifest list in canonical - copy(m.canonical, b) - - // Unmarshal canonical JSON into ManifestList object - var manifestList ManifestList - if err := json.Unmarshal(m.canonical, &manifestList); err != nil { - return err - } - - m.ManifestList = manifestList - - return nil -} - -// MarshalJSON returns the contents of canonical. If canonical is empty, -// marshals the inner contents. -func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { - if len(m.canonical) > 0 { - return m.canonical, nil - } - - return nil, errors.New("JSON representation not initialized in DeserializedManifestList") -} - -// Payload returns the raw content of the manifest list. The contents can be -// used to calculate the content identifier. 
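FromDescriptors (above) and Payload (next hunk) are essentially the whole producer-side API of this package. A minimal sketch of building a one-entry list; the digest and size are synthesized from placeholder bytes purely for illustration:

package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/manifestlist"
)

func main() {
	// One entry per platform-specific manifest the list points at.
	descriptors := []manifestlist.ManifestDescriptor{{
		Descriptor: distribution.Descriptor{
			Digest:    digest.FromBytes([]byte("fake manifest bytes")),
			Size:      19,
			MediaType: "application/vnd.docker.distribution.manifest.v2+json",
		},
		Platform: manifestlist.PlatformSpec{Architecture: "amd64", OS: "linux"},
	}}

	ml, err := manifestlist.FromDescriptors(descriptors)
	if err != nil {
		panic(err)
	}

	// Payload hands back the canonical JSON produced by FromDescriptors.
	mediaType, payload, _ := ml.Payload()
	fmt.Println(mediaType)        // application/vnd.docker.distribution.manifest.list.v2+json
	fmt.Println(len(payload) > 0) // true
}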
-func (m DeserializedManifestList) Payload() (string, []byte, error) { - return m.MediaType, m.canonical, nil -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/config_builder.go b/vendor/src/github.com/docker/distribution/manifest/schema1/config_builder.go deleted file mode 100644 index 5cdd76796..000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema1/config_builder.go +++ /dev/null @@ -1,283 +0,0 @@ -package schema1 - -import ( - "crypto/sha512" - "encoding/json" - "errors" - "fmt" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" - "github.com/docker/libtrust" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -type diffID digest.Digest - -// gzippedEmptyTar is a gzip-compressed version of an empty tar file -// (1024 NULL bytes) -var gzippedEmptyTar = []byte{ - 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, - 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, -} - -// digestSHA256GzippedEmptyTar is the canonical sha256 digest of -// gzippedEmptyTar -const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - -// configManifestBuilder is a type for constructing manifests from an image -// configuration and generic descriptors. -type configManifestBuilder struct { - // bs is a BlobService used to create empty layer tars in the - // blob store if necessary. - bs distribution.BlobService - // pk is the libtrust private key used to sign the final manifest. - pk libtrust.PrivateKey - // configJSON is configuration supplied when the ManifestBuilder was - // created. - configJSON []byte - // ref contains the name and optional tag provided to NewConfigManifestBuilder. - ref reference.Named - // descriptors is the set of descriptors referencing the layers. - descriptors []distribution.Descriptor - // emptyTarDigest is set to a valid digest if an empty tar has been - // put in the blob store; otherwise it is empty. - emptyTarDigest digest.Digest -} - -// NewConfigManifestBuilder is used to build new manifests for the current -// schema version from an image configuration and a set of descriptors. -// It takes a BlobService so that it can add an empty tar to the blob store -// if the resulting manifest needs empty layers. 
-func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder { - return &configManifestBuilder{ - bs: bs, - pk: pk, - configJSON: configJSON, - ref: ref, - } -} - -// Build produces a final manifest from the given references -func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { - type imageRootFS struct { - Type string `json:"type"` - DiffIDs []diffID `json:"diff_ids,omitempty"` - BaseLayer string `json:"base_layer,omitempty"` - } - - type imageHistory struct { - Created time.Time `json:"created"` - Author string `json:"author,omitempty"` - CreatedBy string `json:"created_by,omitempty"` - Comment string `json:"comment,omitempty"` - EmptyLayer bool `json:"empty_layer,omitempty"` - } - - type imageConfig struct { - RootFS *imageRootFS `json:"rootfs,omitempty"` - History []imageHistory `json:"history,omitempty"` - Architecture string `json:"architecture,omitempty"` - } - - var img imageConfig - - if err := json.Unmarshal(mb.configJSON, &img); err != nil { - return nil, err - } - - if len(img.History) == 0 { - return nil, errors.New("empty history when trying to create schema1 manifest") - } - - if len(img.RootFS.DiffIDs) != len(mb.descriptors) { - return nil, errors.New("number of descriptors and number of layers in rootfs must match") - } - - // Generate IDs for each layer - // For non-top-level layers, create fake V1Compatibility strings that - // fit the format and don't collide with anything else, but don't - // result in runnable images on their own. - type v1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig struct { - Cmd []string - } `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` - } - - fsLayerList := make([]FSLayer, len(img.History)) - history := make([]History, len(img.History)) - - parent := "" - layerCounter := 0 - for i, h := range img.History[:len(img.History)-1] { - var blobsum digest.Digest - if h.EmptyLayer { - if blobsum, err = mb.emptyTar(ctx); err != nil { - return nil, err - } - } else { - if len(img.RootFS.DiffIDs) <= layerCounter { - return nil, errors.New("too many non-empty layers in History section") - } - blobsum = mb.descriptors[layerCounter].Digest - layerCounter++ - } - - v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() - - if i == 0 && img.RootFS.BaseLayer != "" { - // windows-only baselayer setup - baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) - parent = fmt.Sprintf("%x", baseID[:32]) - } - - v1Compatibility := v1Compatibility{ - ID: v1ID, - Parent: parent, - Comment: h.Comment, - Created: h.Created, - Author: h.Author, - } - v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} - if h.EmptyLayer { - v1Compatibility.ThrowAway = true - } - jsonBytes, err := json.Marshal(&v1Compatibility) - if err != nil { - return nil, err - } - - reversedIndex := len(img.History) - i - 1 - history[reversedIndex].V1Compatibility = string(jsonBytes) - fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} - - parent = v1ID - } - - latestHistory := img.History[len(img.History)-1] - - var blobsum digest.Digest - if latestHistory.EmptyLayer { - if blobsum, err = mb.emptyTar(ctx); err != nil { - return nil, err - } - } else { - if len(img.RootFS.DiffIDs) <= layerCounter { - return nil, 
errors.New("too many non-empty layers in History section") - } - blobsum = mb.descriptors[layerCounter].Digest - } - - fsLayerList[0] = FSLayer{BlobSum: blobsum} - dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON))) - - // Top-level v1compatibility string should be a modified version of the - // image config. - transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer) - if err != nil { - return nil, err - } - - history[0].V1Compatibility = string(transformedConfig) - - tag := "" - if tagged, isTagged := mb.ref.(reference.Tagged); isTagged { - tag = tagged.Tag() - } - - mfst := Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: mb.ref.Name(), - Tag: tag, - Architecture: img.Architecture, - FSLayers: fsLayerList, - History: history, - } - - return Sign(&mfst, mb.pk) -} - -// emptyTar pushes a compressed empty tar to the blob store if one doesn't -// already exist, and returns its blobsum. -func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { - if mb.emptyTarDigest != "" { - // Already put an empty tar - return mb.emptyTarDigest, nil - } - - descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar) - switch err { - case nil: - mb.emptyTarDigest = descriptor.Digest - return descriptor.Digest, nil - case distribution.ErrBlobUnknown: - // nop - default: - return "", err - } - - // Add gzipped empty tar to the blob store - descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar) - if err != nil { - return "", err - } - - mb.emptyTarDigest = descriptor.Digest - - return descriptor.Digest, nil -} - -// AppendReference adds a reference to the current ManifestBuilder -func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { - // todo: verification here? - mb.descriptors = append(mb.descriptors, d.Descriptor()) - return nil -} - -// References returns the current references added to this builder -func (mb *configManifestBuilder) References() []distribution.Descriptor { - return mb.descriptors -} - -// MakeV1ConfigFromConfig creates an legacy V1 image config from image config JSON -func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Top-level v1compatibility string should be a modified version of the - // image config. 
- var configAsMap map[string]*json.RawMessage - if err := json.Unmarshal(configJSON, &configAsMap); err != nil { - return nil, err - } - - // Delete fields that didn't exist in old manifest - delete(configAsMap, "rootfs") - delete(configAsMap, "history") - configAsMap["id"] = rawJSON(v1ID) - if parentV1ID != "" { - configAsMap["parent"] = rawJSON(parentV1ID) - } - if throwaway { - configAsMap["throwaway"] = rawJSON(true) - } - - return json.Marshal(configAsMap) -} - -func rawJSON(value interface{}) *json.RawMessage { - jsonval, err := json.Marshal(value) - if err != nil { - return nil - } - return (*json.RawMessage)(&jsonval) -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go deleted file mode 100644 index bff47bde0..000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go +++ /dev/null @@ -1,184 +0,0 @@ -package schema1 - -import ( - "encoding/json" - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/libtrust" -) - -const ( - // MediaTypeManifest specifies the mediaType for the current version. Note - // that for schema version 1, the the media is optionally "application/json". - MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json" - // MediaTypeSignedManifest specifies the mediatype for current SignedManifest version - MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" - // MediaTypeManifestLayer specifies the media type for manifest layers - MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar" -) - -var ( - // SchemaVersion provides a pre-initialized version structure for this - // packages version of the manifest. - SchemaVersion = manifest.Versioned{ - SchemaVersion: 1, - } -) - -func init() { - schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { - sm := new(SignedManifest) - err := sm.UnmarshalJSON(b) - if err != nil { - return nil, distribution.Descriptor{}, err - } - - desc := distribution.Descriptor{ - Digest: digest.FromBytes(sm.Canonical), - Size: int64(len(sm.Canonical)), - MediaType: MediaTypeSignedManifest, - } - return sm, desc, err - } - err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func) - if err != nil { - panic(fmt.Sprintf("Unable to register manifest: %s", err)) - } - err = distribution.RegisterManifestSchema("", schema1Func) - if err != nil { - panic(fmt.Sprintf("Unable to register manifest: %s", err)) - } - err = distribution.RegisterManifestSchema("application/json", schema1Func) - if err != nil { - panic(fmt.Sprintf("Unable to register manifest: %s", err)) - } -} - -// FSLayer is a container struct for BlobSums defined in an image manifest -type FSLayer struct { - // BlobSum is the tarsum of the referenced filesystem image layer - BlobSum digest.Digest `json:"blobSum"` -} - -// History stores unstructured v1 compatibility information -type History struct { - // V1Compatibility is the raw v1 compatibility information - V1Compatibility string `json:"v1Compatibility"` -} - -// Manifest provides the base accessible fields for working with V2 image -// format in the registry. 
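Tying the pieces of config_builder.go together, here is a sketch of the caller's side. It assumes libtrust.GenerateECP256PrivateKey for a throwaway key and that distribution.Descriptor describes itself and so satisfies Describable (both hold in the pre-removal tree but are not shown in this patch); the repository name and helper are hypothetical:

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/reference"
	"github.com/docker/libtrust"
)

// buildSchema1 drives the builder removed above: one AppendReference per
// layer (base to head), then Build, which unmarshals the image config,
// synthesizes v1 history entries, and signs the result.
func buildSchema1(ctx context.Context, bs distribution.BlobService, configJSON []byte, layers []distribution.Descriptor) (distribution.Manifest, error) {
	pk, err := libtrust.GenerateECP256PrivateKey() // throwaway signing key
	if err != nil {
		return nil, err
	}
	ref, err := reference.ParseNamed("example.com/app:latest") // placeholder name
	if err != nil {
		return nil, err
	}
	mb := schema1.NewConfigManifestBuilder(bs, pk, ref, configJSON)
	for _, layer := range layers {
		// distribution.Descriptor satisfies Describable by describing itself.
		if err := mb.AppendReference(layer); err != nil {
			return nil, err
		}
	}
	return mb.Build(ctx)
}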
-type Manifest struct { - manifest.Versioned - - // Name is the name of the image's repository - Name string `json:"name"` - - // Tag is the tag of the image specified by this manifest - Tag string `json:"tag"` - - // Architecture is the host architecture on which this image is intended to - // run - Architecture string `json:"architecture"` - - // FSLayers is a list of filesystem layer blobSums contained in this image - FSLayers []FSLayer `json:"fsLayers"` - - // History is a list of unstructured historical data for v1 compatibility - History []History `json:"history"` -} - -// SignedManifest provides an envelope for a signed image manifest, including -// the format sensitive raw bytes. -type SignedManifest struct { - Manifest - - // Canonical is the canonical byte representation of the ImageManifest, - // without any attached signatures. The manifest byte - // representation cannot change or it will have to be re-signed. - Canonical []byte `json:"-"` - - // all contains the byte representation of the Manifest including signatures - // and is returned by Payload() - all []byte -} - -// UnmarshalJSON populates a new SignedManifest struct from JSON data. -func (sm *SignedManifest) UnmarshalJSON(b []byte) error { - sm.all = make([]byte, len(b), len(b)) - // store manifest and signatures in all - copy(sm.all, b) - - jsig, err := libtrust.ParsePrettySignature(b, "signatures") - if err != nil { - return err - } - - // Resolve the payload in the manifest. - bytes, err := jsig.Payload() - if err != nil { - return err - } - - // sm.Canonical stores the canonical manifest JSON - sm.Canonical = make([]byte, len(bytes), len(bytes)) - copy(sm.Canonical, bytes) - - // Unmarshal canonical JSON into Manifest object - var manifest Manifest - if err := json.Unmarshal(sm.Canonical, &manifest); err != nil { - return err - } - - sm.Manifest = manifest - - return nil -} - -// References returnes the descriptors of this manifests references -func (sm SignedManifest) References() []distribution.Descriptor { - dependencies := make([]distribution.Descriptor, len(sm.FSLayers)) - for i, fsLayer := range sm.FSLayers { - dependencies[i] = distribution.Descriptor{ - MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", - Digest: fsLayer.BlobSum, - } - } - - return dependencies - -} - -// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner -// contents. Applications requiring a marshaled signed manifest should simply -// use Raw directly, since the the content produced by json.Marshal will be -// compacted and will fail signature checks. -func (sm *SignedManifest) MarshalJSON() ([]byte, error) { - if len(sm.all) > 0 { - return sm.all, nil - } - - // If the raw data is not available, just dump the inner content. - return json.Marshal(&sm.Manifest) -} - -// Payload returns the signed content of the signed manifest. -func (sm SignedManifest) Payload() (string, []byte, error) { - return MediaTypeSignedManifest, sm.all, nil -} - -// Signatures returns the signatures as provided by -// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws -// signatures. -func (sm *SignedManifest) Signatures() ([][]byte, error) { - jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures") - if err != nil { - return nil, err - } - - // Resolve the payload in the manifest. 
- return jsig.Signatures() -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/reference_builder.go b/vendor/src/github.com/docker/distribution/manifest/schema1/reference_builder.go deleted file mode 100644 index fc1045f9e..000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema1/reference_builder.go +++ /dev/null @@ -1,98 +0,0 @@ -package schema1 - -import ( - "fmt" - - "errors" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/reference" - "github.com/docker/libtrust" -) - -// referenceManifestBuilder is a type for constructing manifests from schema1 -// dependencies. -type referenceManifestBuilder struct { - Manifest - pk libtrust.PrivateKey -} - -// NewReferenceManifestBuilder is used to build new manifests for the current -// schema version using schema1 dependencies. -func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder { - tag := "" - if tagged, isTagged := ref.(reference.Tagged); isTagged { - tag = tagged.Tag() - } - - return &referenceManifestBuilder{ - Manifest: Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: ref.Name(), - Tag: tag, - Architecture: architecture, - }, - pk: pk, - } -} - -func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { - m := mb.Manifest - if len(m.FSLayers) == 0 { - return nil, errors.New("cannot build manifest with zero layers or history") - } - - m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) - m.History = make([]History, len(mb.Manifest.History)) - copy(m.FSLayers, mb.Manifest.FSLayers) - copy(m.History, mb.Manifest.History) - - return Sign(&m, mb.pk) -} - -// AppendReference adds a reference to the current ManifestBuilder -func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { - r, ok := d.(Reference) - if !ok { - return fmt.Errorf("Unable to add non-reference type to v1 builder") - } - - // Entries need to be prepended - mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) - mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) - return nil - -} - -// References returns the current references added to this builder -func (mb *referenceManifestBuilder) References() []distribution.Descriptor { - refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) - for i := range mb.Manifest.FSLayers { - layerDigest := mb.Manifest.FSLayers[i].BlobSum - history := mb.Manifest.History[i] - ref := Reference{layerDigest, 0, history} - refs[i] = ref.Descriptor() - } - return refs -} - -// Reference describes a manifest v2, schema version 1 dependency. -// An FSLayer associated with a history entry. -type Reference struct { - Digest digest.Digest - Size int64 // if we know it, set it for the descriptor. 
- History History -} - -// Descriptor describes a reference -func (r Reference) Descriptor() distribution.Descriptor { - return distribution.Descriptor{ - MediaType: MediaTypeManifestLayer, - Digest: r.Digest, - Size: r.Size, - } -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/sign.go b/vendor/src/github.com/docker/distribution/manifest/schema1/sign.go deleted file mode 100644 index c862dd812..000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema1/sign.go +++ /dev/null @@ -1,68 +0,0 @@ -package schema1 - -import ( - "crypto/x509" - "encoding/json" - - "github.com/docker/libtrust" -) - -// Sign signs the manifest with the provided private key, returning a -// SignedManifest. This typically won't be used within the registry, except -// for testing. -func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { - p, err := json.MarshalIndent(m, "", " ") - if err != nil { - return nil, err - } - - js, err := libtrust.NewJSONSignature(p) - if err != nil { - return nil, err - } - - if err := js.Sign(pk); err != nil { - return nil, err - } - - pretty, err := js.PrettySignature("signatures") - if err != nil { - return nil, err - } - - return &SignedManifest{ - Manifest: *m, - all: pretty, - Canonical: p, - }, nil -} - -// SignWithChain signs the manifest with the given private key and x509 chain. -// The public key of the first element in the chain must be the public key -// corresponding with the sign key. -func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { - p, err := json.MarshalIndent(m, "", " ") - if err != nil { - return nil, err - } - - js, err := libtrust.NewJSONSignature(p) - if err != nil { - return nil, err - } - - if err := js.SignWithChain(key, chain); err != nil { - return nil, err - } - - pretty, err := js.PrettySignature("signatures") - if err != nil { - return nil, err - } - - return &SignedManifest{ - Manifest: *m, - all: pretty, - Canonical: p, - }, nil -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/verify.go b/vendor/src/github.com/docker/distribution/manifest/schema1/verify.go deleted file mode 100644 index fa8daa56f..000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema1/verify.go +++ /dev/null @@ -1,32 +0,0 @@ -package schema1 - -import ( - "crypto/x509" - - "github.com/Sirupsen/logrus" - "github.com/docker/libtrust" -) - -// Verify verifies the signature of the signed manifest returning the public -// keys used during signing. -func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { - js, err := libtrust.ParsePrettySignature(sm.all, "signatures") - if err != nil { - logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") - return nil, err - } - - return js.Verify() -} - -// VerifyChains verifies the signature of the signed manifest against the -// certificate pool returning the list of verified chains. Signatures without -// an x509 chain are not checked. 
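sign.go and verify.go above are symmetric. A sketch of the round trip, again assuming libtrust.GenerateECP256PrivateKey for a throwaway key and a hypothetical helper name:

package example

import (
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/libtrust"
)

// signAndVerify signs a manifest and then checks the signature, returning
// the public keys that produced valid signatures.
func signAndVerify(m *schema1.Manifest) ([]libtrust.PublicKey, error) {
	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		return nil, err
	}
	sm, err := schema1.Sign(m, pk)
	if err != nil {
		return nil, err
	}
	// sm.Payload() now returns the pretty-signed bytes; re-marshaling sm
	// yields those same bytes, since compacted JSON would break the signature.
	return schema1.Verify(sm)
}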
-func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { - js, err := libtrust.ParsePrettySignature(sm.all, "signatures") - if err != nil { - return nil, err - } - - return js.VerifyChains(ca) -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/src/github.com/docker/distribution/manifest/schema2/builder.go deleted file mode 100644 index ec0bf858d..000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema2/builder.go +++ /dev/null @@ -1,80 +0,0 @@ -package schema2 - -import ( - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" -) - -// builder is a type for constructing manifests. -type builder struct { - // bs is a BlobService used to publish the configuration blob. - bs distribution.BlobService - - // configJSON references - configJSON []byte - - // layers is a list of layer descriptors that gets built by successive - // calls to AppendReference. - layers []distribution.Descriptor -} - -// NewManifestBuilder is used to build new manifests for the current schema -// version. It takes a BlobService so it can publish the configuration blob -// as part of the Build process. -func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder { - mb := &builder{ - bs: bs, - configJSON: make([]byte, len(configJSON)), - } - copy(mb.configJSON, configJSON) - - return mb -} - -// Build produces a final manifest from the given references. -func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { - m := Manifest{ - Versioned: SchemaVersion, - Layers: make([]distribution.Descriptor, len(mb.layers)), - } - copy(m.Layers, mb.layers) - - configDigest := digest.FromBytes(mb.configJSON) - - var err error - m.Config, err = mb.bs.Stat(ctx, configDigest) - switch err { - case nil: - // Override MediaType, since Put always replaces the specified media - // type with application/octet-stream in the descriptor it returns. - m.Config.MediaType = MediaTypeConfig - return FromStruct(m) - case distribution.ErrBlobUnknown: - // nop - default: - return nil, err - } - - // Add config to the blob store - m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) - // Override MediaType, since Put always replaces the specified media - // type with application/octet-stream in the descriptor it returns. - m.Config.MediaType = MediaTypeConfig - if err != nil { - return nil, err - } - - return FromStruct(m) -} - -// AppendReference adds a reference to the current ManifestBuilder. -func (mb *builder) AppendReference(d distribution.Describable) error { - mb.layers = append(mb.layers, d.Descriptor()) - return nil -} - -// References returns the current references added to this builder. -func (mb *builder) References() []distribution.Descriptor { - return mb.layers -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/src/github.com/docker/distribution/manifest/schema2/manifest.go deleted file mode 100644 index 355b5ad4e..000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema2/manifest.go +++ /dev/null @@ -1,128 +0,0 @@ -package schema2 - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -const ( - // MediaTypeManifest specifies the mediaType for the current version. 
- MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" - - // MediaTypeConfig specifies the mediaType for the image configuration. - MediaTypeConfig = "application/vnd.docker.container.image.v1+json" - - // MediaTypeLayer is the mediaType used for layers referenced by the - // manifest. - MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" - - // MediaTypeForeignLayer is the mediaType used for layers that must be - // downloaded from foreign URLs. - MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" -) - -var ( - // SchemaVersion provides a pre-initialized version structure for this - // packages version of the manifest. - SchemaVersion = manifest.Versioned{ - SchemaVersion: 2, - MediaType: MediaTypeManifest, - } -) - -func init() { - schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { - m := new(DeserializedManifest) - err := m.UnmarshalJSON(b) - if err != nil { - return nil, distribution.Descriptor{}, err - } - - dgst := digest.FromBytes(b) - return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err - } - err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func) - if err != nil { - panic(fmt.Sprintf("Unable to register manifest: %s", err)) - } -} - -// Manifest defines a schema2 manifest. -type Manifest struct { - manifest.Versioned - - // Config references the image configuration as a blob. - Config distribution.Descriptor `json:"config"` - - // Layers lists descriptors for the layers referenced by the - // configuration. - Layers []distribution.Descriptor `json:"layers"` -} - -// References returnes the descriptors of this manifests references. -func (m Manifest) References() []distribution.Descriptor { - return m.Layers -} - -// Target returns the target of this signed manifest. -func (m Manifest) Target() distribution.Descriptor { - return m.Config -} - -// DeserializedManifest wraps Manifest with a copy of the original JSON. -// It satisfies the distribution.Manifest interface. -type DeserializedManifest struct { - Manifest - - // canonical is the canonical byte representation of the Manifest. - canonical []byte -} - -// FromStruct takes a Manifest structure, marshals it to JSON, and returns a -// DeserializedManifest which contains the manifest and its JSON representation. -func FromStruct(m Manifest) (*DeserializedManifest, error) { - var deserialized DeserializedManifest - deserialized.Manifest = m - - var err error - deserialized.canonical, err = json.MarshalIndent(&m, "", " ") - return &deserialized, err -} - -// UnmarshalJSON populates a new Manifest struct from JSON data. -func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { - m.canonical = make([]byte, len(b), len(b)) - // store manifest in canonical - copy(m.canonical, b) - - // Unmarshal canonical JSON into Manifest object - var manifest Manifest - if err := json.Unmarshal(m.canonical, &manifest); err != nil { - return err - } - - m.Manifest = manifest - - return nil -} - -// MarshalJSON returns the contents of canonical. If canonical is empty, -// marshals the inner contents. -func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { - if len(m.canonical) > 0 { - return m.canonical, nil - } - - return nil, errors.New("JSON representation not initialized in DeserializedManifest") -} - -// Payload returns the raw content of the manifest. The contents can be used to -// calculate the content identifier. 
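The schema2 flow mirrors schema1 minus the signing: stat-or-put the config blob, append layer descriptors, build. A sketch against the interfaces removed above, with a hypothetical helper name and the same Describable assumption as before:

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema2"
)

// buildSchema2 publishes the image config as a blob and emits a schema2
// manifest referencing it plus the given layers.
func buildSchema2(ctx context.Context, bs distribution.BlobService, configJSON []byte, layers []distribution.Descriptor) (distribution.Manifest, error) {
	mb := schema2.NewManifestBuilder(bs, configJSON)
	for _, layer := range layers {
		if err := mb.AppendReference(layer); err != nil {
			return nil, err
		}
	}
	// Build stats the config blob, uploads it if missing, then produces a
	// DeserializedManifest via FromStruct.
	return mb.Build(ctx)
}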
-func (m DeserializedManifest) Payload() (string, []byte, error) { - return m.MediaType, m.canonical, nil -} diff --git a/vendor/src/github.com/docker/distribution/manifest/versioned.go b/vendor/src/github.com/docker/distribution/manifest/versioned.go deleted file mode 100644 index c57398bde..000000000 --- a/vendor/src/github.com/docker/distribution/manifest/versioned.go +++ /dev/null @@ -1,12 +0,0 @@ -package manifest - -// Versioned provides a struct with the manifest schemaVersion and . Incoming -// content with unknown schema version can be decoded against this struct to -// check the version. -type Versioned struct { - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` - - // MediaType is the media type of this schema. - MediaType string `json:"mediaType,omitempty"` -} diff --git a/vendor/src/github.com/docker/distribution/manifests.go b/vendor/src/github.com/docker/distribution/manifests.go deleted file mode 100644 index 2ac7c8f21..000000000 --- a/vendor/src/github.com/docker/distribution/manifests.go +++ /dev/null @@ -1,117 +0,0 @@ -package distribution - -import ( - "fmt" - "mime" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" -) - -// Manifest represents a registry object specifying a set of -// references and an optional target -type Manifest interface { - // References returns a list of objects which make up this manifest. - // The references are strictly ordered from base to head. A reference - // is anything which can be represented by a distribution.Descriptor - References() []Descriptor - - // Payload provides the serialized format of the manifest, in addition to - // the mediatype. - Payload() (mediatype string, payload []byte, err error) -} - -// ManifestBuilder creates a manifest allowing one to include dependencies. -// Instances can be obtained from a version-specific manifest package. Manifest -// specific data is passed into the function which creates the builder. -type ManifestBuilder interface { - // Build creates the manifest from his builder. - Build(ctx context.Context) (Manifest, error) - - // References returns a list of objects which have been added to this - // builder. The dependencies are returned in the order they were added, - // which should be from base to head. - References() []Descriptor - - // AppendReference includes the given object in the manifest after any - // existing dependencies. If the add fails, such as when adding an - // unsupported dependency, an error may be returned. - AppendReference(dependency Describable) error -} - -// ManifestService describes operations on image manifests. -type ManifestService interface { - // Exists returns true if the manifest exists. - Exists(ctx context.Context, dgst digest.Digest) (bool, error) - - // Get retrieves the manifest specified by the given digest - Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) - - // Put creates or updates the given manifest returning the manifest digest - Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) - - // Delete removes the manifest specified by the given digest. Deleting - // a manifest that doesn't exist will return ErrManifestNotFound - Delete(ctx context.Context, dgst digest.Digest) error -} - -// ManifestEnumerator enables iterating over manifests -type ManifestEnumerator interface { - // Enumerate calls ingester for each manifest. 
- Enumerate(ctx context.Context, ingester func(digest.Digest) error) error -} - -// Describable is an interface for descriptors -type Describable interface { - Descriptor() Descriptor -} - -// ManifestMediaTypes returns the supported media types for manifests. -func ManifestMediaTypes() (mediaTypes []string) { - for t := range mappings { - if t != "" { - mediaTypes = append(mediaTypes, t) - } - } - return -} - -// UnmarshalFunc implements manifest unmarshalling a given MediaType -type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) - -var mappings = make(map[string]UnmarshalFunc, 0) - -// UnmarshalManifest looks up manifest unmarshal functions based on -// MediaType -func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { - // Need to look up by the actual media type, not the raw contents of - // the header. Strip semicolons and anything following them. - var mediatype string - if ctHeader != "" { - var err error - mediatype, _, err = mime.ParseMediaType(ctHeader) - if err != nil { - return nil, Descriptor{}, err - } - } - - unmarshalFunc, ok := mappings[mediatype] - if !ok { - unmarshalFunc, ok = mappings[""] - if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) - } - } - - return unmarshalFunc(p) -} - -// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This -// should be called from specific -func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error { - if _, ok := mappings[mediatype]; ok { - return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype) - } - mappings[mediatype] = u - return nil -} diff --git a/vendor/src/github.com/docker/distribution/reference/reference.go b/vendor/src/github.com/docker/distribution/reference/reference.go deleted file mode 100644 index bb09fa25d..000000000 --- a/vendor/src/github.com/docker/distribution/reference/reference.go +++ /dev/null @@ -1,334 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [hostname '/'] component ['/' component]* -// hostname := hostcomponent ['.' hostcomponent]* [':' port-number] -// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -package reference - -import ( - "errors" - "fmt" - - "github.com/docker/distribution/digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. 
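UnmarshalManifest above is a media-type dispatch over the init()-registered schemas, so the registrations only happen if the schema packages are linked into the binary. A sketch of decoding a response body, with a hypothetical wrapper name:

package example

import (
	"github.com/docker/distribution"

	// Imported for their init() side effects: each registers its media
	// type(s) with RegisterManifestSchema.
	_ "github.com/docker/distribution/manifest/manifestlist"
	_ "github.com/docker/distribution/manifest/schema1"
	_ "github.com/docker/distribution/manifest/schema2"
)

// decodeManifest picks the right unmarshaler from the Content-Type header;
// schema1 also registers "" and "application/json" as fallbacks.
func decodeManifest(contentType string, body []byte) (distribution.Manifest, distribution.Descriptor, error) {
	return distribution.UnmarshalManifest(contentType, body)
}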
- ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// in which it can be referenced by -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with hostname and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -func SplitHostname(named Named) (string, string) { - name := named.Name() - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. 
-func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - // TODO(dmcgowan): Provide more specific and helpful error - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - ref := reference{ - name: matches[1], - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.ParseDigest(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name, otherwise an error is -// returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. -func ParseNamed(s string) (Named, error) { - ref, err := Parse(s) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - if !anchoredNameRegexp.MatchString(name) { - return nil, ErrReferenceInvalidFormat - } - return repository(name), nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - return taggedReference{ - name: name.Name(), - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. 
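The grammar in the package comment maps directly onto the interfaces above; a short sketch of parsing a reference and narrowing it through type assertions:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	ref, err := reference.Parse("example.com:5000/team/app:v1.0")
	if err != nil {
		panic(err)
	}
	// Narrow the opaque Reference via the interfaces it satisfies.
	if named, ok := ref.(reference.NamedTagged); ok {
		host, remainder := reference.SplitHostname(named)
		fmt.Println(host)        // example.com:5000
		fmt.Println(remainder)   // team/app
		fmt.Println(named.Tag()) // v1.0
	}
}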
-func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - return canonicalReference{ - name: name.Name(), - digest: digest, - }, nil -} - -func getBestReferenceType(ref reference) Reference { - if ref.name == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - name: ref.name, - digest: ref.digest, - } - } - return repository(ref.name) - } - if ref.digest == "" { - return taggedReference{ - name: ref.name, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - name string - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.name + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Name() string { - return r.name -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository string - -func (r repository) String() string { - return string(r) -} - -func (r repository) Name() string { - return string(r) -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return d.String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - name string - tag string -} - -func (t taggedReference) String() string { - return t.name + ":" + t.tag -} - -func (t taggedReference) Name() string { - return t.name -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - name string - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.name + "@" + c.digest.String() -} - -func (c canonicalReference) Name() string { - return c.name -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/src/github.com/docker/distribution/reference/regexp.go b/vendor/src/github.com/docker/distribution/reference/regexp.go deleted file mode 100644 index 9a7d366bc..000000000 --- a/vendor/src/github.com/docker/distribution/reference/regexp.go +++ /dev/null @@ -1,124 +0,0 @@ -package reference - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // hostnameComponentRegexp restricts the registry hostname component of a - // repository name to start with a component as defined by hostnameRegexp - // and followed by an optional port. - hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // hostnameRegexp defines the structure of potential hostname components - // that may be part of image names. 
This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - hostnameRegexp = expression( - hostnameComponentRegexp, - optional(repeated(literal(`.`), hostnameComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the hostname and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(hostnameRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // hostname and trailing components. - anchoredNameRegexp = anchored( - optional(capture(hostnameRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. 
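Of the expressions above, only the anchored ReferenceRegexp is meant for whole-string parsing; reading the combinators, its three capturing groups are name, tag, and digest, in that order (everything inside them is non-capturing). A small sketch:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	m := reference.ReferenceRegexp.FindStringSubmatch("example.com/app:v1")
	if m == nil {
		panic("not a valid reference")
	}
	fmt.Println(m[1]) // example.com/app  (name)
	fmt.Println(m[2]) // v1               (tag)
	fmt.Println(m[3]) //                  (digest: empty here)
}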
-func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/vendor/src/github.com/docker/distribution/registry.go b/vendor/src/github.com/docker/distribution/registry.go deleted file mode 100644 index 1ede31ebb..000000000 --- a/vendor/src/github.com/docker/distribution/registry.go +++ /dev/null @@ -1,97 +0,0 @@ -package distribution - -import ( - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" -) - -// Scope defines the set of items that match a namespace. -type Scope interface { - // Contains returns true if the name belongs to the namespace. - Contains(name string) bool -} - -type fullScope struct{} - -func (f fullScope) Contains(string) bool { - return true -} - -// GlobalScope represents the full namespace scope which contains -// all other scopes. -var GlobalScope = Scope(fullScope{}) - -// Namespace represents a collection of repositories, addressable by name. -// Generally, a namespace is backed by a set of one or more services, -// providing facilities such as registry access, trust, and indexing. -type Namespace interface { - // Scope describes the names that can be used with this Namespace. The - // global namespace will have a scope that matches all names. The scope - // effectively provides an identity for the namespace. - Scope() Scope - - // Repository should return a reference to the named repository. The - // registry may or may not have the repository but should always return a - // reference. - Repository(ctx context.Context, name reference.Named) (Repository, error) - - // Repositories fills 'repos' with a lexigraphically sorted catalog of repositories - // up to the size of 'repos' and returns the value 'n' for the number of entries - // which were filled. 'last' contains an offset in the catalog, and 'err' will be - // set to io.EOF if there are no more entries to obtain. - Repositories(ctx context.Context, repos []string, last string) (n int, err error) - - // Blobs returns a blob enumerator to access all blobs - Blobs() BlobEnumerator - - // BlobStatter returns a BlobStatter to control - BlobStatter() BlobStatter -} - -// RepositoryEnumerator describes an operation to enumerate repositories -type RepositoryEnumerator interface { - Enumerate(ctx context.Context, ingester func(string) error) error -} - -// ManifestServiceOption is a function argument for Manifest Service methods -type ManifestServiceOption interface { - Apply(ManifestService) error -} - -// WithTag allows a tag to be passed into Put -func WithTag(tag string) ManifestServiceOption { - return WithTagOption{tag} -} - -// WithTagOption holds a tag -type WithTagOption struct{ Tag string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithTagOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// Repository is a named collection of manifests and layers. -type Repository interface { - // Named returns the name of the repository. - Named() reference.Named - - // Manifests returns a reference to this repository's manifest service. - // with the supplied options applied. - Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) - - // Blobs returns a reference to this repository's blob service. - Blobs(ctx context.Context) BlobStore - - // TODO(stevvooe): The above BlobStore return can probably be relaxed to - // be a BlobService for use with clients. 
This will allow such - // implementations to avoid implementing ServeBlob. - - // Tags returns a reference to this repositories tag service - Tags(ctx context.Context) TagService -} - -// TODO(stevvooe): Must add close methods to all these. May want to change the -// way instances are created to better reflect internal dependency -// relationships. diff --git a/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go deleted file mode 100644 index 6d9bb4b62..000000000 --- a/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go +++ /dev/null @@ -1,267 +0,0 @@ -package errcode - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ErrorCoder is the base interface for ErrorCode and Error allowing -// users of each to just call ErrorCode to get the real ID of each -type ErrorCoder interface { - ErrorCode() ErrorCode -} - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - -var _ error = ErrorCode(0) - -// ErrorCode just returns itself -func (ec ErrorCode) ErrorCode() ErrorCode { - return ec -} - -// Error returns the ID/Value -func (ec ErrorCode) Error() string { - // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. - return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) -} - -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returned the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// WithMessage creates a new Error struct based on the passed-in info and -// overrides the Message property. -func (ec ErrorCode) WithMessage(message string) Error { - return Error{ - Code: ec, - Message: message, - } -} - -// WithDetail creates a new Error struct based on the passed-in info and -// set the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithDetail(detail) -} - -// WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithArgs(args...) -} - -// Error provides a wrapper around ErrorCode with extra Details provided. 
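The errcode machinery above separates the stable code from the per-request message and detail. A sketch using codes that register.go (further below) defines:

package main

import (
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	// Attach request-specific detail; the descriptor supplies the rest.
	err := errcode.ErrorCodeDenied.WithDetail(map[string]string{"repository": "team/app"})

	fmt.Println(err.Error())                          // denied: requested access to the resource is denied
	fmt.Println(err.Code.Descriptor().HTTPStatusCode) // 403
}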
-type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` - - // TODO(duglin): See if we need an "args" property so we can do the - // variable substitution right before showing the message to the user -} - -var _ error = Error{} - -// ErrorCode returns the ID/Value of this Error -func (e Error) ErrorCode() ErrorCode { - return e.Code -} - -// Error returns a human readable representation of the error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) -} - -// WithDetail will return a new Error, based on the current one, but with -// some Detail info added -func (e Error) WithDetail(detail interface{}) Error { - return Error{ - Code: e.Code, - Message: e.Message, - Detail: detail, - } -} - -// WithArgs uses the passed-in list of interface{} as the substitution -// variables in the Error's Message string, but returns a new Error -func (e Error) WithArgs(args ...interface{}) Error { - return Error{ - Code: e.Code, - Message: fmt.Sprintf(e.Code.Message(), args...), - Detail: e.Detail, - } -} - -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often captilized with - // underscores, to identify the error code. This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable decription of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the errors purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCode provides the http status code that is associated with - // this error condition. - HTTPStatusCode int -} - -// ParseErrorCode returns the value by the string error code. -// `ErrorCodeUnknown` will be returned if the error is not known. -func ParseErrorCode(value string) ErrorCode { - ed, ok := idToDescriptors[value] - if ok { - return ed.Code - } - - return ErrorCodeUnknown -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors []error - -var _ error = Errors{} - -func (errs Errors) Error() string { - switch len(errs) { - case 0: - return "" - case 1: - return errs[0].Error() - default: - msg := "errors:\n" - for _, err := range errs { - msg += err.Error() + "\n" - } - return msg - } -} - -// Len returns the current number of errors. 
-func (errs Errors) Len() int { - return len(errs) -} - -// MarshalJSON converts slice of error, ErrorCode or Error into a -// slice of Error - then serializes -func (errs Errors) MarshalJSON() ([]byte, error) { - var tmpErrs struct { - Errors []Error `json:"errors,omitempty"` - } - - for _, daErr := range errs { - var err Error - - switch daErr.(type) { - case ErrorCode: - err = daErr.(ErrorCode).WithDetail(nil) - case Error: - err = daErr.(Error) - default: - err = ErrorCodeUnknown.WithDetail(daErr) - - } - - // If the Error struct was setup and they forgot to set the - // Message field (meaning its "") then grab it from the ErrCode - msg := err.Message - if msg == "" { - msg = err.Code.Message() - } - - tmpErrs.Errors = append(tmpErrs.Errors, Error{ - Code: err.Code, - Message: msg, - Detail: err.Detail, - }) - } - - return json.Marshal(tmpErrs) -} - -// UnmarshalJSON deserializes []Error and then converts it into slice of -// Error or ErrorCode -func (errs *Errors) UnmarshalJSON(data []byte) error { - var tmpErrs struct { - Errors []Error - } - - if err := json.Unmarshal(data, &tmpErrs); err != nil { - return err - } - - var newErrs Errors - for _, daErr := range tmpErrs.Errors { - // If Message is empty or exactly matches the Code's message string - // then just use the Code, no need for a full Error struct - if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { - // Error's w/o details get converted to ErrorCode - newErrs = append(newErrs, daErr.Code) - } else { - // Error's w/ details are untouched - newErrs = append(newErrs, Error{ - Code: daErr.Code, - Message: daErr.Message, - Detail: daErr.Detail, - }) - } - } - - *errs = newErrs - return nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go deleted file mode 100644 index 49a64a86e..000000000 --- a/vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go +++ /dev/null @@ -1,44 +0,0 @@ -package errcode - -import ( - "encoding/json" - "net/http" -) - -// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err -// and sets the content-type header to 'application/json'. It will handle -// ErrorCoder and Errors, and if necessary will create an envelope. -func ServeJSON(w http.ResponseWriter, err error) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - var sc int - - switch errs := err.(type) { - case Errors: - if len(errs) < 1 { - break - } - - if err, ok := errs[0].(ErrorCoder); ok { - sc = err.ErrorCode().Descriptor().HTTPStatusCode - } - case ErrorCoder: - sc = errs.ErrorCode().Descriptor().HTTPStatusCode - err = Errors{err} // create an envelope. - default: - // We just have an unhandled error type, so just place in an envelope - // and move along. 
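The wire format produced by the MarshalJSON above is a flat {"errors": [...]} envelope. A minimal round-trip sketch, with hypothetical trimmed-down types standing in for Error and Errors:

package main

import (
	"encoding/json"
	"fmt"
)

// wireError and envelope are illustrative stand-ins, not the vendored types.
type wireError struct {
	Code    string      `json:"code"`
	Message string      `json:"message"`
	Detail  interface{} `json:"detail,omitempty"`
}

type envelope struct {
	Errors []wireError `json:"errors,omitempty"`
}

func main() {
	out, err := json.Marshal(envelope{Errors: []wireError{{
		Code:    "BLOB_UNKNOWN",
		Message: "blob unknown to registry",
		Detail:  map[string]string{"digest": "sha256:..."}, // placeholder digest
	}}})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))

	var back envelope
	if err := json.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Errors[0].Code) // BLOB_UNKNOWN
}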
- err = Errors{err} - } - - if sc == 0 { - sc = http.StatusInternalServerError - } - - w.WriteHeader(sc) - - if err := json.NewEncoder(w).Encode(err); err != nil { - return err - } - - return nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/src/github.com/docker/distribution/registry/api/errcode/register.go deleted file mode 100644 index d1e8826c6..000000000 --- a/vendor/src/github.com/docker/distribution/registry/api/errcode/register.go +++ /dev/null @@ -1,138 +0,0 @@ -package errcode - -import ( - "fmt" - "net/http" - "sort" - "sync" -) - -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -var ( - // ErrorCodeUnknown is a generic error that can be used as a last - // resort if there is no situation-specific error message that can be used - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - HTTPStatusCode: http.StatusMethodNotAllowed, - }) - - // ErrorCodeUnauthorized is returned if a request requires - // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "authentication required", - Description: `The access controller was unable to authenticate - the client. Often this will be accompanied by a - Www-Authenticate HTTP response header indicating how to - authenticate.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - - // ErrorCodeDenied is returned if a client does not have sufficient - // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ - Value: "DENIED", - Message: "requested access to the resource is denied", - Description: `The access controller denied access for the - operation on a resource.`, - HTTPStatusCode: http.StatusForbidden, - }) - - // ErrorCodeUnavailable provides a common error to report unavailability - // of a service or endpoint. - ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ - Value: "UNAVAILABLE", - Message: "service unavailable", - Description: "Returned when a service is not available", - HTTPStatusCode: http.StatusServiceUnavailable, - }) - - // ErrorCodeTooManyRequests is returned if a client attempts too many - // times to contact a service endpoint. 
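ServeJSON's contract is easy to demonstrate in isolation: the first error's descriptor decides the HTTP status (falling back to 500), and the body is the JSON envelope. A sketch under that assumption; apiError and serveJSON here are hypothetical stand-ins, not the deleted symbols:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// apiError is a stand-in; status plays the role of the descriptor's
// HTTPStatusCode.
type apiError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
	status  int
}

func serveJSON(w http.ResponseWriter, errs ...apiError) error {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	sc := http.StatusInternalServerError
	if len(errs) > 0 && errs[0].status != 0 {
		sc = errs[0].status // the first error decides the status, as above
	}
	w.WriteHeader(sc)
	return json.NewEncoder(w).Encode(map[string][]apiError{"errors": errs})
}

func main() {
	rec := httptest.NewRecorder()
	_ = serveJSON(rec, apiError{
		Code:    "UNAUTHORIZED",
		Message: "authentication required",
		status:  http.StatusUnauthorized,
	})
	fmt.Println(rec.Code)          // 401
	fmt.Println(rec.Body.String()) // {"errors":[{"code":"UNAUTHORIZED",...}]}
}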
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ - Value: "TOOMANYREQUESTS", - Message: "too many requests", - Description: `Returned when a client attempts to contact a - service too many times`, - HTTPStatusCode: http.StatusTooManyRequests, - }) -) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - - descriptor.Code = ErrorCode(nextCode) - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return descriptor.Code -} - -type byValue []ErrorDescriptor - -func (a byValue) Len() int { return len(a) } -func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - desc := groupToDescriptors[name] - sort.Sort(byValue(desc)) - return desc -} - -// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are -// registered, irrespective of what group they're in -func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go deleted file mode 100644 index fc42c1c41..000000000 --- a/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go +++ /dev/null @@ -1,1569 +0,0 @@ -package v2 - -import ( - "net/http" - "regexp" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" -) - -var ( - nameParameterDescriptor = ParameterDescriptor{ - Name: "name", - Type: "string", - Format: reference.NameRegexp.String(), - Required: true, - Description: `Name of the target repository.`, - } - - referenceParameterDescriptor = ParameterDescriptor{ - Name: "reference", - Type: "string", - Format: reference.TagRegexp.String(), - Required: true, - Description: `Tag or digest of the target manifest.`, - } - - uuidParameterDescriptor = ParameterDescriptor{ - Name: "uuid", - Type: "opaque", - Required: true, - Description: "A uuid identifying the upload. 
This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", - } - - digestPathParameter = ParameterDescriptor{ - Name: "digest", - Type: "path", - Required: true, - Format: digest.DigestRegexp.String(), - Description: `Digest of desired blob.`, - } - - hostHeader = ParameterDescriptor{ - Name: "Host", - Type: "string", - Description: "Standard HTTP Host Header. Should be set to the registry host.", - Format: "", - Examples: []string{"registry-1.docker.io"}, - } - - authHeader = ParameterDescriptor{ - Name: "Authorization", - Type: "string", - Description: "An RFC7235 compliant authorization header.", - Format: " ", - Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, - } - - authChallengeHeader = ParameterDescriptor{ - Name: "WWW-Authenticate", - Type: "string", - Description: "An RFC7235 compliant authentication challenge header.", - Format: ` realm="", ..."`, - Examples: []string{ - `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, - }, - } - - contentLengthZeroHeader = ParameterDescriptor{ - Name: "Content-Length", - Description: "The `Content-Length` header must be zero and the body must be empty.", - Type: "integer", - Format: "0", - } - - dockerUploadUUIDHeader = ParameterDescriptor{ - Name: "Docker-Upload-UUID", - Description: "Identifies the docker upload uuid for the current request.", - Type: "uuid", - Format: "", - } - - digestHeader = ParameterDescriptor{ - Name: "Docker-Content-Digest", - Description: "Digest of the targeted content for the request.", - Type: "digest", - Format: "", - } - - linkHeader = ParameterDescriptor{ - Name: "Link", - Type: "link", - Description: "RFC5988 compliant rel='next' with URL to next result set, if available", - Format: `<?n=&last=>; rel="next"`, - } - - paginationParameters = []ParameterDescriptor{ - { - Name: "n", - Type: "integer", - Description: "Limit the number of entries in each response. 
It not present, all entries will be returned.", - Format: "", - Required: false, - }, - { - Name: "last", - Type: "string", - Description: "Result set will include values lexically after last.", - Format: "", - Required: false, - }, - } - - unauthorizedResponseDescriptor = ResponseDescriptor{ - Name: "Authentication Required", - StatusCode: http.StatusUnauthorized, - Description: "The client is not authenticated.", - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - } - - repositoryNotFoundResponseDescriptor = ResponseDescriptor{ - Name: "No Such Repository Error", - StatusCode: http.StatusNotFound, - Description: "The repository is not known to the registry.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - }, - } - - deniedResponseDescriptor = ResponseDescriptor{ - Name: "Access Denied", - StatusCode: http.StatusForbidden, - Description: "The client does not have required access to the repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeDenied, - }, - } -) - -const ( - manifestBody = `{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": "" - }, - ... - ] - ], - "history": , - "signature": -}` - - errorsBody = `{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -}` - - unauthorizedErrorsBody = `{ - "errors:" [ - { - "code": "UNAUTHORIZED", - "message": "access to the requested resource is not authorized", - "detail": ... - }, - ... - ] -}` -) - -// APIDescriptor exports descriptions of the layout of the v2 registry API. -var APIDescriptor = struct { - // RouteDescriptors provides a list of the routes available in the API. - RouteDescriptors []RouteDescriptor -}{ - RouteDescriptors: routeDescriptors, -} - -// RouteDescriptor describes a route specified by name. -type RouteDescriptor struct { - // Name is the name of the route, as specified in RouteNameXXX exports. - // These names a should be considered a unique reference for a route. If - // the route is registered with gorilla, this is the name that will be - // used. - Name string - - // Path is a gorilla/mux-compatible regexp that can be used to match the - // route. For any incoming method and path, only one route descriptor - // should match. - Path string - - // Entity should be a short, human-readalbe description of the object - // targeted by the endpoint. - Entity string - - // Description should provide an accurate overview of the functionality - // provided by the route. - Description string - - // Methods should describe the various HTTP methods that may be used on - // this route, including request and response formats. 
- Methods []MethodDescriptor -} - -// MethodDescriptor provides a description of the requests that may be -// conducted with the target method. -type MethodDescriptor struct { - - // Method is an HTTP method, such as GET, PUT or POST. - Method string - - // Description should provide an overview of the functionality provided by - // the covered method, suitable for use in documentation. Use of markdown - // here is encouraged. - Description string - - // Requests is a slice of request descriptors enumerating how this - // endpoint may be used. - Requests []RequestDescriptor -} - -// RequestDescriptor covers a particular set of headers and parameters that -// can be carried out with the parent method. Its most helpful to have one -// RequestDescriptor per API use case. -type RequestDescriptor struct { - // Name provides a short identifier for the request, usable as a title or - // to provide quick context for the particular request. - Name string - - // Description should cover the requests purpose, covering any details for - // this particular use case. - Description string - - // Headers describes headers that must be used with the HTTP request. - Headers []ParameterDescriptor - - // PathParameters enumerate the parameterized path components for the - // given request, as defined in the route's regular expression. - PathParameters []ParameterDescriptor - - // QueryParameters provides a list of query parameters for the given - // request. - QueryParameters []ParameterDescriptor - - // Body describes the format of the request body. - Body BodyDescriptor - - // Successes enumerates the possible responses that are considered to be - // the result of a successful request. - Successes []ResponseDescriptor - - // Failures covers the possible failures from this particular request. - Failures []ResponseDescriptor -} - -// ResponseDescriptor describes the components of an API response. -type ResponseDescriptor struct { - // Name provides a short identifier for the response, usable as a title or - // to provide quick context for the particular response. - Name string - - // Description should provide a brief overview of the role of the - // response. - Description string - - // StatusCode specifies the status received by this particular response. - StatusCode int - - // Headers covers any headers that may be returned from the response. - Headers []ParameterDescriptor - - // Fields describes any fields that may be present in the response. - Fields []ParameterDescriptor - - // ErrorCodes enumerates the error codes that may be returned along with - // the response. - ErrorCodes []errcode.ErrorCode - - // Body describes the body of the response, if any. - Body BodyDescriptor -} - -// BodyDescriptor describes a request body and its expected content type. For -// the most part, it should be example json or some placeholder for body -// data in documentation. -type BodyDescriptor struct { - ContentType string - Format string -} - -// ParameterDescriptor describes the format of a request parameter, which may -// be a header, path parameter or query parameter. -type ParameterDescriptor struct { - // Name is the name of the parameter, either of the path component or - // query parameter. - Name string - - // Type specifies the type of the parameter, such as string, integer, etc. - Type string - - // Description provides a human-readable description of the parameter. - Description string - - // Required means the field is required when set. 
- Required bool - - // Format is a specifying the string format accepted by this parameter. - Format string - - // Regexp is a compiled regular expression that can be used to validate - // the contents of the parameter. - Regexp *regexp.Regexp - - // Examples provides multiple examples for the values that might be valid - // for this parameter. - Examples []string -} - -var routeDescriptors = []RouteDescriptor{ - { - Name: RouteNameBase, - Path: "/v2/", - Entity: "Base", - Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Check that the endpoint implements Docker Registry API V2.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Successes: []ResponseDescriptor{ - { - Description: "The API implements V2 protocol and is accessible.", - StatusCode: http.StatusOK, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The registry does not implement the V2 API.", - StatusCode: http.StatusNotFound, - }, - unauthorizedResponseDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameTags, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", - Entity: "Tags", - Description: "Retrieve information about tags.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the tags under the repository identified by `name`.", - Requests: []RequestDescriptor{ - { - Name: "Tags", - Description: "Return all tags for the repository", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... - ] -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - { - Name: "Tags Paginated", - Description: "Return a portion of the tags for the specified repository.", - PathParameters: []ParameterDescriptor{nameParameterDescriptor}, - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... 
- ], -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameManifest, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", - Entity: "Manifest", - Description: "Create, update, delete and retrieve manifests.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The name or reference was invalid.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The canonical location url of the uploaded manifest.", - Format: "", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Manifest", - Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", - StatusCode: http.StatusBadRequest, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - ErrorCodeManifestInvalid, - ErrorCodeManifestUnverified, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - { - Name: "Missing Layer(s)", - Description: "One or more layers may be missing during a manifest upload. 
If so, the missing layers will be enumerated in the error response.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": "" - } - }, - ... - ] -}`, - }, - }, - { - Name: "Not allowed", - Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Reference", - Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - { - Name: "Unknown Manifest", - Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Name: "Not allowed", - Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlob, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", - Entity: "Blob", - Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Name: "Fetch Blob", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob content.", - Format: "", - }, - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - { - Description: "The blob identified by `digest` is available at the provided location.", - StatusCode: http.StatusTemporaryRedirect, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The location where the layer should be accessible.", - Format: "", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - { - Name: "Fetch Blob Part", - Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Range", - Type: "string", - Description: "HTTP Range header specifying blob chunk.", - Format: "bytes=-", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", - StatusCode: http.StatusPartialContent, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob chunk.", - Format: "", - }, - { - Name: "Content-Range", - Type: "byte range", - Description: "Content range of blob chunk.", - Format: "bytes -/", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the blob identified by `name` and `digest`", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "0", - Format: "0", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - { - Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", - StatusCode: http.StatusMethodNotAllowed, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - - // TODO(stevvooe): We may want to add a PUT request here to - // kickoff an upload of a blob, integrated with the blob upload - // API. - }, - }, - - { - Name: RouteNameBlobUpload, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", - Entity: "Initiate Blob Upload", - Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", - Methods: []MethodDescriptor{ - { - Method: "POST", - Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", - Requests: []RequestDescriptor{ - { - Name: "Initiate Monolithic Blob Upload", - Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octect-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been created in the registry and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - { - Name: "Initiate Resumable Blob Upload", - Description: "Initiate a resumable blob upload with an empty request body.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Format: "0-0", - Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", - }, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - { - Name: "Mount Blob", - Description: "Mount a blob identified by the `mount` parameter from another repository.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "mount", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of blob to mount from the source repository.`, - }, - { - Name: "from", - Type: "query", - Format: "", - Regexp: reference.NameRegexp, - Description: `Name of the source repository.`, - }, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been mounted in the repository and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", - Entity: "Blob Upload", - Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", - Requests: []RequestDescriptor{ - { - Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Progress", - Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - { - Method: "PATCH", - Description: "Upload a chunk of data for the specified upload.", - Requests: []RequestDescriptor{ - { - Name: "Stream upload", - Description: "Upload a stream of data to upload without completing the upload.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Data Accepted", - Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - { - Name: "Chunked upload", - Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Required: true, - Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", - }, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the chunk being uploaded, corresponding the length of the request body.", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Chunk Accepted", - Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", - Requests: []RequestDescriptor{ - { - Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the data being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "string", - Format: "", - Regexp: digest.DigestRegexp, - Required: true, - Description: `Digest of uploaded blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Complete", - Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - Description: "The canonical location of the blob for retrieval", - }, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - errcode.ErrorCodeUnsupported, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", - Requests: []RequestDescriptor{ - { - Description: "Cancel the upload specified by `uuid`.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Deleted", - Description: "The upload has been successfully deleted.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "An error was encountered processing the delete. The client may ignore this error.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameCatalog, - Path: "/v2/_catalog", - Entity: "Catalog", - Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve a sorted, json list of repositories available in the registry.", - Requests: []RequestDescriptor{ - { - Name: "Catalog Fetch", - Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", - Successes: []ResponseDescriptor{ - { - Description: "Returns the unabridged list of repositories as a json response.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] -}`, - }, - }, - }, - }, - { - Name: "Catalog Fetch Paginated", - Description: "Return the specified portion of repositories.", - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] - "next": "?last=&n=" -}`, - }, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - }, - }, - }, - }, - }, - }, - }, -} - -var routeDescriptorsMap map[string]RouteDescriptor - -func init() { - routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) - - for _, descriptor := range routeDescriptors { - routeDescriptorsMap[descriptor.Name] = descriptor - } -} diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go deleted file mode 100644 index cde011959..000000000 --- a/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package v2 describes routes, urls and the error codes used in the Docker -// Registry JSON HTTP API V2. In addition to declarations, descriptors are -// provided for routes and error codes that can be used for implementation and -// automatically generating documentation. -// -// Definitions here are considered to be locked down for the V2 registry api. -// Any changes must be considered carefully and should not proceed without a -// change proposal in docker core. 
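One payoff of keeping the routes as data in descriptor tables like the one above, rather than as code, is that the same table can drive documentation generation. A sketch of that idea, with trimmed stand-ins for RouteDescriptor and MethodDescriptor:

package main

import "fmt"

// Trimmed stand-ins for the deleted descriptor types.
type methodDescriptor struct {
	Method      string
	Description string
}

type routeDescriptor struct {
	Name    string
	Path    string
	Methods []methodDescriptor
}

func main() {
	routes := []routeDescriptor{
		{Name: "base", Path: "/v2/", Methods: []methodDescriptor{
			{Method: "GET", Description: "API version check"}}},
		{Name: "tags", Path: "/v2/{name}/tags/list", Methods: []methodDescriptor{
			{Method: "GET", Description: "list tags under the repository"}}},
	}
	// Walk the table and emit an endpoint summary, one line per method.
	for _, r := range routes {
		for _, m := range r.Methods {
			fmt.Printf("%-6s %-24s %s\n", m.Method, r.Path, m.Description)
		}
	}
}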
-package v2 diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go deleted file mode 100644 index 97d6923aa..000000000 --- a/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go +++ /dev/null @@ -1,136 +0,0 @@ -package v2 - -import ( - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -const errGroup = "registry.api.v2" - -var ( - // ErrorCodeDigestInvalid is returned when uploading a blob if the - // provided digest does not match the blob contents. - ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DIGEST_INVALID", - Message: "provided digest did not match uploaded content", - Description: `When a blob is uploaded, the registry will check that - the content matches the digest provided by the client. The error may - include a detail structure with the key "digest", including the - invalid digest string. This error may also be returned when a manifest - includes an invalid layer digest.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeSizeInvalid is returned when uploading a blob if the provided - ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "SIZE_INVALID", - Message: "provided length did not match content length", - Description: `When a layer is uploaded, the provided size will be - checked against the uploaded content. If they do not match, this error - will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameInvalid is returned when the name in the manifest does not - // match the provided name. - ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_INVALID", - Message: "invalid repository name", - Description: `Invalid repository name encountered either during - manifest validation or any API operation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeTagInvalid is returned when the tag in the manifest does not - // match the provided tag. - ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "TAG_INVALID", - Message: "manifest tag did not match URI", - Description: `During a manifest upload, if the tag in the manifest - does not match the uri tag, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameUnknown when the repository name is not known. - ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_UNKNOWN", - Message: "repository name not known to registry", - Description: `This is returned if the name used during an operation is - unknown to the registry.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestUnknown returned when image manifest is unknown. - ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNKNOWN", - Message: "manifest unknown", - Description: `This error is returned when the manifest, identified by - name and tag is unknown to the repository.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestInvalid returned when an image manifest is invalid, - // typically during a PUT operation. This error encompasses all errors - // encountered during manifest validation that aren't signature errors. 
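Because the Value strings registered above are the stable, documented identifiers, clients typically branch on them rather than on the HTTP status alone. A hedged sketch of such client-side handling; classify is a hypothetical helper, and the code values are the ones registered in this file:

package main

import (
	"encoding/json"
	"fmt"
)

// classify decodes a registry error envelope and buckets the first code.
func classify(body []byte) string {
	var env struct {
		Errors []struct {
			Code string `json:"code"`
		} `json:"errors"`
	}
	if err := json.Unmarshal(body, &env); err != nil || len(env.Errors) == 0 {
		return "unknown"
	}
	switch env.Errors[0].Code {
	case "NAME_UNKNOWN", "MANIFEST_UNKNOWN", "BLOB_UNKNOWN":
		return "not found" // 404-class codes registered above
	case "DIGEST_INVALID", "SIZE_INVALID", "NAME_INVALID", "TAG_INVALID":
		return "bad request"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(classify([]byte(
		`{"errors":[{"code":"MANIFEST_UNKNOWN","message":"manifest unknown"}]}`)))
	// prints "not found"
}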
- ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_INVALID", - Message: "manifest invalid", - Description: `During upload, manifests undergo several checks ensuring - validity. If those checks fail, this error may be returned, unless a - more specific error is included. The detail will contain information - the failed validation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verification. - ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNVERIFIED", - Message: "manifest failed signature verification", - Description: `During manifest upload, if the manifest fails signature - verification, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestBlobUnknown is returned when a manifest blob is - // unknown to the registry. - ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a manifest blob is - unknown to the registry.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeBlobUnknown is returned when a blob is unknown to the - // registry. This can happen when the manifest references a nonexistent - // layer or the result is not found by a blob fetch. - ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a blob is unknown to the - registry in a specified repository. This can be returned with a - standard get or if a manifest references an unknown layer during - upload.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. - ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_UNKNOWN", - Message: "blob upload unknown to registry", - Description: `If a blob upload has been cancelled or was never - started, this error code may be returned.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. - ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_INVALID", - Message: "blob upload invalid", - Description: `The blob upload encountered an error and can no - longer proceed.`, - HTTPStatusCode: http.StatusNotFound, - }) -) diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go deleted file mode 100644 index 5b80d5be7..000000000 --- a/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go +++ /dev/null @@ -1,49 +0,0 @@ -package v2 - -import "github.com/gorilla/mux" - -// The following are definitions of the name under which all V2 routes are -// registered. These symbols can be used to look up a route based on the name. 
-const ( - RouteNameBase = "base" - RouteNameManifest = "manifest" - RouteNameTags = "tags" - RouteNameBlob = "blob" - RouteNameBlobUpload = "blob-upload" - RouteNameBlobUploadChunk = "blob-upload-chunk" - RouteNameCatalog = "catalog" -) - -var allEndpoints = []string{ - RouteNameManifest, - RouteNameCatalog, - RouteNameTags, - RouteNameBlob, - RouteNameBlobUpload, - RouteNameBlobUploadChunk, -} - -// Router builds a gorilla router with named routes for the various API -// methods. This can be used directly by both server implementations and -// clients. -func Router() *mux.Router { - return RouterWithPrefix("") -} - -// RouterWithPrefix builds a gorilla router with a configured prefix -// on all routes. -func RouterWithPrefix(prefix string) *mux.Router { - rootRouter := mux.NewRouter() - router := rootRouter - if prefix != "" { - router = router.PathPrefix(prefix).Subrouter() - } - - router.StrictSlash(true) - - for _, descriptor := range routeDescriptors { - router.Path(descriptor.Path).Name(descriptor.Name) - } - - return rootRouter -} diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go deleted file mode 100644 index a959aaa89..000000000 --- a/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go +++ /dev/null @@ -1,251 +0,0 @@ -package v2 - -import ( - "net/http" - "net/url" - "strings" - - "github.com/docker/distribution/reference" - "github.com/gorilla/mux" -) - -// URLBuilder creates registry API urls from a single base endpoint. It can be -// used to create urls for use in a registry client or server. -// -// All urls will be created from the given base, including the api version. -// For example, if a root of "/foo/" is provided, urls generated will be fall -// under "/foo/v2/...". Most application will only provide a schema, host and -// port, such as "https://localhost:5000/". -type URLBuilder struct { - root *url.URL // url root (ie http://localhost/) - router *mux.Router - relative bool -} - -// NewURLBuilder creates a URLBuilder with provided root url object. -func NewURLBuilder(root *url.URL, relative bool) *URLBuilder { - return &URLBuilder{ - root: root, - router: Router(), - relative: relative, - } -} - -// NewURLBuilderFromString workes identically to NewURLBuilder except it takes -// a string argument for the root, returning an error if it is not a valid -// url. -func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { - u, err := url.Parse(root) - if err != nil { - return nil, err - } - - return NewURLBuilder(u, relative), nil -} - -// NewURLBuilderFromRequest uses information from an *http.Request to -// construct the root url. -func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { - var scheme string - - forwardedProto := r.Header.Get("X-Forwarded-Proto") - - switch { - case len(forwardedProto) > 0: - scheme = forwardedProto - case r.TLS != nil: - scheme = "https" - case len(r.URL.Scheme) > 0: - scheme = r.URL.Scheme - default: - scheme = "http" - } - - host := r.Host - forwardedHost := r.Header.Get("X-Forwarded-Host") - if len(forwardedHost) > 0 { - // According to the Apache mod_proxy docs, X-Forwarded-Host can be a - // comma-separated list of hosts, to which each proxy appends the - // requested host. We want to grab the first from this comma-separated - // list. 
- hosts := strings.SplitN(forwardedHost, ",", 2) - host = strings.TrimSpace(hosts[0]) - } - - basePath := routeDescriptorsMap[RouteNameBase].Path - - requestPath := r.URL.Path - index := strings.Index(requestPath, basePath) - - u := &url.URL{ - Scheme: scheme, - Host: host, - } - - if index > 0 { - // N.B. index+1 is important because we want to include the trailing / - u.Path = requestPath[0 : index+1] - } - - return NewURLBuilder(u, relative) -} - -// BuildBaseURL constructs a base url for the API, typically just "/v2/". -func (ub *URLBuilder) BuildBaseURL() (string, error) { - route := ub.cloneRoute(RouteNameBase) - - baseURL, err := route.URL() - if err != nil { - return "", err - } - - return baseURL.String(), nil -} - -// BuildCatalogURL constructs a url get a catalog of repositories -func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameCatalog) - - catalogURL, err := route.URL() - if err != nil { - return "", err - } - - return appendValuesURL(catalogURL, values...).String(), nil -} - -// BuildTagsURL constructs a url to list the tags in the named repository. -func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameTags) - - tagsURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return tagsURL.String(), nil -} - -// BuildManifestURL constructs a url for the manifest identified by name and -// reference. The argument reference may be either a tag or digest. -func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameManifest) - - tagOrDigest := "" - switch v := ref.(type) { - case reference.Tagged: - tagOrDigest = v.Tag() - case reference.Digested: - tagOrDigest = v.Digest().String() - } - - manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) - if err != nil { - return "", err - } - - return manifestURL.String(), nil -} - -// BuildBlobURL constructs the url for the blob identified by name and dgst. -func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { - route := ub.cloneRoute(RouteNameBlob) - - layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) - if err != nil { - return "", err - } - - return layerURL.String(), nil -} - -// BuildBlobUploadURL constructs a url to begin a blob upload in the -// repository identified by name. -func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUpload) - - uploadURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, -// including any url values. This should generally not be used by clients, as -// this url is provided by server implementations during the blob upload -// process. -func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUploadChunk) - - uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// clondedRoute returns a clone of the named route from the router. Routes -// must be cloned to avoid modifying them during url generation. 
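The route cloning below is what keeps the Build*URL methods above safe to call concurrently. From the consumer side, the builder is driven with a named reference; a sketch, assuming the 2016-era reference.ParseNamed API from the same vendor tree:

package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/v2"
)

func main() {
	// false = emit absolute URLs rather than paths relative to the root.
	ub, err := v2.NewURLBuilderFromString("https://registry-1.docker.io", false)
	if err != nil {
		log.Fatal(err)
	}

	named, err := reference.ParseNamed("library/ubuntu")
	if err != nil {
		log.Fatal(err)
	}
	tagged, err := reference.WithTag(named, "16.04")
	if err != nil {
		log.Fatal(err)
	}

	tagsURL, _ := ub.BuildTagsURL(named)          // .../v2/library/ubuntu/tags/list
	manifestURL, _ := ub.BuildManifestURL(tagged) // .../v2/library/ubuntu/manifests/16.04
	fmt.Println(tagsURL, manifestURL)
}
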
-func (ub *URLBuilder) cloneRoute(name string) clonedRoute { - route := new(mux.Route) - root := new(url.URL) - - *route = *ub.router.GetRoute(name) // clone the route - *root = *ub.root - - return clonedRoute{Route: route, root: root, relative: ub.relative} -} - -type clonedRoute struct { - *mux.Route - root *url.URL - relative bool -} - -func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { - routeURL, err := cr.Route.URL(pairs...) - if err != nil { - return nil, err - } - - if cr.relative { - return routeURL, nil - } - - if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { - routeURL.Path = routeURL.Path[1:] - } - - url := cr.root.ResolveReference(routeURL) - url.Scheme = cr.root.Scheme - return url, nil -} - -// appendValuesURL appends the parameters to the url. -func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { - merged := u.Query() - - for _, v := range values { - for k, vv := range v { - merged[k] = append(merged[k], vv...) - } - } - - u.RawQuery = merged.Encode() - return u -} - -// appendValues appends the parameters to the url. Panics if the string is not -// a url. -func appendValues(u string, values ...url.Values) string { - up, err := url.Parse(u) - - if err != nil { - panic(err) // should never happen - } - - return appendValuesURL(up, values...).String() -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/auth/api_version.go b/vendor/src/github.com/docker/distribution/registry/client/auth/api_version.go deleted file mode 100644 index 7d8f1d957..000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/auth/api_version.go +++ /dev/null @@ -1,58 +0,0 @@ -package auth - -import ( - "net/http" - "strings" -) - -// APIVersion represents a version of an API including its -// type and version number. -type APIVersion struct { - // Type refers to the name of a specific API specification - // such as "registry" - Type string - - // Version is the version of the API specification implemented, - // This may omit the revision number and only include - // the major and minor version, such as "2.0" - Version string -} - -// String returns the string formatted API Version -func (v APIVersion) String() string { - return v.Type + "/" + v.Version -} - -// APIVersions gets the API versions out of an HTTP response using the provided -// version header as the key for the HTTP header. -func APIVersions(resp *http.Response, versionHeader string) []APIVersion { - versions := []APIVersion{} - if versionHeader != "" { - for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { - for _, version := range strings.Fields(supportedVersions) { - versions = append(versions, ParseAPIVersion(version)) - } - } - } - return versions -} - -// ParseAPIVersion parses an API version string into an APIVersion -// Format (Expected, not enforced): -// API version string = '/' -// API type = [a-z][a-z0-9]* -// API version = [0-9]+(\.[0-9]+)? 
-// TODO(dmcgowan): Enforce format, add error condition, remove unknown type -func ParseAPIVersion(versionStr string) APIVersion { - idx := strings.IndexRune(versionStr, '/') - if idx == -1 { - return APIVersion{ - Type: "unknown", - Version: versionStr, - } - } - return APIVersion{ - Type: strings.ToLower(versionStr[:idx]), - Version: versionStr[idx+1:], - } -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/auth/authchallenge.go b/vendor/src/github.com/docker/distribution/registry/client/auth/authchallenge.go deleted file mode 100644 index c8cd83bb9..000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/auth/authchallenge.go +++ /dev/null @@ -1,220 +0,0 @@ -package auth - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -// Challenge carries information from a WWW-Authenticate response header. -// See RFC 2617. -type Challenge struct { - // Scheme is the auth-scheme according to RFC 2617 - Scheme string - - // Parameters are the auth-params according to RFC 2617 - Parameters map[string]string -} - -// ChallengeManager manages the challenges for endpoints. -// The challenges are pulled out of HTTP responses. Only -// responses which expect challenges should be added to -// the manager, since a non-unauthorized request will be -// viewed as not requiring challenges. -type ChallengeManager interface { - // GetChallenges returns the challenges for the given - // endpoint URL. - GetChallenges(endpoint url.URL) ([]Challenge, error) - - // AddResponse adds the response to the challenge - // manager. The challenges will be parsed out of - // the WWW-Authenicate headers and added to the - // URL which was produced the response. If the - // response was authorized, any challenges for the - // endpoint will be cleared. - AddResponse(resp *http.Response) error -} - -// NewSimpleChallengeManager returns an instance of -// ChallengeManger which only maps endpoints to challenges -// based on the responses which have been added the -// manager. The simple manager will make no attempt to -// perform requests on the endpoints or cache the responses -// to a backend. -func NewSimpleChallengeManager() ChallengeManager { - return simpleChallengeManager{} -} - -type simpleChallengeManager map[string][]Challenge - -func (m simpleChallengeManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { - endpoint.Host = strings.ToLower(endpoint.Host) - - challenges := m[endpoint.String()] - return challenges, nil -} - -func (m simpleChallengeManager) AddResponse(resp *http.Response) error { - challenges := ResponseChallenges(resp) - if resp.Request == nil { - return fmt.Errorf("missing request reference") - } - urlCopy := url.URL{ - Path: resp.Request.URL.Path, - Host: strings.ToLower(resp.Request.URL.Host), - Scheme: resp.Request.URL.Scheme, - } - m[urlCopy.String()] = challenges - return nil -} - -// Octet types from RFC 2616. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -// ResponseChallenges returns a list of authorization challenges -// for the given http Response. Challenges are only checked if -// the response status code was a 401. -func ResponseChallenges(resp *http.Response) []Challenge { - if resp.StatusCode == http.StatusUnauthorized { - // Parse the WWW-Authenticate Header and store the challenges - // on this endpoint object. - return parseAuthHeader(resp.Header) - } - - return nil -} - -func parseAuthHeader(header http.Header) []Challenge { - challenges := []Challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) - } - } - return challenges -} - -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/auth/session.go b/vendor/src/github.com/docker/distribution/registry/client/auth/session.go deleted file mode 100644 index d03d8ff0e..000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/auth/session.go +++ /dev/null @@ -1,497 +0,0 @@ -package auth - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/transport" -) - -var ( - // ErrNoBasicAuthCredentials is returned if a request can't be authorized with - // basic auth due to lack of credentials. 
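Taken together, the two files above give a client everything it needs to discover how a registry wants to be spoken to: ping the API root, record the WWW-Authenticate challenge, and read the advertised API version. A sketch (Docker-Distribution-API-Version is the header name registries conventionally use for this):

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"

	"github.com/docker/distribution/registry/client/auth"
)

func main() {
	// An unauthenticated registry answers the ping with 401 plus a
	// WWW-Authenticate challenge, which the manager records for later use.
	resp, err := http.Get("https://registry-1.docker.io/v2/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	cm := auth.NewSimpleChallengeManager()
	if err := cm.AddResponse(resp); err != nil {
		log.Fatal(err)
	}

	pingURL := url.URL{Scheme: "https", Host: "registry-1.docker.io", Path: "/v2/"}
	challenges, _ := cm.GetChallenges(pingURL)
	for _, c := range challenges {
		fmt.Println(c.Scheme, c.Parameters) // e.g. "bearer" with realm/service params
	}

	// The same ping response also advertises the API version.
	for _, v := range auth.APIVersions(resp, "Docker-Distribution-API-Version") {
		fmt.Println(v) // e.g. registry/2.0
	}
}
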
- ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") - - // ErrNoToken is returned if a request is successful but the body does not - // contain an authorization token. - ErrNoToken = errors.New("authorization server did not include a token in the response") -) - -const defaultClientID = "registry-client" - -// AuthenticationHandler is an interface for authorizing a request from -// params from a "WWW-Authenicate" header for a single scheme. -type AuthenticationHandler interface { - // Scheme returns the scheme as expected from the "WWW-Authenicate" header. - Scheme() string - - // AuthorizeRequest adds the authorization header to a request (if needed) - // using the parameters from "WWW-Authenticate" method. The parameters - // values depend on the scheme. - AuthorizeRequest(req *http.Request, params map[string]string) error -} - -// CredentialStore is an interface for getting credentials for -// a given URL -type CredentialStore interface { - // Basic returns basic auth for the given URL - Basic(*url.URL) (string, string) - - // RefreshToken returns a refresh token for the - // given URL and service - RefreshToken(*url.URL, string) string - - // SetRefreshToken sets the refresh token if none - // is provided for the given url and service - SetRefreshToken(realm *url.URL, service, token string) -} - -// NewAuthorizer creates an authorizer which can handle multiple authentication -// schemes. The handlers are tried in order, the higher priority authentication -// methods should be first. The challengeMap holds a list of challenges for -// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). -func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier { - return &endpointAuthorizer{ - challenges: manager, - handlers: handlers, - } -} - -type endpointAuthorizer struct { - challenges ChallengeManager - handlers []AuthenticationHandler - transport http.RoundTripper -} - -func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { - pingPath := req.URL.Path - if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 { - pingPath = pingPath[:v2Root+4] - } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 { - pingPath = pingPath[:v1Root] + "/v2/" - } else { - return nil - } - - ping := url.URL{ - Host: req.URL.Host, - Scheme: req.URL.Scheme, - Path: pingPath, - } - - challenges, err := ea.challenges.GetChallenges(ping) - if err != nil { - return err - } - - if len(challenges) > 0 { - for _, handler := range ea.handlers { - for _, challenge := range challenges { - if challenge.Scheme != handler.Scheme() { - continue - } - if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil { - return err - } - } - } - } - - return nil -} - -// This is the minimum duration a token can last (in seconds). -// A token must not live less than 60 seconds because older versions -// of the Docker client didn't read their expiration from the token -// response and assumed 60 seconds. So to remain compatible with -// those implementations, a token must live at least this long. -const minimumTokenLifetimeSeconds = 60 - -// Private interface for time used by this package to enable tests to provide their own implementation. 
-type clock interface { - Now() time.Time -} - -type tokenHandler struct { - header http.Header - creds CredentialStore - transport http.RoundTripper - clock clock - - offlineAccess bool - forceOAuth bool - clientID string - scopes []Scope - - tokenLock sync.Mutex - tokenCache string - tokenExpiration time.Time -} - -// Scope is a type which is serializable to a string -// using the allow scope grammar. -type Scope interface { - String() string -} - -// RepositoryScope represents a token scope for access -// to a repository. -type RepositoryScope struct { - Repository string - Actions []string -} - -// String returns the string representation of the repository -// using the scope grammar -func (rs RepositoryScope) String() string { - return fmt.Sprintf("repository:%s:%s", rs.Repository, strings.Join(rs.Actions, ",")) -} - -// RegistryScope represents a token scope for access -// to resources in the registry. -type RegistryScope struct { - Name string - Actions []string -} - -// String returns the string representation of the user -// using the scope grammar -func (rs RegistryScope) String() string { - return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ",")) -} - -// TokenHandlerOptions is used to configure a new token handler -type TokenHandlerOptions struct { - Transport http.RoundTripper - Credentials CredentialStore - - OfflineAccess bool - ForceOAuth bool - ClientID string - Scopes []Scope -} - -// An implementation of clock for providing real time data. -type realClock struct{} - -// Now implements clock -func (realClock) Now() time.Time { return time.Now() } - -// NewTokenHandler creates a new AuthenicationHandler which supports -// fetching tokens from a remote token server. -func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { - // Create options... - return NewTokenHandlerWithOptions(TokenHandlerOptions{ - Transport: transport, - Credentials: creds, - Scopes: []Scope{ - RepositoryScope{ - Repository: scope, - Actions: actions, - }, - }, - }) -} - -// NewTokenHandlerWithOptions creates a new token handler using the provided -// options structure. -func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { - handler := &tokenHandler{ - transport: options.Transport, - creds: options.Credentials, - offlineAccess: options.OfflineAccess, - forceOAuth: options.ForceOAuth, - clientID: options.ClientID, - scopes: options.Scopes, - clock: realClock{}, - } - - return handler -} - -func (th *tokenHandler) client() *http.Client { - return &http.Client{ - Transport: th.transport, - Timeout: 15 * time.Second, - } -} - -func (th *tokenHandler) Scheme() string { - return "bearer" -} - -func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - var additionalScopes []string - if fromParam := req.URL.Query().Get("from"); fromParam != "" { - additionalScopes = append(additionalScopes, RepositoryScope{ - Repository: fromParam, - Actions: []string{"pull"}, - }.String()) - } - - token, err := th.getToken(params, additionalScopes...) 
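An aside on how this handler is meant to be driven: callers compose it with NewAuthorizer and a challenge manager, then wrap their base RoundTripper so every request is modified in flight. In the sketch below, transport.NewTransport is an assumption taken from the sibling transport package (its file is removed further down in this patch), the repository scope is illustrative, and NewBasicHandler appears a little later in this file:

package registryutil

import (
	"net/http"

	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
)

// newAuthorizedClient wires the pieces defined above: challenges recorded in
// cm are answered per request by the bearer-token handler, with basic auth
// used when that is the scheme the registry asked for.
func newAuthorizedClient(cm auth.ChallengeManager, creds auth.CredentialStore) *http.Client {
	base := http.DefaultTransport
	modifier := auth.NewAuthorizer(cm,
		auth.NewTokenHandler(base, creds, "library/ubuntu", "pull"),
		auth.NewBasicHandler(creds),
	)
	// transport.NewTransport (assumed) applies each RequestModifier before
	// delegating to the base RoundTripper.
	return &http.Client{Transport: transport.NewTransport(base, modifier)}
}
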
- if err != nil { - return err - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - - return nil -} - -func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { - th.tokenLock.Lock() - defer th.tokenLock.Unlock() - scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) - for _, scope := range th.scopes { - scopes = append(scopes, scope.String()) - } - var addedScopes bool - for _, scope := range additionalScopes { - scopes = append(scopes, scope) - addedScopes = true - } - - now := th.clock.Now() - if now.After(th.tokenExpiration) || addedScopes { - token, expiration, err := th.fetchToken(params, scopes) - if err != nil { - return "", err - } - - // do not update cache for added scope tokens - if !addedScopes { - th.tokenCache = token - th.tokenExpiration = expiration - } - - return token, nil - } - - return th.tokenCache, nil -} - -type postTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { - form := url.Values{} - form.Set("scope", strings.Join(scopes, " ")) - form.Set("service", service) - - clientID := th.clientID - if clientID == "" { - // Use default client, this is a required field - clientID = defaultClientID - } - form.Set("client_id", clientID) - - if refreshToken != "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", refreshToken) - } else if th.creds != nil { - form.Set("grant_type", "password") - username, password := th.creds.Basic(realm) - form.Set("username", username) - form.Set("password", password) - - // attempt to get a refresh token - form.Set("access_type", "offline") - } else { - // refuse to do oauth without a grant type - return "", time.Time{}, fmt.Errorf("no supported grant type") - } - - resp, err := th.client().PostForm(realm.String(), form) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr postTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { - th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. 
- tr.IssuedAt = th.clock.Now().UTC() - } - - return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil -} - -type getTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` -} - -func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) { - - req, err := http.NewRequest("GET", realm.String(), nil) - if err != nil { - return "", time.Time{}, err - } - - reqParams := req.URL.Query() - - if service != "" { - reqParams.Add("service", service) - } - - for _, scope := range scopes { - reqParams.Add("scope", scope) - } - - if th.offlineAccess { - reqParams.Add("offline_token", "true") - clientID := th.clientID - if clientID == "" { - clientID = defaultClientID - } - reqParams.Add("client_id", clientID) - } - - if th.creds != nil { - username, password := th.creds.Basic(realm) - if username != "" && password != "" { - reqParams.Add("account", username) - req.SetBasicAuth(username, password) - } - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := th.client().Do(req) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr getTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && th.creds != nil { - th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - // `access_token` is equivalent to `token` and if both are specified - // the choice is undefined. Canonicalize `access_token` by sticking - // things in `token`. - if tr.AccessToken != "" { - tr.Token = tr.AccessToken - } - - if tr.Token == "" { - return "", time.Time{}, ErrNoToken - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. - tr.IssuedAt = th.clock.Now().UTC() - } - - return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil -} - -func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) { - realm, ok := params["realm"] - if !ok { - return "", time.Time{}, errors.New("no realm specified for token auth challenge") - } - - // TODO(dmcgowan): Handle empty scheme and relative realm - realmURL, err := url.Parse(realm) - if err != nil { - return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err) - } - - service := params["service"] - - var refreshToken string - - if th.creds != nil { - refreshToken = th.creds.RefreshToken(realmURL, service) - } - - if refreshToken != "" || th.forceOAuth { - return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) - } - - return th.fetchTokenWithBasicAuth(realmURL, service, scopes) -} - -type basicHandler struct { - creds CredentialStore -} - -// NewBasicHandler creaters a new authentiation handler which adds -// basic authentication credentials to a request. 
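The basic handler below, like the token handler, pulls its secrets from the CredentialStore interface defined near the top of this file. A minimal in-memory implementation is a sketch like this (the staticCredentials type is hypothetical):

package registryutil

import (
	"net/url"
	"sync"
)

// staticCredentials is a minimal in-memory CredentialStore: one fixed
// username/password for every endpoint, plus refresh tokens keyed by service.
type staticCredentials struct {
	username, password string

	mu            sync.Mutex
	refreshTokens map[string]string
}

func (s *staticCredentials) Basic(*url.URL) (string, string) {
	return s.username, s.password
}

func (s *staticCredentials) RefreshToken(_ *url.URL, service string) string {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.refreshTokens[service]
}

func (s *staticCredentials) SetRefreshToken(_ *url.URL, service, token string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.refreshTokens == nil {
		s.refreshTokens = map[string]string{}
	}
	s.refreshTokens[service] = token
}

The RefreshToken/SetRefreshToken pair is what lets the token handler keep OAuth refresh tokens across runs; a real store would persist them rather than hold them in memory.
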
-func NewBasicHandler(creds CredentialStore) AuthenticationHandler { - return &basicHandler{ - creds: creds, - } -} - -func (*basicHandler) Scheme() string { - return "basic" -} - -func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - if bh.creds != nil { - username, password := bh.creds.Basic(req.URL) - if username != "" && password != "" { - req.SetBasicAuth(username, password) - return nil - } - } - return ErrNoBasicAuthCredentials -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go deleted file mode 100644 index e3ffcb00f..000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go +++ /dev/null @@ -1,162 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" -) - -type httpBlobUpload struct { - statter distribution.BlobStatter - client *http.Client - - uuid string - startedAt time.Time - - location string // always the last value of the location header. - offset int64 - closed bool -} - -func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { - panic("Not implemented") -} - -func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUploadUnknown - } - return HandleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) - if err != nil { - return 0, err - } - defer req.Body.Close() - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int64 - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { - req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) - if err != nil { - return 0, err - } - req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) - req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Size() int64 { - return hbu.offset -} - -func (hbu *httpBlobUpload) ID() string { - return hbu.uuid -} - -func (hbu *httpBlobUpload) StartedAt() time.Time { - return hbu.startedAt -} - -func 
(hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequest("PUT", hbu.location, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - values := req.URL.Query() - values.Set("digest", desc.Digest.String()) - req.URL.RawQuery = values.Encode() - - resp, err := hbu.client.Do(req) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if !SuccessStatus(resp.StatusCode) { - return distribution.Descriptor{}, hbu.handleErrorResponse(resp) - } - - return hbu.statter.Stat(ctx, desc.Digest) -} - -func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { - req, err := http.NewRequest("DELETE", hbu.location, nil) - if err != nil { - return err - } - resp, err := hbu.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { - return nil - } - return hbu.handleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) Close() error { - hbu.closed = true - return nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/errors.go b/vendor/src/github.com/docker/distribution/registry/client/errors.go deleted file mode 100644 index f73e3c230..000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/errors.go +++ /dev/null @@ -1,107 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty -// errcode.Errors slice. -var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") - -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is -// returned when making a registry api call. -type UnexpectedHTTPStatusError struct { - Status string -} - -func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) -} - -// UnexpectedHTTPResponseError is returned when an expected HTTP status code -// is returned, but the content was unexpected and failed to be parsed. -type UnexpectedHTTPResponseError struct { - ParseErr error - StatusCode int - Response []byte -} - -func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) -} - -func parseHTTPErrorResponse(statusCode int, r io.Reader) error { - var errors errcode.Errors - body, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - // For backward compatibility, handle irregularly formatted - // messages that contain a "details" field. 
- var detailsErr struct { - Details string `json:"details"` - } - err = json.Unmarshal(body, &detailsErr) - if err == nil && detailsErr.Details != "" { - switch statusCode { - case http.StatusUnauthorized: - return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) - case http.StatusTooManyRequests: - return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) - default: - return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) - } - } - - if err := json.Unmarshal(body, &errors); err != nil { - return &UnexpectedHTTPResponseError{ - ParseErr: err, - StatusCode: statusCode, - Response: body, - } - } - - if len(errors) == 0 { - // If there was no error specified in the body, return - // UnexpectedHTTPResponseError. - return &UnexpectedHTTPResponseError{ - ParseErr: ErrNoErrorsInBody, - StatusCode: statusCode, - Response: body, - } - } - - return errors -} - -// HandleErrorResponse returns error parsed from HTTP response for an -// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An -// UnexpectedHTTPStatusError returned for response code outside of expected -// range. -func HandleErrorResponse(resp *http.Response) error { - if resp.StatusCode == 401 { - err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) - if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { - return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) - } - return err - } - if resp.StatusCode >= 400 && resp.StatusCode < 500 { - return parseHTTPErrorResponse(resp.StatusCode, resp.Body) - } - return &UnexpectedHTTPStatusError{Status: resp.Status} -} - -// SuccessStatus returns true if the argument is a successful HTTP response -// code (in the range 200 - 399 inclusive). -func SuccessStatus(status int) bool { - return status >= 200 && status <= 399 -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/repository.go b/vendor/src/github.com/docker/distribution/registry/client/repository.go deleted file mode 100644 index 973125561..000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/repository.go +++ /dev/null @@ -1,863 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/cache/memory" -) - -// Registry provides an interface for calling Repositories, which returns a catalog of repositories. -type Registry interface { - Repositories(ctx context.Context, repos []string, last string) (n int, err error) -} - -// checkHTTPRedirect is a callback that can manipulate redirected HTTP -// requests. It is used to preserve Accept and Range headers. -func checkHTTPRedirect(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - - if len(via) > 0 { - for headerName, headerVals := range via[0].Header { - if headerName != "Accept" && headerName != "Range" { - continue - } - for _, val := range headerVals { - // Don't add to redirected request if redirected - // request already has a header with the same - // name and value. 
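Every call site in the repository client that follows uses the same two-step convention with these helpers; pulled out on its own, it is just:

package registryutil

import (
	"net/http"

	"github.com/docker/distribution/registry/client"
)

// checkResponse mirrors the convention used throughout the removed client:
// 2xx/3xx means success, 4xx is decoded by HandleErrorResponse into errcode
// errors, and anything else becomes an UnexpectedHTTPStatusError.
func checkResponse(resp *http.Response) error {
	if client.SuccessStatus(resp.StatusCode) {
		return nil
	}
	return client.HandleErrorResponse(resp)
}
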
- hasValue := false - for _, existingVal := range req.Header[headerName] { - if existingVal == val { - hasValue = true - break - } - } - if !hasValue { - req.Header.Add(headerName, val) - } - } - } - } - - return nil -} - -// NewRegistry creates a registry namespace which can be used to get a listing of repositories -func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, - CheckRedirect: checkHTTPRedirect, - } - - return ®istry{ - client: client, - ub: ub, - context: ctx, - }, nil -} - -type registry struct { - client *http.Client - ub *v2.URLBuilder - context context.Context -} - -// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size -// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there -// are no more entries -func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { - var numFilled int - var returnErr error - - values := buildCatalogValues(len(entries), last) - u, err := r.ub.BuildCatalogURL(values) - if err != nil { - return 0, err - } - - resp, err := r.client.Get(u) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - var ctlg struct { - Repositories []string `json:"repositories"` - } - decoder := json.NewDecoder(resp.Body) - - if err := decoder.Decode(&ctlg); err != nil { - return 0, err - } - - for cnt := range ctlg.Repositories { - entries[cnt] = ctlg.Repositories[cnt] - } - numFilled = len(ctlg.Repositories) - - link := resp.Header.Get("Link") - if link == "" { - returnErr = io.EOF - } - } else { - return 0, HandleErrorResponse(resp) - } - - return numFilled, returnErr -} - -// NewRepository creates a new Repository for the given repository name and base URL. 
-func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - CheckRedirect: checkHTTPRedirect, - // TODO(dmcgowan): create cookie jar - } - - return &repository{ - client: client, - ub: ub, - name: name, - context: ctx, - }, nil -} - -type repository struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named -} - -func (r *repository) Named() reference.Named { - return r.name -} - -func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { - statter := &blobStatter{ - name: r.name, - ub: r.ub, - client: r.client, - } - return &blobs{ - name: r.name, - ub: r.ub, - client: r.client, - statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), - } -} - -func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // todo(richardscothern): options should be sent over the wire - return &manifests{ - name: r.name, - ub: r.ub, - client: r.client, - etags: make(map[string]string), - }, nil -} - -func (r *repository) Tags(ctx context.Context) distribution.TagService { - return &tags{ - client: r.client, - ub: r.ub, - context: r.context, - name: r.Named(), - } -} - -// tags implements remote tagging operations. -type tags struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named -} - -// All returns all tags -func (t *tags) All(ctx context.Context) ([]string, error) { - var tags []string - - u, err := t.ub.BuildTagsURL(t.name) - if err != nil { - return tags, err - } - - for { - resp, err := t.client.Get(u) - if err != nil { - return tags, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return tags, err - } - - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return tags, err - } - tags = append(tags, tagsResponse.Tags...) 
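The tag listing in progress here follows Link headers until the registry runs out; the catalog endpoint earlier in this file paginates the same way from the caller's side, with io.EOF marking the last page. A sketch against a hypothetical endpoint:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/client"
)

func main() {
	ctx := context.Background()
	// Hypothetical registry endpoint.
	reg, err := client.NewRegistry(ctx, "https://registry.example.com", http.DefaultTransport)
	if err != nil {
		log.Fatal(err)
	}

	// Page through the catalog 50 names at a time; Repositories fills the
	// slice and returns io.EOF once there is no further page.
	entries := make([]string, 50)
	last := ""
	for {
		n, err := reg.Repositories(ctx, entries, last)
		for _, name := range entries[:n] {
			fmt.Println(name)
		}
		if err == io.EOF || n == 0 {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		last = entries[n-1]
	}
}
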
- if link := resp.Header.Get("Link"); link != "" { - u = strings.Trim(strings.Split(link, ";")[0], "<>") - } else { - return tags, nil - } - } else { - return tags, HandleErrorResponse(resp) - } - } -} - -func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { - desc := distribution.Descriptor{} - headers := response.Header - - ctHeader := headers.Get("Content-Type") - if ctHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") - } - desc.MediaType = ctHeader - - digestHeader := headers.Get("Docker-Content-Digest") - if digestHeader == "" { - bytes, err := ioutil.ReadAll(response.Body) - if err != nil { - return distribution.Descriptor{}, err - } - _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil - } - - dgst, err := digest.ParseDigest(digestHeader) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Digest = dgst - - lengthHeader := headers.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") - } - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Size = length - - return desc, nil - -} - -// Get issues a HEAD request for a Manifest against its named endpoint in order -// to construct a descriptor for the tag. If the registry doesn't support HEADing -// a manifest, fallback to GET. -func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - ref, err := reference.WithTag(t.name, tag) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := t.ub.BuildManifestURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - req, err := http.NewRequest("HEAD", u, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - - var attempts int - resp, err := t.client.Do(req) -check: - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400: - return descriptorFromResponse(resp) - case resp.StatusCode == http.StatusMethodNotAllowed: - req, err = http.NewRequest("GET", u, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - - resp, err = t.client.Do(req) - attempts++ - if attempts > 1 { - return distribution.Descriptor{}, err - } - goto check - default: - return distribution.Descriptor{}, HandleErrorResponse(resp) - } -} - -func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { - panic("not implemented") -} - -func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - panic("not implemented") -} - -func (t *tags) Untag(ctx context.Context, tag string) error { - panic("not implemented") -} - -type manifests struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - etags map[string]string -} - -func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return false, err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return false, err - } - - resp, err := ms.client.Head(u) - if err != nil { - return 
false, err - } - - if SuccessStatus(resp.StatusCode) { - return true, nil - } else if resp.StatusCode == http.StatusNotFound { - return false, nil - } - return false, HandleErrorResponse(resp) -} - -// AddEtagToTag allows a client to supply an eTag to Get which will be -// used for a conditional HTTP request. If the eTag matches, a nil manifest -// and ErrManifestNotModified error will be returned. etag is automatically -// quoted when added to this map. -func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { - return etagOption{tag, etag} -} - -type etagOption struct{ tag, etag string } - -func (o etagOption) Apply(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifests); ok { - ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) - return nil - } - return fmt.Errorf("etag options is a client-only option") -} - -// ReturnContentDigest allows a client to set a the content digest on -// a successful request from the 'Docker-Content-Digest' header. This -// returned digest is represents the digest which the registry uses -// to refer to the content and can be used to delete the content. -func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { - return contentDigestOption{dgst} -} - -type contentDigestOption struct{ digest *digest.Digest } - -func (o contentDigestOption) Apply(ms distribution.ManifestService) error { - return nil -} - -func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - var ( - digestOrTag string - ref reference.Named - err error - contentDgst *digest.Digest - ) - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - digestOrTag = opt.Tag - ref, err = reference.WithTag(ms.name, opt.Tag) - if err != nil { - return nil, err - } - } else if opt, ok := option.(contentDigestOption); ok { - contentDgst = opt.digest - } else { - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - } - - if digestOrTag == "" { - digestOrTag = dgst.String() - ref, err = reference.WithDigest(ms.name, dgst) - if err != nil { - return nil, err - } - } - - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - - if _, ok := ms.etags[digestOrTag]; ok { - req.Header.Set("If-None-Match", ms.etags[digestOrTag]) - } - - resp, err := ms.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { - return nil, distribution.ErrManifestNotModified - } else if SuccessStatus(resp.StatusCode) { - if contentDgst != nil { - dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest")) - if err == nil { - *contentDgst = dgst - } - } - mt := resp.Header.Get("Content-Type") - body, err := ioutil.ReadAll(resp.Body) - - if err != nil { - return nil, err - } - m, _, err := distribution.UnmarshalManifest(mt, body) - if err != nil { - return nil, err - } - return m, nil - } - return nil, HandleErrorResponse(resp) -} - -// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL. 
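Put below is the mirror image of Get; on the Get side, the options just defined combine as in the sketch below. Here distribution.WithTag is assumed to be the package-level constructor for the WithTagOption that Get handles, while AddEtagToTag and ReturnContentDigest are the helpers defined above:

package registryutil

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/registry/client"
)

// getByTag fetches a manifest by tag, sending a cached etag so an unchanged
// manifest comes back as distribution.ErrManifestNotModified, and capturing
// the digest the registry reports for the content.
func getByTag(ctx context.Context, ms distribution.ManifestService, tag, etag string) (distribution.Manifest, digest.Digest, error) {
	var dgst digest.Digest
	m, err := ms.Get(ctx, "",
		distribution.WithTag(tag), // assumed constructor for WithTagOption
		client.AddEtagToTag(tag, etag),
		client.ReturnContentDigest(&dgst),
	)
	return m, dgst, err
}
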
-func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - ref := ms.name - var tagged bool - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - var err error - ref, err = reference.WithTag(ref, opt.Tag) - if err != nil { - return "", err - } - tagged = true - } else { - err := option.Apply(ms) - if err != nil { - return "", err - } - } - } - mediaType, p, err := m.Payload() - if err != nil { - return "", err - } - - if !tagged { - // generate a canonical digest and Put by digest - _, d, err := distribution.UnmarshalManifest(mediaType, p) - if err != nil { - return "", err - } - ref, err = reference.WithDigest(ref, d.Digest) - if err != nil { - return "", err - } - } - - manifestURL, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return "", err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) - if err != nil { - return "", err - } - - putRequest.Header.Set("Content-Type", mediaType) - - resp, err := ms.client.Do(putRequest) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - dgstHeader := resp.Header.Get("Docker-Content-Digest") - dgst, err := digest.ParseDigest(dgstHeader) - if err != nil { - return "", err - } - - return dgst, nil - } - - return "", HandleErrorResponse(resp) -} - -func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return err - } - req, err := http.NewRequest("DELETE", u, nil) - if err != nil { - return err - } - - resp, err := ms.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -// todo(richardscothern): Restore interface and implementation with merge of #1050 -/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - panic("not supported") -}*/ - -type blobs struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - - statter distribution.BlobDescriptorService - distribution.BlobDeleter -} - -func sanitizeLocation(location, base string) (string, error) { - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - - locationURL, err := url.Parse(location) - if err != nil { - return "", err - } - - return baseURL.ResolveReference(locationURL).String(), nil -} - -func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return bs.statter.Stat(ctx, dgst) - -} - -func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - reader, err := bs.Open(ctx, dgst) - if err != nil { - return nil, err - } - defer reader.Close() - - return ioutil.ReadAll(reader) -} - -func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return nil, err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return nil, err - } - - return transport.NewHTTPReadSeeker(bs.client, blobURL, - func(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUnknown - } - return HandleErrorResponse(resp) - }), nil -} - -func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - panic("not implemented") -} - -func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - writer, err := bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, err - } - dgstr := digest.Canonical.New() - n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) - if err != nil { - return distribution.Descriptor{}, err - } - if n < int64(len(p)) { - return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) - } - - desc := distribution.Descriptor{ - MediaType: mediaType, - Size: int64(len(p)), - Digest: dgstr.Digest(), - } - - return writer.Commit(ctx, desc) -} - -// createOptions is a collection of blob creation modifiers relevant to general -// blob storage intended to be configured by the BlobCreateOption.Apply method. -type createOptions struct { - Mount struct { - ShouldMount bool - From reference.Canonical - } -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*createOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - var opts createOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - var values []url.Values - - if opts.Mount.ShouldMount { - values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) - } - - u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
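The Create call under construction here is also the entry point for cross-repository blob mounts via WithMountFrom; the distribution.ErrBlobMounted case it produces is handled just below. A caller-side sketch (mountOrUpload is a hypothetical helper):

package registryutil

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

// mountOrUpload asks the registry to link a blob that already exists in
// another repository; if the mount is refused, it abandons the upload
// session it was handed and falls back to a plain monolithic Put.
func mountOrUpload(ctx context.Context, bs distribution.BlobStore, from reference.Canonical, mediaType string, p []byte) (distribution.Descriptor, error) {
	bw, err := bs.Create(ctx, client.WithMountFrom(from))
	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
		// 201 Created: the registry linked the existing blob directly.
		return ebm.Descriptor, nil
	}
	if err != nil {
		return distribution.Descriptor{}, err
	}
	// Mount refused but an upload session started; cancel it and upload.
	bw.Cancel(ctx)
	bw.Close()
	return bs.Put(ctx, mediaType, p)
}
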
- if err != nil { - return nil, err - } - - resp, err := bs.client.Post(u, "", nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusCreated: - desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) - if err != nil { - return nil, err - } - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - case http.StatusAccepted: - // TODO(dmcgowan): Check for invalid UUID - uuid := resp.Header.Get("Docker-Upload-UUID") - location, err := sanitizeLocation(resp.Header.Get("Location"), u) - if err != nil { - return nil, err - } - - return &httpBlobUpload{ - statter: bs.statter, - client: bs.client, - uuid: uuid, - startedAt: time.Now(), - location: location, - }, nil - default: - return nil, HandleErrorResponse(resp) - } -} - -func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - panic("not implemented") -} - -func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { - return bs.statter.Clear(ctx, dgst) -} - -type blobStatter struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client -} - -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - resp, err := bs.client.Head(u) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - lengthHeader := resp.Header.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) - } - - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) - } - - return distribution.Descriptor{ - MediaType: resp.Header.Get("Content-Type"), - Size: length, - Digest: dgst, - }, nil - } else if resp.StatusCode == http.StatusNotFound { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - return distribution.Descriptor{}, HandleErrorResponse(resp) -} - -func buildCatalogValues(maxEntries int, last string) url.Values { - values := url.Values{} - - if maxEntries > 0 { - values.Add("n", strconv.Itoa(maxEntries)) - } - - if last != "" { - values.Add("last", last) - } - - return values -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return err - } - - req, err := http.NewRequest("DELETE", blobURL, nil) - if err != nil { - return err - } - - resp, err := bs.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/src/github.com/docker/distribution/registry/client/transport/http_reader.go deleted file mode 100644 index e1b17a03a..000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/transport/http_reader.go +++ /dev/null @@ -1,250 +0,0 @@ -package 
transport - -import ( - "errors" - "fmt" - "io" - "net/http" - "os" - "regexp" - "strconv" -) - -var ( - contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`) - - // ErrWrongCodeForByteRange is returned if the client sends a request - // with a Range header but the server returns a 2xx or 3xx code other - // than 206 Partial Content. - ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") -) - -// ReadSeekCloser combines io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET -// request. When seeking and starting a read from a non-zero offset -// the a "Range" header will be added which sets the offset. -// TODO(dmcgowan): Move this into a separate utility package -func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { - return &httpReadSeeker{ - client: client, - url: url, - errorHandler: errorHandler, - } -} - -type httpReadSeeker struct { - client *http.Client - url string - - // errorHandler creates an error from an unsuccessful HTTP response. - // This allows the error to be created with the HTTP response body - // without leaking the body through a returned error. - errorHandler func(*http.Response) error - - size int64 - - // rc is the remote read closer. - rc io.ReadCloser - // readerOffset tracks the offset as of the last read. - readerOffset int64 - // seekOffset allows Seek to override the offset. Seek changes - // seekOffset instead of changing readOffset directly so that - // connection resets can be delayed and possibly avoided if the - // seek is undone (i.e. seeking to the end and then back to the - // beginning). - seekOffset int64 - err error -} - -func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { - if hrs.err != nil { - return 0, hrs.err - } - - // If we sought to a different position, we need to reset the - // connection. This logic is here instead of Seek so that if - // a seek is undone before the next read, the connection doesn't - // need to be closed and reopened. A common example of this is - // seeking to the end to determine the length, and then seeking - // back to the original position. - if hrs.readerOffset != hrs.seekOffset { - hrs.reset() - } - - hrs.readerOffset = hrs.seekOffset - - rd, err := hrs.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - hrs.seekOffset += int64(n) - hrs.readerOffset += int64(n) - - return n, err -} - -func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { - if hrs.err != nil { - return 0, hrs.err - } - - lastReaderOffset := hrs.readerOffset - - if whence == os.SEEK_SET && hrs.rc == nil { - // If no request has been made yet, and we are seeking to an - // absolute position, set the read offset as well to avoid an - // unnecessary request. 
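The deferred-reset bookkeeping above exists to keep one caller pattern cheap: seeking to the end to learn the content length, then seeking back before the next read. A minimal standalone sketch of that pattern against any io.ReadSeeker, using only the standard library (io.SeekEnd and friends are the modern spellings of the os.SEEK_* constants this vendored copy still uses):

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // seekerSize learns the length of an io.ReadSeeker by seeking to the end
    // and then restoring the previous offset. Against the httpReadSeeker
    // above, the lazy reset means this round trip never reopens the
    // underlying HTTP connection.
    func seekerSize(rs io.ReadSeeker) (int64, error) {
        cur, err := rs.Seek(0, io.SeekCurrent) // remember the current offset
        if err != nil {
            return 0, err
        }
        size, err := rs.Seek(0, io.SeekEnd) // offset at the end is the size
        if err != nil {
            return 0, err
        }
        _, err = rs.Seek(cur, io.SeekStart) // undo the seek before reading on
        return size, err
    }

    func main() {
        r := strings.NewReader("hello, world")
        n, err := seekerSize(r)
        if err != nil {
            panic(err)
        }
        fmt.Println(n) // 12
    }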
- hrs.readerOffset = offset - } - - _, err := hrs.reader() - if err != nil { - hrs.readerOffset = lastReaderOffset - return 0, err - } - - newOffset := hrs.seekOffset - - switch whence { - case os.SEEK_CUR: - newOffset += offset - case os.SEEK_END: - if hrs.size < 0 { - return 0, errors.New("content length not known") - } - newOffset = hrs.size + offset - case os.SEEK_SET: - newOffset = offset - } - - if newOffset < 0 { - err = errors.New("cannot seek to negative position") - } else { - hrs.seekOffset = newOffset - } - - return hrs.seekOffset, err -} - -func (hrs *httpReadSeeker) Close() error { - if hrs.err != nil { - return hrs.err - } - - // close and release reader chain - if hrs.rc != nil { - hrs.rc.Close() - } - - hrs.rc = nil - - hrs.err = errors.New("httpLayer: closed") - - return nil -} - -func (hrs *httpReadSeeker) reset() { - if hrs.err != nil { - return - } - if hrs.rc != nil { - hrs.rc.Close() - hrs.rc = nil - } -} - -func (hrs *httpReadSeeker) reader() (io.Reader, error) { - if hrs.err != nil { - return nil, hrs.err - } - - if hrs.rc != nil { - return hrs.rc, nil - } - - req, err := http.NewRequest("GET", hrs.url, nil) - if err != nil { - return nil, err - } - - if hrs.readerOffset > 0 { - // If we are at different offset, issue a range request from there. - req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) - // TODO: get context in here - // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) - } - - resp, err := hrs.client.Do(req) - if err != nil { - return nil, err - } - - // Normally would use client.SuccessStatus, but that would be a cyclic - // import - if resp.StatusCode >= 200 && resp.StatusCode <= 399 { - if hrs.readerOffset > 0 { - if resp.StatusCode != http.StatusPartialContent { - return nil, ErrWrongCodeForByteRange - } - - contentRange := resp.Header.Get("Content-Range") - if contentRange == "" { - return nil, errors.New("no Content-Range header found in HTTP 206 response") - } - - submatches := contentRangeRegexp.FindStringSubmatch(contentRange) - if len(submatches) < 4 { - return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) - } - - startByte, err := strconv.ParseUint(submatches[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) - } - - if startByte != uint64(hrs.readerOffset) { - return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) - } - - endByte, err := strconv.ParseUint(submatches[2], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) - } - - if submatches[3] == "*" { - hrs.size = -1 - } else { - size, err := strconv.ParseUint(submatches[3], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) - } - - if endByte+1 != size { - return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) - } - - hrs.size = int64(size) - } - } else if resp.StatusCode == http.StatusOK { - hrs.size = resp.ContentLength - } else { - hrs.size = -1 - } - hrs.rc = resp.Body - } else { - defer resp.Body.Close() - if hrs.errorHandler != nil { - return nil, hrs.errorHandler(resp) - } - return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) - } - - return hrs.rc, nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/transport/transport.go 
b/vendor/src/github.com/docker/distribution/registry/client/transport/transport.go deleted file mode 100644 index 30e45fab0..000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/transport/transport.go +++ /dev/null @@ -1,147 +0,0 @@ -package transport - -import ( - "io" - "net/http" - "sync" -) - -// RequestModifier represents an object which will do an inplace -// modification of an HTTP request. -type RequestModifier interface { - ModifyRequest(*http.Request) error -} - -type headerModifier http.Header - -// NewHeaderRequestModifier returns a new RequestModifier which will -// add the given headers to a request. -func NewHeaderRequestModifier(header http.Header) RequestModifier { - return headerModifier(header) -} - -func (h headerModifier) ModifyRequest(req *http.Request) error { - for k, s := range http.Header(h) { - req.Header[k] = append(req.Header[k], s...) - } - - return nil -} - -// NewTransport creates a new transport which will apply modifiers to -// the request on a RoundTrip call. -func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { - return &transport{ - Modifiers: modifiers, - Base: base, - } -} - -// transport is an http.RoundTripper that makes HTTP requests after -// copying and modifying the request -type transport struct { - Modifiers []RequestModifier - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. -func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { - req2 := cloneRequest(req) - for _, modifier := range t.Modifiers { - if err := modifier.ModifyRequest(req2); err != nil { - return nil, err - } - } - - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) 
- } - - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go deleted file mode 100644 index 10a390919..000000000 --- a/vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package cache provides facilities to speed up access to the storage -// backend. -package cache - -import ( - "fmt" - - "github.com/docker/distribution" -) - -// BlobDescriptorCacheProvider provides repository scoped -// BlobDescriptorService cache instances and a global descriptor cache. -type BlobDescriptorCacheProvider interface { - distribution.BlobDescriptorService - - RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) -} - -// ValidateDescriptor provides a helper function to ensure that caches have -// common criteria for admitting descriptors. -func ValidateDescriptor(desc distribution.Descriptor) error { - if err := desc.Digest.Validate(); err != nil { - return err - } - - if desc.Size < 0 { - return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) - } - - if desc.MediaType == "" { - return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) - } - - return nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go deleted file mode 100644 index 94ca8a90c..000000000 --- a/vendor/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go +++ /dev/null @@ -1,101 +0,0 @@ -package cache - -import ( - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - - "github.com/docker/distribution" -) - -// Metrics is used to hold metric counters -// related to the number of times a cache was -// hit or missed. -type Metrics struct { - Requests uint64 - Hits uint64 - Misses uint64 -} - -// MetricsTracker represents a metric tracker -// which simply counts the number of hits and misses. -type MetricsTracker interface { - Hit() - Miss() - Metrics() Metrics -} - -type cachedBlobStatter struct { - cache distribution.BlobDescriptorService - backend distribution.BlobDescriptorService - tracker MetricsTracker -} - -// NewCachedBlobStatter creates a new statter which prefers a cache and -// falls back to a backend. -func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - } -} - -// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and -// falls back to a backend. Hits and misses will send to the tracker. 
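The transport package removed above is a small RoundTripper decorator: RoundTrip clones the incoming request, lets each RequestModifier adjust the clone, and delegates to a base transport, so the caller's request is never mutated. A standalone sketch of the same idea, substituting the standard library's Request.Clone (Go 1.13 and later) for the package's cloneRequest helper; the header name is only an example:

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
        "net/http/httptest"
    )

    // headerTransport injects fixed headers, mirroring NewTransport combined
    // with NewHeaderRequestModifier in the removed file.
    type headerTransport struct {
        base   http.RoundTripper
        header http.Header
    }

    func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
        r2 := req.Clone(req.Context()) // deep-copies the Header map, like cloneRequest
        for k, vs := range t.header {
            for _, v := range vs {
                r2.Header.Add(k, v)
            }
        }
        return t.base.RoundTrip(r2) // the original req is left untouched
    }

    func main() {
        srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprint(w, r.Header.Get("X-Registry-Client")) // echo the header back
        }))
        defer srv.Close()

        client := &http.Client{Transport: &headerTransport{
            base:   http.DefaultTransport,
            header: http.Header{"X-Registry-Client": {"example/1.0"}},
        }}
        resp, err := client.Get(srv.URL)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        body, _ := ioutil.ReadAll(resp.Body)
        fmt.Println(string(body)) // example/1.0
    }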
-func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - tracker: tracker, - } -} - -func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - desc, err := cbds.cache.Stat(ctx, dgst) - if err != nil { - if err != distribution.ErrBlobUnknown { - context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) - } - - goto fallback - } - - if cbds.tracker != nil { - cbds.tracker.Hit() - } - return desc, nil -fallback: - if cbds.tracker != nil { - cbds.tracker.Miss() - } - desc, err = cbds.backend.Stat(ctx, dgst) - if err != nil { - return desc, err - } - - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) - } - - return desc, err - -} - -func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - err := cbds.cache.Clear(ctx, dgst) - if err != nil { - return err - } - - err = cbds.backend.Clear(ctx, dgst) - if err != nil { - return err - } - return nil -} - -func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) - } - return nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go deleted file mode 100644 index 68a68f081..000000000 --- a/vendor/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go +++ /dev/null @@ -1,170 +0,0 @@ -package memory - -import ( - "sync" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache" -) - -type inMemoryBlobDescriptorCacheProvider struct { - global *mapBlobDescriptorCache - repositories map[string]*mapBlobDescriptorCache - mu sync.RWMutex -} - -// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for -// storing blob descriptor data. 
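cachedBlobStatter.Stat above is a read-through cache: hits are answered from the cache, misses fall through (via the goto fallback label) to the backend and then populate the cache, and a failed cache write is only logged because the backend's answer is already authoritative. The same control flow with the distribution types stripped away, as a sketch over a hypothetical string-keyed lookup:

    package main

    import (
        "errors"
        "fmt"
    )

    var errUnknown = errors.New("unknown key")

    // lookup stands in for a BlobDescriptorService.Stat call.
    type lookup func(key string) (string, error)

    // readThrough prefers the cache and falls back to the backend, mirroring
    // cachedBlobStatter.Stat in the removed file.
    func readThrough(cache map[string]string, backend lookup, key string) (string, error) {
        if desc, ok := cache[key]; ok {
            return desc, nil // cache hit
        }
        desc, err := backend(key) // cache miss: ask the authoritative store
        if err != nil {
            return "", err
        }
        // Populate the cache for next time; in the removed code a failure
        // here is logged and swallowed rather than returned.
        cache[key] = desc
        return desc, nil
    }

    func main() {
        cache := map[string]string{}
        backend := func(key string) (string, error) {
            if key == "" {
                return "", errUnknown
            }
            fmt.Println("backend consulted for", key)
            return "descriptor-of-" + key, nil
        }
        for i := 0; i < 2; i++ {
            desc, _ := readThrough(cache, backend, "sha256:abc")
            fmt.Println(desc) // the "backend consulted" line prints only once
        }
    }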
-func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { - return &inMemoryBlobDescriptorCacheProvider{ - global: newMapBlobDescriptorCache(), - repositories: make(map[string]*mapBlobDescriptorCache), - } -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNamed(repo); err != nil { - return nil, err - } - - imbdcp.mu.RLock() - defer imbdcp.mu.RUnlock() - - return &repositoryScopedInMemoryBlobDescriptorCache{ - repo: repo, - parent: imbdcp, - repository: imbdcp.repositories[repo], - }, nil -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return imbdcp.global.Stat(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { - return imbdcp.global.Clear(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - _, err := imbdcp.Stat(ctx, dgst) - if err == distribution.ErrBlobUnknown { - - if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { - // if the digests differ, set the other canonical mapping - if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { - return err - } - } - - // unknown, just set it - return imbdcp.global.SetDescriptor(ctx, dgst, desc) - } - - // we already know it, do nothing - return err -} - -// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped -// repository cache. Instances are not thread-safe but the delegated -// operations are. -type repositoryScopedInMemoryBlobDescriptorCache struct { - repo string - parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map - repository *mapBlobDescriptorCache -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if rsimbdcp.repository == nil { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return rsimbdcp.repository.Stat(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - if rsimbdcp.repository == nil { - return distribution.ErrBlobUnknown - } - - return rsimbdcp.repository.Clear(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if rsimbdcp.repository == nil { - // allocate map since we are setting it now. - rsimbdcp.parent.mu.Lock() - var ok bool - // have to read back value since we may have allocated elsewhere. - rsimbdcp.repository, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] - if !ok { - rsimbdcp.repository = newMapBlobDescriptorCache() - rsimbdcp.parent.repositories[rsimbdcp.repo] = rsimbdcp.repository - } - - rsimbdcp.parent.mu.Unlock() - } - - if err := rsimbdcp.repository.SetDescriptor(ctx, dgst, desc); err != nil { - return err - } - - return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) -} - -// mapBlobDescriptorCache provides a simple map-based implementation of the -// descriptor cache. 
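The repository-scoped cache above allocates its per-repository map lazily, and the comment about having to "read back value since we may have allocated elsewhere" is the important detail: the existence check must be repeated under the parent lock, or two concurrent first writers could each install a fresh map and one would silently lose the other's entries. The pattern in isolation, as a sketch:

    package main

    import (
        "fmt"
        "sync"
    )

    // scoped lazily allocates one map per repository, the same shape as the
    // in-memory blob descriptor cache provider above.
    type scoped struct {
        mu    sync.Mutex
        repos map[string]map[string]string
    }

    func (s *scoped) set(repo, key, val string) {
        s.mu.Lock()
        defer s.mu.Unlock()
        // Check again under the lock: another goroutine may have allocated
        // the map already, and overwriting it would drop its entries.
        m, ok := s.repos[repo]
        if !ok {
            m = make(map[string]string)
            s.repos[repo] = m
        }
        m[key] = val
    }

    func (s *scoped) size(repo string) int {
        s.mu.Lock()
        defer s.mu.Unlock()
        return len(s.repos[repo])
    }

    func main() {
        s := &scoped{repos: make(map[string]map[string]string)}
        var wg sync.WaitGroup
        for i := 0; i < 8; i++ {
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                s.set("library/ubuntu", fmt.Sprint(i), "desc")
            }(i)
        }
        wg.Wait()
        fmt.Println(s.size("library/ubuntu")) // 8: no first writer lost its map
    }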
-type mapBlobDescriptorCache struct { - descriptors map[digest.Digest]distribution.Descriptor - mu sync.RWMutex -} - -var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} - -func newMapBlobDescriptorCache() *mapBlobDescriptorCache { - return &mapBlobDescriptorCache{ - descriptors: make(map[digest.Digest]distribution.Descriptor), - } -} - -func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - mbdc.mu.RLock() - defer mbdc.mu.RUnlock() - - desc, ok := mbdc.descriptors[dgst] - if !ok { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return desc, nil -} - -func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - delete(mbdc.descriptors, dgst) - return nil -} - -func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - mbdc.descriptors[dgst] = desc - return nil -} diff --git a/vendor/src/github.com/docker/distribution/tags.go b/vendor/src/github.com/docker/distribution/tags.go deleted file mode 100644 index 503056596..000000000 --- a/vendor/src/github.com/docker/distribution/tags.go +++ /dev/null @@ -1,27 +0,0 @@ -package distribution - -import ( - "github.com/docker/distribution/context" -) - -// TagService provides access to information about tagged objects. -type TagService interface { - // Get retrieves the descriptor identified by the tag. Some - // implementations may differentiate between "trusted" tags and - // "untrusted" tags. If a tag is "untrusted", the mapping will be returned - // as an ErrTagUntrusted error, with the target descriptor. - Get(ctx context.Context, tag string) (Descriptor, error) - - // Tag associates the tag with the provided descriptor, updating the - // current association, if needed. - Tag(ctx context.Context, tag string, desc Descriptor) error - - // Untag removes the given tag association - Untag(ctx context.Context, tag string) error - - // All returns the set of tags managed by this tag service - All(ctx context.Context) ([]string, error) - - // Lookup returns the set of tags referencing the given digest. - Lookup(ctx context.Context, digest Descriptor) ([]string, error) -} diff --git a/vendor/src/github.com/docker/distribution/uuid/uuid.go b/vendor/src/github.com/docker/distribution/uuid/uuid.go deleted file mode 100644 index d433ccaf5..000000000 --- a/vendor/src/github.com/docker/distribution/uuid/uuid.go +++ /dev/null @@ -1,126 +0,0 @@ -// Package uuid provides simple UUID generation. Only version 4 style UUIDs -// can be generated. -// -// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs. -package uuid - -import ( - "crypto/rand" - "fmt" - "io" - "os" - "syscall" - "time" -) - -const ( - // Bits is the number of bits in a UUID - Bits = 128 - - // Size is the number of bytes in a UUID - Size = Bits / 8 - - format = "%08x-%04x-%04x-%04x-%012x" -) - -var ( - // ErrUUIDInvalid indicates a parsed string is not a valid uuid. - ErrUUIDInvalid = fmt.Errorf("invalid uuid") - - // Loggerf can be used to override the default logging destination. Such - // log messages in this library should be logged at warning or higher. 
- Loggerf = func(format string, args ...interface{}) {} -) - -// UUID represents a UUID value. UUIDs can be compared and set to other values -// and accessed by byte. -type UUID [Size]byte - -// Generate creates a new, version 4 uuid. -func Generate() (u UUID) { - const ( - // ensures we backoff for less than 450ms total. Use the following to - // select new value, in units of 10ms: - // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 - maxretries = 9 - backoff = time.Millisecond * 10 - ) - - var ( - totalBackoff time.Duration - count int - retries int - ) - - for { - // This should never block but the read may fail. Because of this, - // we just try to read the random number generator until we get - // something. This is a very rare condition but may happen. - b := time.Duration(retries) * backoff - time.Sleep(b) - totalBackoff += b - - n, err := io.ReadFull(rand.Reader, u[count:]) - if err != nil { - if retryOnError(err) && retries < maxretries { - count += n - retries++ - Loggerf("error generating version 4 uuid, retrying: %v", err) - continue - } - - // Any other errors represent a system problem. What did someone - // do to /dev/urandom? - panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) - } - - break - } - - u[6] = (u[6] & 0x0f) | 0x40 // set version byte - u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} - - return u -} - -// Parse attempts to extract a uuid from the string or returns an error. -func Parse(s string) (u UUID, err error) { - if len(s) != 36 { - return UUID{}, ErrUUIDInvalid - } - - // create stack addresses for each section of the uuid. - p := make([][]byte, 5) - - if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { - return u, err - } - - copy(u[0:4], p[0]) - copy(u[4:6], p[1]) - copy(u[6:8], p[2]) - copy(u[8:10], p[3]) - copy(u[10:16], p[4]) - - return -} - -func (u UUID) String() string { - return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) -} - -// retryOnError tries to detect whether or not retrying would be fruitful. -func retryOnError(err error) bool { - switch err := err.(type) { - case *os.PathError: - return retryOnError(err.Err) // unpack the target error - case syscall.Errno: - if err == syscall.EPERM { - // EPERM represents an entropy pool exhaustion, a condition under - // which we backoff and retry. - return true - } - } - - return false -} diff --git a/vendor/src/github.com/docker/docker-credential-helpers/LICENSE b/vendor/src/github.com/docker/docker-credential-helpers/LICENSE deleted file mode 100644 index 1ea555e2a..000000000 --- a/vendor/src/github.com/docker/docker-credential-helpers/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2016 David Calavera - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/src/github.com/docker/docker-credential-helpers/client/client.go b/vendor/src/github.com/docker/docker-credential-helpers/client/client.go deleted file mode 100644 index ddd30bbc8..000000000 --- a/vendor/src/github.com/docker/docker-credential-helpers/client/client.go +++ /dev/null @@ -1,70 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - - "github.com/docker/docker-credential-helpers/credentials" -) - -// Store uses an external program to save credentials. -func Store(program ProgramFunc, credentials *credentials.Credentials) error { - cmd := program("store") - - buffer := new(bytes.Buffer) - if err := json.NewEncoder(buffer).Encode(credentials); err != nil { - return err - } - cmd.Input(buffer) - - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) - } - - return nil -} - -// Get executes an external program to get the credentials from a native store. -func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { - cmd := program("get") - cmd.Input(strings.NewReader(serverURL)) - - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if credentials.IsErrCredentialsNotFoundMessage(t) { - return nil, credentials.NewErrCredentialsNotFound() - } - - return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) - } - - resp := &credentials.Credentials{ - ServerURL: serverURL, - } - - if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { - return nil, err - } - - return resp, nil -} - -// Erase executes a program to remove the server credentails from the native store. -func Erase(program ProgramFunc, serverURL string) error { - cmd := program("erase") - cmd.Input(strings.NewReader(serverURL)) - - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) - } - - return nil -} diff --git a/vendor/src/github.com/docker/docker-credential-helpers/client/command.go b/vendor/src/github.com/docker/docker-credential-helpers/client/command.go deleted file mode 100644 index 8983da694..000000000 --- a/vendor/src/github.com/docker/docker-credential-helpers/client/command.go +++ /dev/null @@ -1,37 +0,0 @@ -package client - -import ( - "io" - "os/exec" -) - -// Program is an interface to execute external programs. -type Program interface { - Output() ([]byte, error) - Input(in io.Reader) -} - -// ProgramFunc is a type of function that initializes programs based on arguments. -type ProgramFunc func(args ...string) Program - -// NewShellProgramFunc creates programs that are executed in a Shell. -func NewShellProgramFunc(name string) ProgramFunc { - return func(args ...string) Program { - return &Shell{cmd: exec.Command(name, args...)} - } -} - -// Shell invokes shell commands to talk with a remote credentials helper. 
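The client package above drives an external credential helper over stdin and stdout, one action per process invocation. Using it looks like the sketch below; Store, Get and NewShellProgramFunc are exactly the functions defined above, while the helper name is only an example (any docker-credential-<name> binary on PATH speaks the same protocol):

    package main

    import (
        "fmt"

        "github.com/docker/docker-credential-helpers/client"
        "github.com/docker/docker-credential-helpers/credentials"
    )

    func main() {
        // Example helper name; the binary must exist on PATH for this to run.
        p := client.NewShellProgramFunc("docker-credential-pass")

        creds := &credentials.Credentials{
            ServerURL: "https://index.docker.io/v1/",
            Username:  "someuser",
            Secret:    "apassword",
        }
        if err := client.Store(p, creds); err != nil {
            panic(err) // the error message includes the helper's output
        }

        got, err := client.Get(p, creds.ServerURL)
        if err != nil {
            if credentials.IsErrCredentialsNotFound(err) {
                fmt.Println("no credentials stored")
                return
            }
            panic(err)
        }
        fmt.Println(got.Username) // round-tripped through the external program
    }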
-type Shell struct { - cmd *exec.Cmd -} - -// Output returns responses from the remote credentials helper. -func (s *Shell) Output() ([]byte, error) { - return s.cmd.Output() -} - -// Input sets the input to send to a remote credentials helper. -func (s *Shell) Input(in io.Reader) { - s.cmd.Stdin = in -} diff --git a/vendor/src/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/src/github.com/docker/docker-credential-helpers/credentials/credentials.go deleted file mode 100644 index b14f49566..000000000 --- a/vendor/src/github.com/docker/docker-credential-helpers/credentials/credentials.go +++ /dev/null @@ -1,129 +0,0 @@ -package credentials - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "strings" -) - -// Credentials holds the information shared between docker and the credentials store. -type Credentials struct { - ServerURL string - Username string - Secret string -} - -// Serve initializes the credentials helper and parses the action argument. -// This function is designed to be called from a command line interface. -// It uses os.Args[1] as the key for the action. -// It uses os.Stdin as input and os.Stdout as output. -// This function terminates the program with os.Exit(1) if there is an error. -func Serve(helper Helper) { - var err error - if len(os.Args) != 2 { - err = fmt.Errorf("Usage: %s ", os.Args[0]) - } - - if err == nil { - err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout) - } - - if err != nil { - fmt.Fprintf(os.Stdout, "%v\n", err) - os.Exit(1) - } -} - -// HandleCommand uses a helper and a key to run a credential action. -func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error { - switch key { - case "store": - return Store(helper, in) - case "get": - return Get(helper, in, out) - case "erase": - return Erase(helper, in) - } - return fmt.Errorf("Unknown credential action `%s`", key) -} - -// Store uses a helper and an input reader to save credentials. -// The reader must contain the JSON serialization of a Credentials struct. -func Store(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - var creds Credentials - if err := json.NewDecoder(buffer).Decode(&creds); err != nil { - return err - } - - return helper.Add(&creds) -} - -// Get retrieves the credentials for a given server url. -// The reader must contain the server URL to search. -// The writer is used to write the JSON serialization of the credentials. -func Get(helper Helper, reader io.Reader, writer io.Writer) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - - username, secret, err := helper.Get(serverURL) - if err != nil { - return err - } - - resp := Credentials{ - Username: username, - Secret: secret, - } - - buffer.Reset() - if err := json.NewEncoder(buffer).Encode(resp); err != nil { - return err - } - - fmt.Fprint(writer, buffer.String()) - return nil -} - -// Erase removes credentials from the store. -// The reader must contain the server URL to remove. 
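On the other side of that protocol, a helper binary only has to implement the three-method Helper interface (defined in helper.go further down) and hand it to Serve, which reads the action from os.Args[1] and the payload from stdin. A toy in-memory helper, as a sketch; real helpers delegate to a platform keychain instead:

    package main

    import (
        "github.com/docker/docker-credential-helpers/credentials"
    )

    // memoryHelper is a toy credentials.Helper that forgets everything when
    // the process exits; it exists only to show the store/get/erase wiring.
    type memoryHelper struct {
        byURL map[string]*credentials.Credentials
    }

    func (h *memoryHelper) Add(c *credentials.Credentials) error {
        h.byURL[c.ServerURL] = c
        return nil
    }

    func (h *memoryHelper) Delete(serverURL string) error {
        delete(h.byURL, serverURL)
        return nil
    }

    func (h *memoryHelper) Get(serverURL string) (string, string, error) {
        c, ok := h.byURL[serverURL]
        if !ok {
            // The standardized not-found error (see error.go below) lets
            // callers distinguish a miss from a real failure.
            return "", "", credentials.NewErrCredentialsNotFound()
        }
        return c.Username, c.Secret, nil
    }

    func main() {
        // Serve dispatches "store", "get" or "erase" from os.Args[1] over
        // stdin/stdout and exits non-zero on error.
        credentials.Serve(&memoryHelper{byURL: map[string]*credentials.Credentials{}})
    }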
-func Erase(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - - return helper.Delete(serverURL) -} diff --git a/vendor/src/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/src/github.com/docker/docker-credential-helpers/credentials/error.go deleted file mode 100644 index d24bf16f0..000000000 --- a/vendor/src/github.com/docker/docker-credential-helpers/credentials/error.go +++ /dev/null @@ -1,37 +0,0 @@ -package credentials - -// ErrCredentialsNotFound standarizes the not found error, so every helper returns -// the same message and docker can handle it properly. -const errCredentialsNotFoundMessage = "credentials not found in native keychain" - -// errCredentialsNotFound represents an error -// raised when credentials are not in the store. -type errCredentialsNotFound struct{} - -// Error returns the standard error message -// for when the credentials are not in the store. -func (errCredentialsNotFound) Error() string { - return errCredentialsNotFoundMessage -} - -// NewErrCredentialsNotFound creates a new error -// for when the credentials are not in the store. -func NewErrCredentialsNotFound() error { - return errCredentialsNotFound{} -} - -// IsErrCredentialsNotFound returns true if the error -// was caused by not having a set of credentials in a store. -func IsErrCredentialsNotFound(err error) bool { - _, ok := err.(errCredentialsNotFound) - return ok -} - -// IsErrCredentialsNotFoundMessage returns true if the error -// was caused by not having a set of credentials in a store. -// -// This function helps to check messages returned by an -// external program via its standard output. -func IsErrCredentialsNotFoundMessage(err string) bool { - return err == errCredentialsNotFoundMessage -} diff --git a/vendor/src/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/src/github.com/docker/docker-credential-helpers/credentials/helper.go deleted file mode 100644 index 8a6967144..000000000 --- a/vendor/src/github.com/docker/docker-credential-helpers/credentials/helper.go +++ /dev/null @@ -1,12 +0,0 @@ -package credentials - -// Helper is the interface a credentials store helper must implement. -type Helper interface { - // Add appends credentials to the store. - Add(*Credentials) error - // Delete removes credentials from the store. - Delete(serverURL string) error - // Get retrieves credentials from the store. - // It returns username and secret as strings. - Get(serverURL string) (string, string, error) -} diff --git a/vendor/src/github.com/docker/go-connections/LICENSE b/vendor/src/github.com/docker/go-connections/LICENSE deleted file mode 100644 index b55b37bc3..000000000 --- a/vendor/src/github.com/docker/go-connections/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/src/github.com/docker/go-connections/nat/nat.go b/vendor/src/github.com/docker/go-connections/nat/nat.go deleted file mode 100644 index bca3c2c99..000000000 --- a/vendor/src/github.com/docker/go-connections/nat/nat.go +++ /dev/null @@ -1,243 +0,0 @@ -// Package nat is a convenience package for manipulation of strings describing network ports. 
-package nat - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -const ( - // portSpecTemplate is the expected format for port specifications - portSpecTemplate = "ip:hostPort:containerPort" -) - -// PortBinding represents a binding between a Host IP address and a Host Port -type PortBinding struct { - // HostIP is the host IP Address - HostIP string `json:"HostIp"` - // HostPort is the host port number - HostPort string -} - -// PortMap is a collection of PortBinding indexed by Port -type PortMap map[Port][]PortBinding - -// PortSet is a collection of structs indexed by Port -type PortSet map[Port]struct{} - -// Port is a string containing port number and protocol in the format "80/tcp" -type Port string - -// NewPort creates a new instance of a Port given a protocol and port number or port range -func NewPort(proto, port string) (Port, error) { - // Check for parsing issues on "port" now so we can avoid having - // to check it later on. - - portStartInt, portEndInt, err := ParsePortRangeToInt(port) - if err != nil { - return "", err - } - - if portStartInt == portEndInt { - return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil - } - return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil -} - -// ParsePort parses the port number string and returns an int -func ParsePort(rawPort string) (int, error) { - if len(rawPort) == 0 { - return 0, nil - } - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// ParsePortRangeToInt parses the port range string and returns start/end ints -func ParsePortRangeToInt(rawPort string) (int, int, error) { - if len(rawPort) == 0 { - return 0, 0, nil - } - start, end, err := ParsePortRange(rawPort) - if err != nil { - return 0, 0, err - } - return int(start), int(end), nil -} - -// Proto returns the protocol of a Port -func (p Port) Proto() string { - proto, _ := SplitProtoPort(string(p)) - return proto -} - -// Port returns the port number of a Port -func (p Port) Port() string { - _, port := SplitProtoPort(string(p)) - return port -} - -// Int returns the port number of a Port as an int -func (p Port) Int() int { - portStr := p.Port() - if len(portStr) == 0 { - return 0 - } - - // We don't need to check for an error because we're going to - // assume that any error would have been found, and reported, in NewPort() - port, _ := strconv.ParseUint(portStr, 10, 16) - return int(port) -} - -// Range returns the start/end port numbers of a Port range as ints -func (p Port) Range() (int, int, error) { - return ParsePortRangeToInt(p.Port()) -} - -// SplitProtoPort splits a port in the format of proto/port -func SplitProtoPort(rawPort string) (string, string) { - parts := strings.Split(rawPort, "/") - l := len(parts) - if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { - return "", "" - } - if l == 1 { - return "tcp", rawPort - } - if len(parts[1]) == 0 { - return "tcp", parts[0] - } - return parts[1], parts[0] -} - -func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp"} { - if availableProto == proto { - return true - } - } - return false -} - -// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses -// these in to the internal types -func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { - var ( - exposedPorts = make(map[Port]struct{}, len(ports)) - bindings = make(map[Port][]PortBinding) - ) - for _, rawPort := range ports { - portMappings, err := 
ParsePortSpec(rawPort) - if err != nil { - return nil, nil, err - } - - for _, portMapping := range portMappings { - port := portMapping.Port - if _, exists := exposedPorts[port]; !exists { - exposedPorts[port] = struct{}{} - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, portMapping.Binding) - } - } - return exposedPorts, bindings, nil -} - -// PortMapping is a data object mapping a Port to a PortBinding -type PortMapping struct { - Port Port - Binding PortBinding -} - -// ParsePortSpec parses a port specification string into a slice of PortMappings -func ParsePortSpec(rawPort string) ([]PortMapping, error) { - proto := "tcp" - - if i := strings.LastIndex(rawPort, "/"); i != -1 { - proto = rawPort[i+1:] - rawPort = rawPort[:i] - } - if !strings.Contains(rawPort, ":") { - rawPort = fmt.Sprintf("::%s", rawPort) - } else if len(strings.Split(rawPort, ":")) == 2 { - rawPort = fmt.Sprintf(":%s", rawPort) - } - - parts, err := PartParser(portSpecTemplate, rawPort) - if err != nil { - return nil, err - } - - var ( - containerPort = parts["containerPort"] - rawIP = parts["ip"] - hostPort = parts["hostPort"] - ) - - if rawIP != "" && net.ParseIP(rawIP) == nil { - return nil, fmt.Errorf("Invalid ip address: %s", rawIP) - } - if containerPort == "" { - return nil, fmt.Errorf("No port specified: %s", rawPort) - } - - startPort, endPort, err := ParsePortRange(containerPort) - if err != nil { - return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) - } - - var startHostPort, endHostPort uint64 = 0, 0 - if len(hostPort) > 0 { - startHostPort, endHostPort, err = ParsePortRange(hostPort) - if err != nil { - return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) - } - } - - if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { - // Allow host port range iff containerPort is not a range. - // In this case, use the host port range as the dynamic - // host port range to allocate into. - if endPort != startPort { - return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) - } - } - - if !validateProto(strings.ToLower(proto)) { - return nil, fmt.Errorf("Invalid proto: %s", proto) - } - - ports := []PortMapping{} - for i := uint64(0); i <= (endPort - startPort); i++ { - containerPort = strconv.FormatUint(startPort+i, 10) - if len(hostPort) > 0 { - hostPort = strconv.FormatUint(startHostPort+i, 10) - } - // Set hostPort to a range only if there is a single container port - // and a dynamic host port. - if startPort == endPort && startHostPort != endHostPort { - hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) - } - port, err := NewPort(strings.ToLower(proto), containerPort) - if err != nil { - return nil, err - } - - binding := PortBinding{ - HostIP: rawIP, - HostPort: hostPort, - } - ports = append(ports, PortMapping{Port: port, Binding: binding}) - } - return ports, nil -} diff --git a/vendor/src/github.com/docker/go-connections/nat/parse.go b/vendor/src/github.com/docker/go-connections/nat/parse.go deleted file mode 100644 index 872050205..000000000 --- a/vendor/src/github.com/docker/go-connections/nat/parse.go +++ /dev/null @@ -1,56 +0,0 @@ -package nat - -import ( - "fmt" - "strconv" - "strings" -) - -// PartParser parses and validates the specified string (data) using the specified template -// e.g. 
ip:public:private -> 192.168.0.1:80:8000 -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} diff --git a/vendor/src/github.com/docker/go-connections/nat/sort.go b/vendor/src/github.com/docker/go-connections/nat/sort.go deleted file mode 100644 index ce950171e..000000000 --- a/vendor/src/github.com/docker/go-connections/nat/sort.go +++ /dev/null @@ -1,96 +0,0 @@ -package nat - -import ( - "sort" - "strings" -) - -type portSorter struct { - ports []Port - by func(i, j Port) bool -} - -func (s *portSorter) Len() int { - return len(s.ports) -} - -func (s *portSorter) Swap(i, j int) { - s.ports[i], s.ports[j] = s.ports[j], s.ports[i] -} - -func (s *portSorter) Less(i, j int) bool { - ip := s.ports[i] - jp := s.ports[j] - - return s.by(ip, jp) -} - -// Sort sorts a list of ports using the provided predicate -// This function should compare `i` and `j`, returning true if `i` is -// considered to be less than `j` -func Sort(ports []Port, predicate func(i, j Port) bool) { - s := &portSorter{ports, predicate} - sort.Sort(s) -} - -type portMapEntry struct { - port Port - binding PortBinding -} - -type portMapSorter []portMapEntry - -func (s portMapSorter) Len() int { return len(s) } -func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// sort the port so that the order is: -// 1. port with larger specified bindings -// 2. larger port -// 3. port with tcp protocol -func (s portMapSorter) Less(i, j int) bool { - pi, pj := s[i].port, s[j].port - hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) - return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") -} - -// SortPortMap sorts the list of ports and their respected mapping. The ports -// will explicit HostPort will be placed first. 
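Together, nat.go and parse.go above turn docker-style port specifications into structured mappings, with a container port range expanding to one mapping per port. Using the removed API as defined above:

    package main

    import (
        "fmt"

        "github.com/docker/go-connections/nat"
    )

    func main() {
        // A single container port bound to a host IP and host port.
        mappings, err := nat.ParsePortSpec("127.0.0.1:8080:80/tcp")
        if err != nil {
            panic(err)
        }
        for _, m := range mappings {
            fmt.Printf("%s -> %s:%s\n", m.Port, m.Binding.HostIP, m.Binding.HostPort)
            // 80/tcp -> 127.0.0.1:8080
        }

        // A container port range expands to one mapping per port; the host
        // range must be the same width, or parsing fails.
        mappings, err = nat.ParsePortSpec("8000-8002:80-82/udp")
        if err != nil {
            panic(err)
        }
        fmt.Println(len(mappings)) // 3, for 80/udp through 82/udp
    }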
-func SortPortMap(ports []Port, bindings PortMap) { - s := portMapSorter{} - for _, p := range ports { - if binding, ok := bindings[p]; ok { - for _, b := range binding { - s = append(s, portMapEntry{port: p, binding: b}) - } - bindings[p] = []PortBinding{} - } else { - s = append(s, portMapEntry{port: p}) - } - } - - sort.Sort(s) - var ( - i int - pm = make(map[Port]struct{}) - ) - // reorder ports - for _, entry := range s { - if _, ok := pm[entry.port]; !ok { - ports[i] = entry.port - pm[entry.port] = struct{}{} - i++ - } - // reorder bindings for this port - if _, ok := bindings[entry.port]; ok { - bindings[entry.port] = append(bindings[entry.port], entry.binding) - } - } -} - -func toInt(s string) uint64 { - i, _, err := ParsePortRange(s) - if err != nil { - i = 0 - } - return i -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/README.md b/vendor/src/github.com/docker/go-connections/sockets/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/src/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/src/github.com/docker/go-connections/sockets/inmem_socket.go deleted file mode 100644 index 99846ffdd..000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/inmem_socket.go +++ /dev/null @@ -1,81 +0,0 @@ -package sockets - -import ( - "errors" - "net" - "sync" -) - -var errClosed = errors.New("use of closed network connection") - -// InmemSocket implements net.Listener using in-memory only connections. -type InmemSocket struct { - chConn chan net.Conn - chClose chan struct{} - addr string - mu sync.Mutex -} - -// dummyAddr is used to satisfy net.Addr for the in-mem socket -// it is just stored as a string and returns the string for all calls -type dummyAddr string - -// NewInmemSocket creates an in-memory only net.Listener -// The addr argument can be any string, but is used to satisfy the `Addr()` part -// of the net.Listener interface -func NewInmemSocket(addr string, bufSize int) *InmemSocket { - return &InmemSocket{ - chConn: make(chan net.Conn, bufSize), - chClose: make(chan struct{}), - addr: addr, - } -} - -// Addr returns the socket's addr string to satisfy net.Listener -func (s *InmemSocket) Addr() net.Addr { - return dummyAddr(s.addr) -} - -// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. -func (s *InmemSocket) Accept() (net.Conn, error) { - select { - case conn := <-s.chConn: - return conn, nil - case <-s.chClose: - return nil, errClosed - } -} - -// Close closes the listener. It will be unavailable for use once closed. 
-func (s *InmemSocket) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - select { - case <-s.chClose: - default: - close(s.chClose) - } - return nil -} - -// Dial is used to establish a connection with the in-mem server -func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { - srvConn, clientConn := net.Pipe() - select { - case s.chConn <- srvConn: - case <-s.chClose: - return nil, errClosed - } - - return clientConn, nil -} - -// Network returns the addr string, satisfies net.Addr -func (a dummyAddr) Network() string { - return string(a) -} - -// String returns the string form -func (a dummyAddr) String() string { - return string(a) -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/proxy.go b/vendor/src/github.com/docker/go-connections/sockets/proxy.go deleted file mode 100644 index 98e9a1dc6..000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/proxy.go +++ /dev/null @@ -1,51 +0,0 @@ -package sockets - -import ( - "net" - "net/url" - "os" - "strings" - - "golang.org/x/net/proxy" -) - -// GetProxyEnv allows access to the uppercase and the lowercase forms of -// proxy-related variables. See the Go specification for details on these -// variables. https://golang.org/pkg/net/http/ -func GetProxyEnv(key string) string { - proxyValue := os.Getenv(strings.ToUpper(key)) - if proxyValue == "" { - return os.Getenv(strings.ToLower(key)) - } - return proxyValue -} - -// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a -// proxy.Dialer which will route the connections through the proxy using the -// given dialer. -func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { - allProxy := GetProxyEnv("all_proxy") - if len(allProxy) == 0 { - return direct, nil - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return direct, err - } - - proxyFromURL, err := proxy.FromURL(proxyURL, direct) - if err != nil { - return direct, err - } - - noProxy := GetProxyEnv("no_proxy") - if len(noProxy) == 0 { - return proxyFromURL, nil - } - - perHost := proxy.NewPerHost(proxyFromURL, direct) - perHost.AddFromString(noProxy) - - return perHost, nil -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/sockets.go b/vendor/src/github.com/docker/go-connections/sockets/sockets.go deleted file mode 100644 index 1739cecf2..000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/sockets.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package sockets provides helper functions to create and configure Unix or TCP sockets. -package sockets - -import ( - "net" - "net/http" - "time" -) - -// Why 32? See https://github.com/docker/docker/pull/8035. -const defaultTimeout = 32 * time.Second - -// ConfigureTransport configures the specified Transport according to the -// specified proto and addr. -// If the proto is unix (using a unix socket to communicate) or npipe the -// compression is disabled. -func ConfigureTransport(tr *http.Transport, proto, addr string) error { - switch proto { - case "unix": - // No need for compression in local communications. - tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.DialTimeout(proto, addr, defaultTimeout) - } - case "npipe": - // No need for compression in local communications. 
- tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return DialPipe(addr, defaultTimeout) - } - default: - tr.Proxy = http.ProxyFromEnvironment - dialer, err := DialerFromEnvironment(&net.Dialer{ - Timeout: defaultTimeout, - }) - if err != nil { - return err - } - tr.Dial = dialer.Dial - } - return nil -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/src/github.com/docker/go-connections/sockets/sockets_unix.go deleted file mode 100644 index b255ac9ac..000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/sockets_unix.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package sockets - -import ( - "net" - "syscall" - "time" -) - -// DialPipe connects to a Windows named pipe. -// This is not supported on other OSes. -func DialPipe(_ string, _ time.Duration) (net.Conn, error) { - return nil, syscall.EAFNOSUPPORT -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/src/github.com/docker/go-connections/sockets/sockets_windows.go deleted file mode 100644 index 1f3540b2f..000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/sockets_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -package sockets - -import ( - "net" - "time" - - "github.com/Microsoft/go-winio" -) - -// DialPipe connects to a Windows named pipe. -func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { - return winio.DialPipe(addr, &timeout) -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/src/github.com/docker/go-connections/sockets/tcp_socket.go deleted file mode 100644 index 8a82727df..000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/tcp_socket.go +++ /dev/null @@ -1,22 +0,0 @@ -// Package sockets provides helper functions to create and configure Unix or TCP sockets. -package sockets - -import ( - "crypto/tls" - "net" -) - -// NewTCPSocket creates a TCP socket listener with the specified address and -the specified TLS configuration. If tlsConfig is set, it will encapsulate the -TCP listener inside a TLS one. -func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { - l, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } - if tlsConfig != nil { - tlsConfig.NextProtos = []string{"http/1.1"} - l = tls.NewListener(l, tlsConfig) - } - return l, nil -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/src/github.com/docker/go-connections/sockets/unix_socket.go deleted file mode 100644 index d1627349f..000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/unix_socket.go +++ /dev/null @@ -1,80 +0,0 @@ -// +build linux freebsd solaris - -package sockets - -import ( - "fmt" - "net" - "os" - "strconv" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer/user" -) - -// NewUnixSocket creates a unix socket with the specified path and group.
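A sketch of how `ConfigureTransport` was typically consumed; the socket path and the URL host are placeholders (for unix sockets the dialer above ignores the host):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	tr := &http.Transport{}
	// "unix" disables compression and points tr.Dial at the socket path.
	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
		panic(err)
	}

	client := &http.Client{Transport: tr}
	resp, err := client.Get("http://dummyhost/_ping") // host ignored by the unix dialer
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```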
-func NewUnixSocket(path, group string) (net.Listener, error) { - if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { - return nil, err - } - mask := syscall.Umask(0777) - defer syscall.Umask(mask) - l, err := net.Listen("unix", path) - if err != nil { - return nil, err - } - if err := setSocketGroup(path, group); err != nil { - l.Close() - return nil, err - } - if err := os.Chmod(path, 0660); err != nil { - l.Close() - return nil, err - } - return l, nil -} - -func setSocketGroup(path, group string) error { - if group == "" { - return nil - } - if err := changeGroup(path, group); err != nil { - if group != "docker" { - return err - } - logrus.Debugf("Warning: could not change group %s to docker: %v", path, err) - } - return nil -} - -func changeGroup(path string, nameOrGid string) error { - gid, err := lookupGidByName(nameOrGid) - if err != nil { - return err - } - logrus.Debugf("%s group found. gid: %d", nameOrGid, gid) - return os.Chown(path, 0, gid) -} - -func lookupGidByName(nameOrGid string) (int, error) { - groupFile, err := user.GetGroupPath() - if err != nil { - return -1, err - } - groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { - return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid - }) - if err != nil { - return -1, err - } - if groups != nil && len(groups) > 0 { - return groups[0].Gid, nil - } - gid, err := strconv.Atoi(nameOrGid) - if err == nil { - logrus.Warnf("Could not find GID %d", gid) - return gid, nil - } - return -1, fmt.Errorf("Group %s not found", nameOrGid) -} diff --git a/vendor/src/github.com/docker/go-connections/tlsconfig/config.go b/vendor/src/github.com/docker/go-connections/tlsconfig/config.go deleted file mode 100644 index 1ba04395e..000000000 --- a/vendor/src/github.com/docker/go-connections/tlsconfig/config.go +++ /dev/null @@ -1,122 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -// As a reminder from https://golang.org/pkg/crypto/tls/#Config: -// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. -// A Config may be reused; the tls package will also not modify it. -package tlsconfig - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "os" - - "github.com/Sirupsen/logrus" -) - -// Options represents the information needed to create client and server TLS configurations. -type Options struct { - CAFile string - - // If either CertFile or KeyFile is empty, Client() will not load them - // preventing the client from authenticating to the server. - // However, Server() requires them and will error out if they are empty. - CertFile string - KeyFile string - - // client-only option - InsecureSkipVerify bool - // server-only option - ClientAuth tls.ClientAuthType -} - -// Extra (server-side) accepted CBC cipher suites - will phase out in the future -var acceptedCBCCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, -} - -// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls -// options struct but wants to use a commonly accepted set of TLS cipher suites, with -// known weak algorithms removed. -var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
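A minimal sketch of serving HTTP over a socket created by `NewUnixSocket` (Linux/FreeBSD/Solaris only, per the build tags above; path and group are hypothetical):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	// The socket is created with mode 0660; failure to resolve the "docker"
	// group is only logged, per setSocketGroup above.
	l, err := sockets.NewUnixSocket("/tmp/example.sock", "docker")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	log.Fatal(http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello over the unix socket")
	})))
}
```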
- -// ServerDefault is a secure-enough TLS configuration for the server TLS configuration. -var ServerDefault = tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, -} - -// ClientDefault is a secure-enough TLS configuration for the client TLS configuration. -var ClientDefault = tls.Config{ - // Prefer TLS1.2 as the client minimum - MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, -} - -// certPool returns an X.509 certificate pool from `caFile`, the certificate file. -func certPool(caFile string) (*x509.CertPool, error) { - // If we should verify the server, we need to load a trusted ca - certPool := x509.NewCertPool() - pem, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err) - } - if !certPool.AppendCertsFromPEM(pem) { - return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) - } - logrus.Debugf("Trusting %d certs", len(certPool.Subjects())) - return certPool, nil -} - -// Client returns a TLS configuration meant to be used by a client. -func Client(options Options) (*tls.Config, error) { - tlsConfig := ClientDefault - tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify - if !options.InsecureSkipVerify && options.CAFile != "" { - CAs, err := certPool(options.CAFile) - if err != nil { - return nil, err - } - tlsConfig.RootCAs = CAs - } - - if options.CertFile != "" || options.KeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - } - - return &tlsConfig, nil -} - -// Server returns a TLS configuration meant to be used by a server. -func Server(options Options) (*tls.Config, error) { - tlsConfig := ServerDefault - tlsConfig.ClientAuth = options.ClientAuth - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) - } - return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - if options.ClientAuth >= tls.VerifyClientCertIfGiven { - CAs, err := certPool(options.CAFile) - if err != nil { - return nil, err - } - tlsConfig.ClientCAs = CAs - } - return &tlsConfig, nil -} diff --git a/vendor/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go deleted file mode 100644 index 6b4c6a7c0..000000000 --- a/vendor/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. 
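The two constructors above were usually driven from a single `Options` value; a sketch with hypothetical file paths:

```go
package main

import (
	"log"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	opts := tlsconfig.Options{
		CAFile:   "ca.pem",   // hypothetical paths
		CertFile: "cert.pem", // Server requires the pair; Client treats it as optional
		KeyFile:  "key.pem",
	}

	clientConf, err := tlsconfig.Client(opts)
	if err != nil {
		log.Fatal(err)
	}
	_ = clientConf // e.g. &http.Transport{TLSClientConfig: clientConf}

	serverConf, err := tlsconfig.Server(opts)
	if err != nil {
		log.Fatal(err)
	}
	_ = serverConf // e.g. tls.NewListener(tcpListener, serverConf)
}
```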
-// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/vendor/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go deleted file mode 100644 index ee22df47c..000000000 --- a/vendor/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/vendor/src/github.com/docker/go-events/.gitignore b/vendor/src/github.com/docker/go-events/.gitignore deleted file mode 100644 index daf913b1b..000000000 --- a/vendor/src/github.com/docker/go-events/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/src/github.com/docker/go-events/LICENSE b/vendor/src/github.com/docker/go-events/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/src/github.com/docker/go-events/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/src/github.com/docker/go-events/README.md b/vendor/src/github.com/docker/go-events/README.md deleted file mode 100644 index 9e3625da9..000000000 --- a/vendor/src/github.com/docker/go-events/README.md +++ /dev/null @@ -1,112 +0,0 @@ -# Docker Events Package - -[![GoDoc](https://godoc.org/github.com/docker/go-events?status.svg)](https://godoc.org/github.com/docker/go-events) -[![Circle CI](https://circleci.com/gh/docker/go-events.svg?style=shield)](https://circleci.com/gh/docker/go-events) - -The Docker `events` package implements a composable event distribution package -for Go. - -Originally created to implement the [notifications in Docker Registry -2](https://github.com/docker/distribution/blob/master/docs/notifications.md), -we've found the pattern to be useful in other applications. This package is -most of the same code with slightly updated interfaces. Much of the internals -have been made available. - -## Usage - -The `events` package centers around a `Sink` type. 
Events are written with -calls to `Sink.Write(event Event)`. Sinks can be wired up in various -configurations to achieve interesting behavior. - -The canonical example is that employed by the -[docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications) -package. Let's say we have a type `httpSink` where we'd like to queue -notifications. As a rule, it should send a single http request and return an -error if it fails: - -```go -func (h *httpSink) Write(event Event) error { - p, err := json.Marshal(event) - if err != nil { - return err - } - body := bytes.NewReader(p) - resp, err := h.client.Post(h.url, "application/json", body) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return errors.New("unexpected status") - } - - return nil -} - -// implement (*httpSink).Close() -``` - -With just that, we can start using components from this package. One can call -`(*httpSink).Write` to send events as the body of a post request to a -configured URL. - -### Retries - -HTTP can be unreliable. The first feature we'd like is to have some retry: - -```go -hs := newHTTPSink(/*...*/) -retry := NewRetryingSink(hs, NewBreaker(5, time.Second)) -``` - -We now have a sink that will retry events against the `httpSink` until they -succeed. The retry will back off for one second after 5 consecutive failures -using the breaker strategy. - -### Queues - -This isn't quite enough. We want a sink that doesn't block while we are -waiting for events to be sent. Let's add a `Queue`: - -```go -queue := NewQueue(retry) -``` - -Now, we have an unbounded queue that will work through all events sent with -`(*Queue).Write`. Events can be added asynchronously to the queue without -blocking the current execution path. This is ideal for use in an http request. - -### Broadcast - -It usually turns out that you want to send to more than one listener. We can -use `Broadcaster` to support this: - -```go -var broadcast = NewBroadcaster() // make it available somewhere in your application. -broadcast.Add(queue) // add your queue! -broadcast.Add(queue2) // and another! -``` - -With the above, we can now call `broadcast.Write` in our http handlers and have -all the events distributed to each queue. Because the events are queued, no -listener blocks another. - -### Extending - -For the most part, the above is sufficient for a lot of applications. However, -extending the above functionality can be done by implementing your own `Sink`. The -behavior and semantics of the sink can be completely dependent on the -application requirements. The interface is provided below for reference: - -```go -type Sink interface { - Write(Event) error - Close() error -} -``` - -Application behavior can be controlled by how `Write` behaves. The examples -above are designed to queue the message and return as quickly as possible. -Other implementations may block until the event is committed to durable -storage. diff --git a/vendor/src/github.com/docker/go-events/broadcast.go b/vendor/src/github.com/docker/go-events/broadcast.go deleted file mode 100644 index e73d758bc..000000000 --- a/vendor/src/github.com/docker/go-events/broadcast.go +++ /dev/null @@ -1,158 +0,0 @@ -package events - -import "github.com/Sirupsen/logrus" - -// Broadcaster sends events to multiple, reliable Sinks. The goal of this -// component is to dispatch events to configured endpoints. Reliability can be -// provided by wrapping incoming sinks.
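Putting the README's pieces together end to end, a runnable sketch of the retry/queue/broadcast composition with a stand-in sink (`printSink` is hypothetical):

```go
package main

import (
	"fmt"
	"time"

	events "github.com/docker/go-events"
)

// printSink is a trivial Sink standing in for something like httpSink.
type printSink struct{}

func (printSink) Write(event events.Event) error {
	fmt.Println("event:", event)
	return nil
}

func (printSink) Close() error { return nil }

func main() {
	// sink <- retry (breaker backoff) <- queue <- broadcaster
	retry := events.NewRetryingSink(printSink{}, events.NewBreaker(5, time.Second))
	queue := events.NewQueue(retry)
	broadcast := events.NewBroadcaster(queue)

	_ = broadcast.Write("something happened")
	_ = broadcast.Close() // flushes the queue and closes the chain
}
```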
-type Broadcaster struct { - sinks []Sink - events chan Event - adds chan configureRequest - removes chan configureRequest - closed chan chan struct{} -} - -// NewBroadcaster appends one or more sinks to the list of sinks. The -// broadcaster behavior will be affected by the properties of the sink. -// Generally, the sink should accept all messages and deal with reliability on -// its own. Use of EventQueue and RetryingSink should be used here. -func NewBroadcaster(sinks ...Sink) *Broadcaster { - b := Broadcaster{ - sinks: sinks, - events: make(chan Event), - adds: make(chan configureRequest), - removes: make(chan configureRequest), - closed: make(chan chan struct{}), - } - - // Start the broadcaster - go b.run() - - return &b -} - -// Write accepts an event to be dispatched to all sinks. This method will never -// fail and should never block (hopefully!). The caller cedes the memory to the -// broadcaster and should not modify it after calling write. -func (b *Broadcaster) Write(event Event) error { - select { - case b.events <- event: - case <-b.closed: - return ErrSinkClosed - } - return nil -} - -// Add the sink to the broadcaster. -// -// The provided sink must be comparable with equality. Typically, this just -// works with a regular pointer type. -func (b *Broadcaster) Add(sink Sink) error { - return b.configure(b.adds, sink) -} - -// Remove the provided sink. -func (b *Broadcaster) Remove(sink Sink) error { - return b.configure(b.removes, sink) -} - -type configureRequest struct { - sink Sink - response chan error -} - -func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error { - response := make(chan error, 1) - - for { - select { - case ch <- configureRequest{ - sink: sink, - response: response}: - ch = nil - case err := <-response: - return err - case <-b.closed: - return ErrSinkClosed - } - } -} - -// Close the broadcaster, ensuring that all messages are flushed to the -// underlying sink before returning. -func (b *Broadcaster) Close() error { - select { - case <-b.closed: - // already closed - return ErrSinkClosed - default: - // do a little chan handoff dance to synchronize closing - closed := make(chan struct{}) - b.closed <- closed - close(b.closed) - <-closed - return nil - } -} - -// run is the main broadcast loop, started when the broadcaster is created. -// Under normal conditions, it waits for events on the event channel. After -// Close is called, this goroutine will exit. -func (b *Broadcaster) run() { - remove := func(target Sink) { - for i, sink := range b.sinks { - if sink == target { - b.sinks = append(b.sinks[:i], b.sinks[i+1:]...) - break - } - } - } - - for { - select { - case event := <-b.events: - for _, sink := range b.sinks { - if err := sink.Write(event); err != nil { - if err == ErrSinkClosed { - // remove closed sinks - remove(sink) - continue - } - logrus.WithField("event", event).WithField("events.sink", sink).WithError(err). - Errorf("broadcaster: dropping event") - } - } - case request := <-b.adds: - // while we have to iterate for add/remove, common iteration for - // send is faster against slice. 
- - var found bool - for _, sink := range b.sinks { - if request.sink == sink { - found = true - break - } - } - - if !found { - b.sinks = append(b.sinks, request.sink) - } - // b.sinks[request.sink] = struct{}{} - request.response <- nil - case request := <-b.removes: - remove(request.sink) - request.response <- nil - case closing := <-b.closed: - // close all the underlying sinks - for _, sink := range b.sinks { - if err := sink.Close(); err != nil && err != ErrSinkClosed { - logrus.WithField("events.sink", sink).WithError(err). - Errorf("broadcaster: closing sink failed") - } - } - closing <- struct{}{} - return - } - } -} diff --git a/vendor/src/github.com/docker/go-events/channel.go b/vendor/src/github.com/docker/go-events/channel.go deleted file mode 100644 index 7ee7ea5e2..000000000 --- a/vendor/src/github.com/docker/go-events/channel.go +++ /dev/null @@ -1,47 +0,0 @@ -package events - -// Channel provides a sink that can be listened on. The writer and channel -// listener must operate in separate goroutines. -- -// Consumers should listen on Channel.C until Done() is closed. -type Channel struct { - C chan Event - - closed chan struct{} -} - -// NewChannel returns a channel. If buffer is zero, the channel is -// unbuffered. -func NewChannel(buffer int) *Channel { - return &Channel{ - C: make(chan Event, buffer), - closed: make(chan struct{}), - } -} - -// Done returns a channel that will always proceed once the sink is closed. -func (ch *Channel) Done() chan struct{} { - return ch.closed -} - -// Write the event to the channel. Must be called in a separate goroutine from -// the listener. -func (ch *Channel) Write(event Event) error { - select { - case ch.C <- event: - return nil - case <-ch.closed: - return ErrSinkClosed - } -} - -// Close the channel sink. -func (ch *Channel) Close() error { - select { - case <-ch.closed: - return ErrSinkClosed - default: - close(ch.closed) - return nil - } -} diff --git a/vendor/src/github.com/docker/go-events/errors.go b/vendor/src/github.com/docker/go-events/errors.go deleted file mode 100644 index 56db7c251..000000000 --- a/vendor/src/github.com/docker/go-events/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package events - -import "fmt" - -var ( - // ErrSinkClosed is returned if a write is issued to a sink that has been - // closed. If encountered, the error should be considered terminal and - // retries will not be successful. - ErrSinkClosed = fmt.Errorf("events: sink closed") -) diff --git a/vendor/src/github.com/docker/go-events/event.go b/vendor/src/github.com/docker/go-events/event.go deleted file mode 100644 index f0f1d9ea5..000000000 --- a/vendor/src/github.com/docker/go-events/event.go +++ /dev/null @@ -1,15 +0,0 @@ -package events - -// Event marks items that can be sent as events. -type Event interface{} - -// Sink accepts and sends events. -type Sink interface { - // Write an event to the Sink. If no error is returned, the caller will - // assume that all events have been committed to the sink. If an error is - // received, the caller may retry sending the event. - Write(event Event) error - - // Close the sink, possibly waiting for pending events to flush. - Close() error -} diff --git a/vendor/src/github.com/docker/go-events/filter.go b/vendor/src/github.com/docker/go-events/filter.go deleted file mode 100644 index f2765cfe6..000000000 --- a/vendor/src/github.com/docker/go-events/filter.go +++ /dev/null @@ -1,52 +0,0 @@ -package events - -// Matcher matches events.
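A short sketch of the `Channel` sink just defined; note the writer and listener run in separate goroutines, as the doc comment requires:

```go
package main

import (
	"fmt"

	events "github.com/docker/go-events"
)

func main() {
	ch := events.NewChannel(0) // zero buffer: unbuffered

	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			select {
			case event := <-ch.C:
				fmt.Println("received:", event)
			case <-ch.Done():
				return
			}
		}
	}()

	_ = ch.Write("hello") // blocks until the listener receives
	_ = ch.Close()
	<-done
}
```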
-type Matcher interface { - Match(event Event) bool -} - -// MatcherFunc implements Matcher with just a function. -type MatcherFunc func(event Event) bool - -// Match calls the wrapped function. -func (fn MatcherFunc) Match(event Event) bool { - return fn(event) -} - -// Filter provides an event sink that sends only events that are accepted by a -// Matcher. No methods on Filter are goroutine safe. -type Filter struct { - dst Sink - matcher Matcher - closed bool -} - -// NewFilter returns a new filter that will send events to dst that return -// true for the Matcher. -func NewFilter(dst Sink, matcher Matcher) Sink { - return &Filter{dst: dst, matcher: matcher} -} - -// Write an event to the filter. -func (f *Filter) Write(event Event) error { - if f.closed { - return ErrSinkClosed - } - - if f.matcher.Match(event) { - return f.dst.Write(event) - } - - return nil -} - -// Close the filter and allow no more events to pass through. -func (f *Filter) Close() error { - // TODO(stevvooe): Not all sinks should have Close. - if f.closed { - return ErrSinkClosed - } - - f.closed = true - return f.dst.Close() -} diff --git a/vendor/src/github.com/docker/go-events/queue.go b/vendor/src/github.com/docker/go-events/queue.go deleted file mode 100644 index 7c5fc8150..000000000 --- a/vendor/src/github.com/docker/go-events/queue.go +++ /dev/null @@ -1,104 +0,0 @@ -package events - -import ( - "container/list" - "sync" - - "github.com/Sirupsen/logrus" -) - -// Queue accepts all messages into a queue for asynchronous consumption -// by a sink. It is unbounded and thread safe but the sink must be reliable or -// events will be dropped. -type Queue struct { - dst Sink - events *list.List - cond *sync.Cond - mu sync.Mutex - closed bool -} - -// NewQueue returns a queue to the provided Sink dst. -func NewQueue(dst Sink) *Queue { - eq := Queue{ - dst: dst, - events: list.New(), - } - - eq.cond = sync.NewCond(&eq.mu) - go eq.run() - return &eq -} - -// Write accepts the events into the queue, only failing if the queue has -// been closed. -func (eq *Queue) Write(event Event) error { - eq.mu.Lock() - defer eq.mu.Unlock() - - if eq.closed { - return ErrSinkClosed - } - - eq.events.PushBack(event) - eq.cond.Signal() // signal waiters - - return nil -} - -// Close shuts down the event queue, flushing any pending events before -// closing the destination sink. -func (eq *Queue) Close() error { - eq.mu.Lock() - defer eq.mu.Unlock() - - if eq.closed { - return ErrSinkClosed - } - - // set closed flag - eq.closed = true - eq.cond.Signal() // signal flushes queue - eq.cond.Wait() // wait for signal from last flush - return eq.dst.Close() -} - -// run is the main goroutine to flush events to the target sink. -func (eq *Queue) run() { - for { - event := eq.next() - - if event == nil { - return // a nil event means the queue is closed. - } - - if err := eq.dst.Write(event); err != nil { - logrus.WithFields(logrus.Fields{ - "event": event, - "sink": eq.dst, - }).WithError(err).Warnf("eventqueue: dropped event") - } - } -} - -// next encompasses the critical section of the run loop. When the queue is -// empty, it will block on the condition. If new data arrives, it will wake -// and return an event. When closed, nil will be returned.
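A sketch of `Filter` with a `MatcherFunc`, using a hypothetical stand-in sink:

```go
package main

import (
	"fmt"

	events "github.com/docker/go-events"
)

type logSink struct{}

func (logSink) Write(event events.Event) error {
	fmt.Println("accepted:", event)
	return nil
}

func (logSink) Close() error { return nil }

func main() {
	onlyStrings := events.MatcherFunc(func(event events.Event) bool {
		_, ok := event.(string)
		return ok
	})
	filtered := events.NewFilter(logSink{}, onlyStrings)

	_ = filtered.Write("kept") // forwarded to logSink
	_ = filtered.Write(42)     // dropped without error
	_ = filtered.Close()
}
```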
-func (eq *Queue) next() Event { - eq.mu.Lock() - defer eq.mu.Unlock() - - for eq.events.Len() < 1 { - if eq.closed { - eq.cond.Broadcast() - return nil - } - - eq.cond.Wait() - } - - front := eq.events.Front() - block := front.Value.(Event) - eq.events.Remove(front) - - return block -} diff --git a/vendor/src/github.com/docker/go-events/retry.go b/vendor/src/github.com/docker/go-events/retry.go deleted file mode 100644 index 4ddb3ac6a..000000000 --- a/vendor/src/github.com/docker/go-events/retry.go +++ /dev/null @@ -1,249 +0,0 @@ -package events - -import ( - "math/rand" - "sync" - "sync/atomic" - "time" - - "github.com/Sirupsen/logrus" -) - -// RetryingSink retries the write until success or an ErrSinkClosed is -// returned. The underlying sink must succeed with probability p > 0 or the -// sink will block. Retry is configured with a RetryStrategy. Concurrent calls to a -// retrying sink are serialized through the sink, meaning that if one is -// in-flight, another will not proceed. -type RetryingSink struct { - sink Sink - strategy RetryStrategy - closed chan struct{} -} - -// NewRetryingSink returns a sink that will retry writes to a sink, backing -// off on failure. The retry behavior is configured by the provided -// RetryStrategy. -func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink { - rs := &RetryingSink{ - sink: sink, - strategy: strategy, - closed: make(chan struct{}), - } - - return rs -} - -// Write attempts to flush the events to the downstream sink until it succeeds -// or the sink is closed. -func (rs *RetryingSink) Write(event Event) error { - logger := logrus.WithField("event", event) - -retry: - select { - case <-rs.closed: - return ErrSinkClosed - default: - } - - if backoff := rs.strategy.Proceed(event); backoff > 0 { - select { - case <-time.After(backoff): - // TODO(stevvooe): This branch holds up the next try. Before, we - // would simply break to the "retry" label and then possibly wait - // again. However, this requires all retry strategies to have a - // large probability of probing the sink for success, rather than - // just backing off and sending the request. - case <-rs.closed: - return ErrSinkClosed - } - } - - if err := rs.sink.Write(event); err != nil { - if err == ErrSinkClosed { - // terminal! - return err - } - - logger := logger.WithError(err) // shadow!! - - if rs.strategy.Failure(event, err) { - logger.Errorf("retryingsink: dropped event") - return nil - } - - logger.Errorf("retryingsink: error writing event, retrying") - goto retry - } - - rs.strategy.Success(event) - return nil -} - -// Close closes the retrying sink and its underlying sink. -func (rs *RetryingSink) Close() error { - select { - case <-rs.closed: - return ErrSinkClosed - default: - close(rs.closed) - return rs.sink.Close() - } -} - -// RetryStrategy defines a strategy for retrying event sink writes. -// -// All methods should be goroutine safe. -type RetryStrategy interface { - // Proceed is called before every event send. If proceed returns a - // positive, non-zero duration, the retryer will back off by the provided - // duration. - // - // An event is provided, but may be ignored. - Proceed(event Event) time.Duration - - // Failure reports a failure to the strategy. If this method returns true, - // the event should be dropped. - Failure(event Event, err error) bool - - // Success should be called when an event is sent successfully. - Success(event Event) -} - -// Breaker implements a circuit breaker retry strategy. -// -// The current implementation never drops events.
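Since `RetryStrategy` is a public interface, callers could plug in their own policies; a hypothetical constant-delay strategy (stateless, hence trivially goroutine safe):

```go
package main

import (
	"time"

	events "github.com/docker/go-events"
)

// fixedDelay backs off by a constant duration before every attempt and
// never drops events. Hypothetical; not part of the package.
type fixedDelay time.Duration

func (d fixedDelay) Proceed(event events.Event) time.Duration   { return time.Duration(d) }
func (d fixedDelay) Failure(event events.Event, err error) bool { return false }
func (d fixedDelay) Success(event events.Event)                 {}

// discardSink stands in for a real downstream sink.
type discardSink struct{}

func (discardSink) Write(event events.Event) error { return nil }
func (discardSink) Close() error                   { return nil }

func main() {
	var _ events.RetryStrategy = fixedDelay(0) // compile-time interface check

	rs := events.NewRetryingSink(discardSink{}, fixedDelay(100*time.Millisecond))
	_ = rs.Write("payload") // sleeps ~100ms, then writes
	_ = rs.Close()
}
```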
-type Breaker struct { - threshold int - recent int - last time.Time - backoff time.Duration // time after which we retry after failure. - mu sync.Mutex -} - -var _ RetryStrategy = &Breaker{} - -// NewBreaker returns a breaker that will back off after the threshold has been -// tripped. A Breaker is thread safe and may be shared by many goroutines. -func NewBreaker(threshold int, backoff time.Duration) *Breaker { - return &Breaker{ - threshold: threshold, - backoff: backoff, - } -} - -// Proceed checks the failures against the threshold. -func (b *Breaker) Proceed(event Event) time.Duration { - b.mu.Lock() - defer b.mu.Unlock() - - if b.recent < b.threshold { - return 0 - } - - return b.last.Add(b.backoff).Sub(time.Now()) -} - -// Success resets the breaker. -func (b *Breaker) Success(event Event) { - b.mu.Lock() - defer b.mu.Unlock() - - b.recent = 0 - b.last = time.Time{} -} - -// Failure records the failure and latest failure time. -func (b *Breaker) Failure(event Event, err error) bool { - b.mu.Lock() - defer b.mu.Unlock() - - b.recent++ - b.last = time.Now().UTC() - return false // never drop events. -} - -var ( - // DefaultExponentialBackoffConfig provides a default configuration for - // exponential backoff. - DefaultExponentialBackoffConfig = ExponentialBackoffConfig{ - Base: time.Second, - Factor: time.Second, - Max: 20 * time.Second, - } -) - -// ExponentialBackoffConfig configures backoff parameters. -// -// Note that these parameters operate on the upper bound for choosing a random -// value. For example, at Base=1s, a random value in [0,1s) will be chosen for -// the backoff value. -type ExponentialBackoffConfig struct { - // Base is the minimum bound for backing off after failure. - Base time.Duration - - // Factor sets the amount of time by which the backoff grows with each - // failure. - Factor time.Duration - - // Max is the absolute maximum bound for a single backoff. - Max time.Duration -} - -// ExponentialBackoff implements random backoff with exponentially increasing -// bounds as the number of consecutive failures increases. -type ExponentialBackoff struct { - config ExponentialBackoffConfig - failures uint64 // consecutive failure counter. -} - -// NewExponentialBackoff returns an exponential backoff strategy with the -// desired config. Zero-valued fields fall back to the defaults. -func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff { - return &ExponentialBackoff{ - config: config, - } -} - -// Proceed returns the next randomly bound exponential backoff time. -func (b *ExponentialBackoff) Proceed(event Event) time.Duration { - return b.backoff(atomic.LoadUint64(&b.failures)) -} - -// Success resets the failures counter. -func (b *ExponentialBackoff) Success(event Event) { - atomic.StoreUint64(&b.failures, 0) -} - -// Failure increments the failure counter. -func (b *ExponentialBackoff) Failure(event Event, err error) bool { - atomic.AddUint64(&b.failures, 1) - return false -} - -// backoff calculates the amount of time to wait based on the number of -// consecutive failures. -func (b *ExponentialBackoff) backoff(failures uint64) time.Duration { - if failures <= 0 { - // proceed normally when there are no failures.
- return 0 - } - - factor := b.config.Factor - if factor <= 0 { - factor = DefaultExponentialBackoffConfig.Factor - } - - backoff := b.config.Base + factor*time.Duration(1<<(failures-1)) - - max := b.config.Max - if max <= 0 { - max = DefaultExponentialBackoffConfig.Max - } - - if backoff > max || backoff < 0 { - backoff = max - } - - // Choose a uniformly distributed value from [0, backoff). - return time.Duration(rand.Int63n(int64(backoff))) -} diff --git a/vendor/src/github.com/docker/go/LICENSE b/vendor/src/github.com/docker/go/LICENSE deleted file mode 100644 index 744875676..000000000 --- a/vendor/src/github.com/docker/go/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/docker/go/canonical/json/decode.go b/vendor/src/github.com/docker/go/canonical/json/decode.go deleted file mode 100644 index 35bac2e2b..000000000 --- a/vendor/src/github.com/docker/go/canonical/json/decode.go +++ /dev/null @@ -1,1094 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. 
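To make the bound arithmetic above concrete: with the default config, the cap before the random draw is Base + Factor*2^(n-1) for n consecutive failures (2s, 3s, 5s, 9s, 17s, then clamped to Max=20s). A sketch:

```go
package main

import (
	"fmt"

	events "github.com/docker/go-events"
)

func main() {
	b := events.NewExponentialBackoff(events.DefaultExponentialBackoffConfig)

	for i := 0; i < 3; i++ {
		b.Failure(nil, fmt.Errorf("transient error"))
		// A uniformly random duration below the growing bound (2s, 3s, 5s...).
		fmt.Println("backoff:", b.Proceed(nil))
	}

	b.Success(nil) // resets the consecutive-failure counter
	fmt.Println("after success:", b.Proceed(nil)) // 0s
}
```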
-// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice to nil -// and then appends each element to the slice. -// -// To unmarshal a JSON object into a map, Unmarshal replaces the map -// with an empty map and then adds key-value pairs from the object to -// the map. -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshalling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -// -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. -type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) 
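This vendored decoder follows the behavior documented above (mirroring the standard library's `encoding/json`); a small sketch under the vendored import path:

```go
package main

import (
	"fmt"

	json "github.com/docker/go/canonical/json"
)

type target struct {
	Name  string `json:"name"`
	Ports []int  `json:"ports"`
}

func main() {
	data := []byte(`{"name":"web","ports":[80,443]}`)

	var t target
	if err := json.Unmarshal(data, &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Name, t.Ports) // web [80 443]

	// Into an interface value, the generic forms listed above are used.
	var v interface{}
	if err := json.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", v) // map[string]interface {}

	// A non-pointer argument yields *InvalidUnmarshalError.
	fmt.Println(json.Unmarshal(data, t)) // json: Unmarshal(non-pointer main.target)
}
```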
-type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - useNumber bool - canonical bool -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. -func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := int(d.data[d.off]) - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. 
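`Number` preserves the literal text of a JSON number, so precision is only lost when the caller converts; a quick sketch:

```go
package main

import (
	"fmt"

	json "github.com/docker/go/canonical/json"
)

func main() {
	n := json.Number("9007199254740993") // 2^53+1: exact as int64, not as float64

	i, err := n.Int64()
	fmt.Println(i, err) // 9007199254740993 <nil>

	f, _ := n.Float64()
	fmt.Println(f) // 9.007199254740992e+15 (rounded)
}
```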
-func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. 
- switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquoteBytes(item) - if !ok { - d.error(errPhase) - } - - // Figure out field corresponding to key. 
- var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type(), false) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, key) { - f = ff - break - } - if f == nil && ff.equalFold(ff.nameBytes, key) { - f = ff - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). -func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64 or a Number -// depending on the setting of d.useNumber. -func (d *decodeState) convertNumber(s string) (interface{}, error) { - if d.useNumber { - return Number(s), nil - } - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. 
- if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - case reflect.Slice: - if v.Type().Elem().Kind() != reflect.Uint8 { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - break - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) - break - } - v.Set(reflect.ValueOf(b[0:n])) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - } - - default: // number - if c != '-' && (c < '0' || c > '9') { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - s := string(item) - switch v.Kind() { - default: - if v.Kind() == reflect.String && v.Type() == numberType { - v.SetString(s) - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || v.OverflowInt(n) { - 
d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) - if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetFloat(n) - } - } -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface() - case scanBeginObject: - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - } -} - -// arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// objectInterface is like object but returns map[string]interface{}. -func (d *decodeState) objectInterface() map[string]interface{} { - m := make(map[string]interface{}) - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m[key] = d.valueInterface() - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// literalInterface is like literal but returns an interface value. -func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. 
- d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; c { - case 'n': // null - return nil - - case 't', 'f': // true, false - return c == 't' - - case '"': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - default: // number - if c != '-' && (c < '0' || c > '9') { - d.error(errPhase) - } - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - } -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. -// The rules are different than for Go, so cannot use strconv.Unquote. -func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. - if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. - rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. - case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. - default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/src/github.com/docker/go/canonical/json/encode.go b/vendor/src/github.com/docker/go/canonical/json/encode.go deleted file mode 100644 index 0fab020e2..000000000 --- a/vendor/src/github.com/docker/go/canonical/json/encode.go +++ /dev/null @@ -1,1245 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package json implements encoding and decoding of JSON objects as defined in -// RFC 4627. 
The mapping between JSON objects and Go values is described -// in the documentation for the Marshal and Unmarshal functions. -// -// See "JSON and Go" for an introduction to this package: -// https://golang.org/doc/articles/json_and_go.html -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "math" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// Marshal returns the JSON encoding of v. -// -// Marshal traverses the value v recursively. -// If an encountered value implements the Marshaler interface -// and is not a nil pointer, Marshal calls its MarshalJSON method -// to produce JSON. The nil pointer exception is not strictly necessary -// but mimics a similar, necessary exception in the behavior of -// UnmarshalJSON. -// -// Otherwise, Marshal uses the following type-dependent default encodings: -// -// Boolean values encode as JSON booleans. -// -// Floating point, integer, and Number values encode as JSON numbers. -// -// String values encode as JSON strings coerced to valid UTF-8, -// replacing invalid bytes with the Unicode replacement rune. -// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" -// to keep some browsers from misinterpreting JSON output as HTML. -// Ampersand "&" is also escaped to "\u0026" for the same reason. -// -// Array and slice values encode as JSON arrays, except that -// []byte encodes as a base64-encoded string, and a nil slice -// encodes as the null JSON object. -// -// Struct values encode as JSON objects. Each exported struct field -// becomes a member of the object unless -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option. -// The empty values are false, 0, any -// nil pointer or interface value, and any array, slice, map, or string of -// length zero. The object's default key string is the struct field name -// but can be specified in the struct field's tag value. The "json" key in -// the struct field's tag value is the key name, followed by an optional comma -// and options. Examples: -// -// // Field is ignored by this package. -// Field int `json:"-"` -// -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` -// -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` -// -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` -// -// The "string" option signals that a field is stored as JSON inside a -// JSON-encoded string. It applies only to fields of string, floating point, -// integer, or boolean types. This extra level of encoding is sometimes used -// when communicating with JavaScript programs: -// -// Int64String int64 `json:",string"` -// -// The key name will be used if it's a non-empty string consisting of -// only Unicode letters, digits, dollar signs, percent signs, hyphens, -// underscores and slashes. -// -// Anonymous struct fields are usually marshaled as if their inner exported fields -// were fields in the outer struct, subject to the usual Go visibility rules amended -// as described in the next paragraph. -// An anonymous struct field with a name given in its JSON tag is treated as -// having that name, rather than being anonymous. 
-// An anonymous struct field of interface type is treated the same as having -// that type as its name, rather than being anonymous. -// -// The Go visibility rules for struct fields are amended for JSON when -// deciding which field to marshal or unmarshal. If there are -// multiple fields at the same level, and that level is the least -// nested (and would therefore be the nesting level selected by the -// usual Go rules), the following extra rules apply: -// -// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, -// even if there are multiple untagged fields that would otherwise conflict. -// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. -// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. -// -// Handling of anonymous struct fields is new in Go 1.1. -// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of -// an anonymous struct field in both current and earlier versions, give the field -// a JSON tag of "-". -// -// Map values encode as JSON objects. -// The map's key type must be string; the map keys are used as JSON object -// keys, subject to the UTF-8 coercion described for string values above. -// -// Pointer values encode as the value pointed to. -// A nil pointer encodes as the null JSON object. -// -// Interface values encode as the value contained in the interface. -// A nil interface value encodes as the null JSON object. -// -// Channel, complex, and function values cannot be encoded in JSON. -// Attempting to encode such a value causes Marshal to return -// an UnsupportedTypeError. -// -// JSON cannot represent cyclic data structures and Marshal does not -// handle them. Passing cyclic structures to Marshal will result in -// an infinite recursion. -// -func Marshal(v interface{}) ([]byte, error) { - return marshal(v, false) -} - -// MarshalIndent is like Marshal but applies Indent to format the output. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - b, err := Marshal(v) - if err != nil { - return nil, err - } - var buf bytes.Buffer - err = Indent(&buf, b, prefix, indent) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalCanonical is like Marshal but encodes into Canonical JSON. -// Read more at: http://wiki.laptop.org/go/Canonical_JSON -func MarshalCanonical(v interface{}) ([]byte, error) { - return marshal(v, true) -} - -func marshal(v interface{}, canonical bool) ([]byte, error) { - e := &encodeState{canonical: canonical} - err := e.marshal(v) - if err != nil { - return nil, err - } - return e.Bytes(), nil -} - -// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 -// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 -// so that the JSON will be safe to embed inside HTML
-
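To ground the Marshal/Unmarshal semantics documented in the removed package above, here is a minimal usage sketch. It assumes the package is imported from its vendored path, github.com/docker/go/canonical/json, and uses only the API shown in the removed sources (Unmarshal and MarshalCanonical); the input literals are illustrative.

    package main

    import (
    	"fmt"

    	json "github.com/docker/go/canonical/json"
    )

    func main() {
    	// Unmarshal into an empty interface yields the generic types listed
    	// in the documentation above: map[string]interface{}, []interface{},
    	// string, float64, bool, or nil.
    	var v interface{}
    	if err := json.Unmarshal([]byte(`{"b":true,"a":"x"}`), &v); err != nil {
    		panic(err)
    	}
    	fmt.Printf("%#v\n", v)

    	// MarshalCanonical encodes into Canonical JSON, so the output is
    	// byte-for-byte reproducible; string map keys are emitted sorted.
    	out, err := json.MarshalCanonical(map[string]int{"b": 2, "a": 1})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(out)) // {"a":1,"b":2}
    }

Reproducible encoding is what makes hashing serialized structures meaningful, which is exactly how the layer and image identifiers in the sections below are defined.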
- Layer -
-
- Images are composed of layers. Each layer is a set of filesystem - changes. Layers do not have configuration metadata such as environment - variables or default arguments - these are properties of the image as a - whole rather than any particular layer. -
-
- Image JSON -
-
- Each image has an associated JSON structure which describes some - basic information about the image such as date created, author, and the - ID of its parent image as well as execution/runtime configuration like - its entry point, default arguments, CPU/memory shares, networking, and - volumes. The JSON structure also references a cryptographic hash of - each layer used by the image, and provides history information for - those layers. This JSON is considered to be immutable, because changing - it would change the computed ImageID. Changing it means creating a new - derived image, instead of changing the existing image. -
-
- Image Filesystem Changeset -
-
- Each layer has an archive of the files which have been added, changed, - or deleted relative to its parent layer. Using a layer-based or union - filesystem such as AUFS, or by computing the diff from filesystem - snapshots, the filesystem changeset can be used to present a series of - image layers as if they were one cohesive filesystem. -
-
- Layer DiffID -
-
- Layers are referenced by cryptographic hashes of their serialized - representation. This is a SHA256 digest over the tar archive used to - transport the layer, represented as a hexadecimal encoding of 256 bits, e.g., - sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. - Layers must be packed and unpacked reproducibly to avoid changing the - layer ID, for example by using tar-split to save the tar headers. Note - that the digest used as the layer ID is taken over an uncompressed - version of the tar. -
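A short Go sketch of the DiffID computation as described: stream the uncompressed tar through a SHA256 hash. The file name layer.tar is illustrative.

    package main

    import (
    	"crypto/sha256"
    	"fmt"
    	"io"
    	"os"
    )

    func main() {
    	// The DiffID is the SHA256 of the *uncompressed* tar stream, so hash
    	// the archive exactly as it unpacks, not a gzipped transport blob.
    	f, err := os.Open("layer.tar") // illustrative path
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	h := sha256.New()
    	if _, err := io.Copy(h, f); err != nil {
    		panic(err)
    	}
    	fmt.Printf("sha256:%x\n", h.Sum(nil))
    }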
-
- Layer ChainID -
-
- For convenience, it is sometimes useful to refer to a stack of layers - with a single identifier. This is called a ChainID. For a - single layer (or the layer at the bottom of a stack), the - ChainID is equal to the layer's DiffID. - Otherwise the ChainID is given by the formula: - ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)). -
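The recurrence can be made concrete in a few lines of Go; the helper name chainID and the sample digests below are illustrative, not part of any Docker API.

    package main

    import (
    	"crypto/sha256"
    	"fmt"
    )

    // chainID folds a stack of layer DiffIDs (bottom-most first) into a
    // single ChainID using the formula above:
    // ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)).
    func chainID(diffIDs []string) string {
    	id := diffIDs[0] // for the bottom layer, ChainID == DiffID
    	for _, diff := range diffIDs[1:] {
    		sum := sha256.Sum256([]byte(id + " " + diff))
    		id = fmt.Sprintf("sha256:%x", sum)
    	}
    	return id
    }

    func main() {
    	fmt.Println(chainID([]string{
    		"sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9",
    		"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
    	}))
    }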
-
- ImageID -
-
- Each image's ID is given by the SHA256 hash of its configuration JSON. It is - represented as a hexadecimal encoding of 256 bits, e.g., - sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. - Since the configuration JSON that gets hashed references hashes of each - layer in the image, this formulation of the ImageID makes images - content-addressable.
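Correspondingly, a sketch of deriving an ImageID: hash the stored configuration bytes exactly as they are, without re-encoding them (the path config.json is illustrative).

    package main

    import (
    	"crypto/sha256"
    	"fmt"
    	"io/ioutil"
    )

    func main() {
    	// The ImageID is the SHA256 of the configuration JSON as stored;
    	// re-serializing the structure could change the bytes and the ID.
    	config, err := ioutil.ReadFile("config.json") // illustrative path
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("sha256:%x\n", sha256.Sum256(config))
    }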
-
- Tag -
-
- A tag serves to map a descriptive, user-given name to any single image - ID. Tag values are limited to the set of characters - [a-zA-Z0-9_.-], except they may not start with a . - or - character. Tags are limited to 127 characters. -
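The constraints above transcribe directly into a regular expression; this sketch is derived from the prose here, not copied from Docker's reference implementation.

    package main

    import (
    	"fmt"
    	"regexp"
    )

    // tagPattern: characters [a-zA-Z0-9_.-], no leading '.' or '-',
    // and at most 127 characters in total.
    var tagPattern = regexp.MustCompile(`^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,126}$`)

    func main() {
    	for _, tag := range []string{"3.1.4", "latest", "-bad", ".bad"} {
    		fmt.Println(tag, tagPattern.MatchString(tag))
    	}
    }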
-
- Repository -
-
- A collection of tags grouped under a common prefix (the name component - before :). For example, in an image tagged with the name - my-app:3.1.4, my-app is the Repository - component of the name. A repository name is made up of slash-separated - name components, optionally prefixed by a DNS hostname. The hostname - must comply with standard DNS rules, but may not contain - _ characters. If a hostname is present, it may optionally - be followed by a port number in the format :8080. - Name components may contain lowercase characters, digits, and - separators. A separator is defined as a period, one or two underscores, - or one or more dashes. A name component may not start or end with - a separator.
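Likewise, the name-component grammar can be checked with a regular expression derived from the rules above. This reading also rejects adjacent separators (e.g. "a.-b"), which is one defensible interpretation of the prose rather than a rule it states outright.

    package main

    import (
    	"fmt"
    	"regexp"
    )

    // componentPattern: runs of lowercase letters and digits joined by a
    // separator (a period, one or two underscores, or one or more dashes),
    // never starting or ending with a separator.
    var componentPattern = regexp.MustCompile(`^[a-z0-9]+(?:(?:\.|_{1,2}|-+)[a-z0-9]+)*$`)

    func main() {
    	for _, name := range []string{"my-app", "my__app", "my.app", "-bad", "bad_", "Bad"} {
    		fmt.Println(name, componentPattern.MatchString(name))
    	}
    }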
-