Compare commits

...

3 Commits

Author SHA1 Message Date
Knative Automation d697f4b547
upgrade to latest dependencies (#2047)
bumping knative.dev/eventing 6a695cb...0dadfd9:
  > 0dadfd9 [release-1.11] Scheduler: fix reserved replicas handling, blocking autoscaler and overcommitted pods (#7374)
  > c1626f1 [release-1.11] Update dependencies (#7362)
  > 46cc775 [release-1.11] TLS certificate rotation tests (#7103) (#7346)
bumping knative.dev/networking 75dcd54...a595805:
  > a595805 Run ./hack/update-deps.sh --upgrade --release 1.11 (#878)
bumping knative.dev/pkg bd99f2f...56bfe0d:
  > 56bfe0d [release-1.11] [CVE-2023-44487] Disable http2 for webhooks (#2875)
bumping knative.dev/client-pkg bd83cf4...bcb06af:
  > bcb06af upgrade to latest dependencies (#123)
bumping knative.dev/serving f60eb32...4ff7168:
  > 4ff7168 [release-1.11] bump x/net to v0.17 (#14516)
  > fb9129c Leave a comment which will trigger a new dot release (#14500)

Signed-off-by: Knative Automation <automation@knative.team>
2023-10-23 19:09:46 +00:00
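The `[CVE-2023-44487]` entry above refers to the HTTP/2 "Rapid Reset" vulnerability; the knative.dev/pkg backport mitigates it by disabling HTTP/2 for webhook servers, and the x/net bump in the next commit caps handler goroutines at MaxConcurrentStreams. As a rough, hedged illustration of the webhook-side mitigation (not the actual knative.dev/pkg code), a Go TLS server can opt out of the standard library's bundled HTTP/2 support by supplying a non-nil, empty `TLSNextProto` map:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// With HTTP/2 disabled, r.Proto reports HTTP/1.1 for TLS clients.
		fmt.Fprintf(w, "served over %s\n", r.Proto)
	})

	srv := &http.Server{
		Addr:    ":8443",
		Handler: mux,
		// A non-nil, empty TLSNextProto map tells net/http not to enable
		// its built-in HTTP/2 support during the TLS handshake.
		TLSNextProto: map[string]func(*http.Server, *tls.Conn, http.Handler){},
	}
	// cert.pem / key.pem are placeholder paths used only for illustration.
	if err := srv.ListenAndServeTLS("cert.pem", "key.pem"); err != nil {
		panic(err)
	}
}
```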
Knative Automation 24ce47272c
upgrade to latest dependencies (#2016)
bumping golang.org/x/net c73c09c...b225e7c:
  > b225e7c http2: limit maximum handler goroutines to MaxConcurrentStreams
  > 88194ad go.mod: update golang.org/x dependencies
  > 2b60a61 quic: fix several bugs in flow control accounting
  > 73d82ef quic: handle DATA_BLOCKED frames
  > 5d5a036 quic: handle streams moving from the data queue to the meta queue
  > 350aad2 quic: correctly extend peer's flow control window after MAX_DATA
  > 21814e7 quic: validate connection id transport parameters
  > a600b35 quic: avoid redundant MAX_DATA updates
  > ea63359 http2: check stream body is present on read timeout
  > ddd8598 quic: version negotiation
  > 3b0ab98 quic: avoid deadlock on listener close
  > 732b4bc quic: report initial TLS errors
  > 8add2e1 quic: enforce AEAD integrity limit
  > 7c40cbd dns/dnsmessage: use map[string]uint16 instead of map[string]int
  > b3f1f23 quic: initiate key updates
  > 18f2095 quic: handle peer-initiated key updates
  > 008c0af quic: refactor keys for key updates
  > 6a4de22 quic: connection lifetime management
  > ea4a2ff quic: fix detection of reserved bits in 1-RTT packets
  > 47caaff quic: send and receive UDP datagrams
  > 02eb0f3 quic: avoid deadlock when updating inbound conn-level flow control
  > 57bce0e quic: move packetType.String out of test-only code
  > cae7dab quic: outbound connection-level flow control
  > 217377b quic: inbound connection-level flow control
  > 044c308 quic: check for packet overflow when writing MAX_STREAMS
  > 5401f76 quic: test lost bidi MAX_STREAMS frame handling
  > da5f9f7 quic: don't block Writes on stream-level flow control
  > c3c6260 quic: simplify gate operations
  > 717519d quic: limits on the number of open streams
  > 94087ad dns/dnsmessage: mention that Name in non-escaped
  > 2a0da8b go.mod: update golang.org/x dependencies
  > 97384c1 quic: remove streams from the conn when done
  > 03d5e62 http2: remove unused ClientConn.tconnClosed
  > b82f062 quic: include ignored frames in test log output
  > 7374d34 quic: don't block when closing read-only streams
  > b4d09be dns/dnsmessage: compress all names while appending to a buffer
  > 8b010a5 quic: fix race condition in runAsync test helper
  > fe2abcb quic: validate stream limits in transport params
  > d1b0a97 quic: avoid sending 1-RTT frames in initial/handshake packets
  > 4332436 quic: send more transport parameters
  > 52fbe37 quic: add test helpers for acking packets
  > 4a2d37e http2: remove Docker-requiring tests
  > efb8d7a dns/dnsmessage: don't include bytes after name.Length in the compression map
  > d8d8478 quic: read-closing and reset streams, wait on close
  > 3d2be97 quic: fix testConn.uncheckedHandshake
  > 0f7767c dns/dnsmessage: validate cached section when skipping sections
  > f89417c dns/dnsmessage: reduce Parser size
  > 9cde5a0 net/http2: remove awaitGracefulShutdown
  > 95cb3bb dns/dnsmessage: show AD and CD bit in Header.GoString()
  > 126a5f3 quic: fix some bugs in the sendable stream list
  > f09e753 quic: send and receive stream data
  > 1e23797 publicsuffix: update table to version 20230804
  > c8c0290 go.mod: update golang.org/x dependencies
  > 0b21d06 quic: framework for testing blocking operations
  > 4648651 quic: add -vv flag for more verbose tests
  > 60ae793 quic: don't send session tickets
  > a7da556 http2: optimize buffer allocation in transport
  > 167593b quic: create and accept streams
  > 8ffa475 html: only render content literally in the HTML namespace
  > 63fe334 quic: gate and queue synchronization primitives
  > bd8ac9e quic: fill out connection id handling
  > 08001cc quic: debug logging of packets
  > dd0aa33 quic: tls handshake
  > 5e678bb quic: CRYPTO stream handling
  > dd5bc96 internal/quic: deflake TestConnTestConn
  > d0912d4 quic: add pipe type
  > 8db2ead quic: transport parameter encoding and decoding
  > 0adcadf quic: send and receive datagrams
  > 16cc77a quic: print better stacks on SIGQUIT
  > 4a3f925 quic: basic connection event loop
  > 57553cb quic: connection ids
  > 304cc91 quic: tracking of received packets and acks to send
  > 9475ce1 quic: fix typos in comments
  > d8f9c01 dns/dnsmessage: add fuzz test
  > 8126108 dns/dnsmessage: update Parser docs
  > a1613c2 http2: handle trailing colon in authorityAddr
bumping golang.org/x/crypto e984872...e3cc52e:
  > e3cc52e go.mod: update golang.org/x dependencies
  > 833695f ssh: add server side support for ping@openssh.com protocol extension
  > ec07f4e chacha20: drop Go 1.10 compatibility for arm64
  > b665ba6 all: use crypto/ed25519 instead of golang.org/x/crypto/ed25519
  > a1aeb9b ssh: add test cases for compatibility with old (buggy) clients
  > 28c53ff ssh: add MultiAlgorithmSigner
  > 3f0842a sha3: have ShakeHash extend hash.Hash
  > e90f1e1 cryptobyte: add uint48 methods
  > d359caa ssh: support for marshaling keys using the OpenSSH format
  > c5370d2 ssh: check the declared public key algo against decoded one
  > 0d375be go.mod: update golang.org/x dependencies
  > b4ddeed go.mod: update golang.org/x dependencies
  > edc325d ssh: fix call to Fatalf from a non-test goroutine
  > eab9315 ssh: add diffie-hellman-group16-sha512 kex
  > ddfa821 ssh: ignore invalid MACs and KEXs just like we do for ciphers
  > d08e19b x509roots/fallback: update bundle
  > 12e1fcd internal/wycheproof: skip all tests in short test mode
  > 3f8f064 ssh: prefer sha256 based RSA key algorithms
  > 5df3b59 ssh: disable client agent tests on Windows
  > 2e82bdd fix TestValidTerminalMode: missing output from echo SHELL $SHELL
  > 64e0e99 ssh: fix RSA certificate and public key authentication with older clients
  > 23b1b90 ssh: prefer sha256 based MAC algorithms
bumping golang.org/x/term edd9fb7...ea6303a:
  > ea6303a go.mod: update golang.org/x dependencies
  > f413282 go.mod: update golang.org/x dependencies
  > 19e73c2 go.mod: update golang.org/x dependencies
bumping golang.org/x/text e503480...f488e19:
  > f488e19 unicode/norm: fix function name on comment
  > fb697c0 cmd/gotext: actually use -dir flag
  > f3e69ed cmd/gotext: fix misbehaviors
  > ab07ad1 all: remove repetitive words
bumping knative.dev/pkg aef227e...bd99f2f:
  > bd99f2f Bumped x/net to v0.17.0 to fix cve-2023-44487 on release-1.11 (#2860)
bumping golang.org/x/sys a1a9c4b...2964e1e:
  > 2964e1e unix: remove unused readlen and writelen
  > 3186bae windows: remove the 8192-codepoint arg limit in FuzzComposeCommandLine
  > 807530f unix: remove lists of unimplemented syscalls
  > 01c413d windows: document the return type mismatch for CommandLineToArgv
  > e649453 windows: convert TestCommandLineRecomposition to a fuzz test and fix discrepancies
  > 8858c72 unix: update riscv_hwprobe constants
  > 71c9d87 windows: add console ConPTY API
  > aa9470e unix/linux: update to Linux kernel 6.5
  > c7ff727 unix: fix double copy in (*SockaddrALG).sockaddr
  > c7cbcbb unix: add TestSockaddrALG
  > 8d9dcc4 unix: modernize test helpers
  > a26c6de unix: use errnoErr in generated wrappers on solaris
  > f3ef2d1 cpu: remove the use of ioutil
  > 5a17dda unix: remove the use of ioutil
  > 38ebf41 cpu: fix wrong cache line size of riscv64
  > fdc7ef4 all: remove ioutil usage from tests
  > fc717d3 unix: remove usage of ioutil.TempFile in tests
  > cb4ecd9 unix: use filepath in tests where appropriate
  > 4848eb0 windows,windows\svc,windows\svc\mgr: use unsafe.Slice instead of unsafeheader.Slice
  > 0e97d69 all: use t.TempDir in tests
  > 0514fec unix: rm unused zos test helper functions
  > bfd1ebb unix: remove unused ptracePtr on darwin
  > 81cb935 unix, windows: use ^TestName$ regular pattern for invoking a single test
  > 5154691 unix/linux: update to gcc 13.2.0, qemu 8.0.3 for loong64 and and Go 1.21.0 for all
  > e8190d9 windows: don't check non-existent return code in GetStartupInfo
  > ad02017 windows: use SyscallN in mkwinsyscall
  > 7023367 cpu: remove repetitive word
  > eabbd5c cpu: add support for amx detection
  > 552c4e8 unix: avoid setting O_NONBLOCK needlessly by checking flags beforehand
  > ee57887 unix: add SchedSetAttr and SchedGetAttr for Linux
  > 60ecf13 windows: add TimeBeginPeriod and TimeEndPeriod syscalls
  > 104d401 unix: add riscv_hwprobe for riscv64
  > 70f4e40 unix: retry fetching of lists through sysctl if the size changes
  > ad7130c unix: add more block device ioctl numbers
  > c406141 all: fix some typos
  > 25d0004 unix: fix last argument of pselect6 on linux
  > 706fa98 windows: remove repetitive words
  > 3fead03 unix: add Mremap for netbsd
bumping knative.dev/serving f1617ef...f60eb32:
  > f60eb32 [release-1.11] fix securityContext for Knative Service Pod (user-container and queue-proxy) (#14378)
bumping knative.dev/eventing 248a471...6a695cb:
  > 6a695cb [release-1.11] Adding description for the built-in event types of our duck sources (#7276)
  > 479724b [release-1.11] Option to specify different namespace for resources (#7270)
  > 1e8614d [release-1.11] Added hostnames to tls certificates (#7259)
  > e3ba93b Fixed eventtype create-delete loop on built in sources (#7245) (#7250)
  > cf0f9d5 [release-1.11] Autoscaler considers reserved and pending replicas (#7216)
  > 3ccfc1c [release-v1.11] Cherry pick eventtype autocreate fixes (#7201)
  > 1a5f37c [release-1.11] Global resync brokers on config-features changes (#7134)
  > d3a0faa [release-1.11] Watch config-features in IMC controller (#7133)
  > fee0553 [release-1.11] Upgrade to latest dependencies (#7132)
  > cf146f1 [release-1.11] Fix edit role aggregation (#7128)

Signed-off-by: Knative Automation <automation@knative.team>
2023-10-12 13:19:56 +00:00
Matej Vasek 82c76101a3
[release-1.11] backports for release-1.11.1 (#1998)
* chore: use tkn tasks from PR branch in CI (#1914)

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* chore: released binaries refer correct task yamls (#1916)

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* chore: refer correct tkn yaml in prow test (#1918)

Without this change the prow test would refer to tkn yamls from the main branch
rather than from the PR head branch.

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* chore: update release generation (#1924)

* Minimize release binary size.
* Release latest version of buildpack tekton task.

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* test: update github ref used on e2e oncluster tests (#1917)

* test: Split of GH oncluster tests by builder. Added FUNC_BUILDER env var for e2e oncluster tests (#1963)

* Use our own s2i image (#1971)

Our image is much more recent and it is multiarch.

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* chore: update buildah image ref (#1960)

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* chore: update CA certs (#1944)

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* chore: update Quarkus platform version to 3.4.1 (#1989)

Co-authored-by: Knative Automation <automation@knative.team>
Signed-off-by: Matej Vasek <mvasek@redhat.com>

* chore: update mvn wrapper in Quarkus template (#1987)

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* chore: use ./mvnw not mvn in tests (#1988)

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* chore: update Springboot platform version

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fix: docker registry/repository parsing (#1929)

* fix: docker registry/repository parsing

Use go-containerregistry to do parsing.

Signed-off-by: Matej Vasek <mvasek@redhat.com>
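A minimal sketch of the parsing approach described above, using the `name` package of go-containerregistry (illustrative only, not the actual func source):

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	// Parse a short image reference; registry, repository, and tag are
	// split and defaulted by the library rather than by hand-rolled code.
	ref, err := name.ParseReference("alice/my-func:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Context().RegistryStr())   // index.docker.io
	fmt.Println(ref.Context().RepositoryStr()) // alice/my-func
	fmt.Println(ref.Identifier())              // latest
}
```

The library normalizes the default registry to `index.docker.io`, which lines up with the `DefaultRegistry` change from `docker.io` to `index.docker.io` visible further down in this diff.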

* fix: use kebab-case instead of camelCase

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fix: use kebab-case instead of camelCase

Signed-off-by: Matej Vasek <mvasek@redhat.com>

---------

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* Fix failing concurrent test on Windows (#1890)

* src: better debugging

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fix: wait for both builds

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fixup

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fixup

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fixup

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fix: detection of process liveness on Windows

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fix: make symlink relative

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fixup: cleanup

Signed-off-by: Matej Vasek <mvasek@redhat.com>

---------

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* allowing on cluster build for go runtime (#1445)

* allowing on cluster build for go runtime

* warning message added for go and rust builder

* gofmt

* fixups

Signed-off-by: Matej Vasek <mvasek@redhat.com>

---------

Signed-off-by: Matej Vasek <mvasek@redhat.com>
Co-authored-by: Matej Vasek <mvasek@redhat.com>

* Use custom jammy paketo builder (#1911)

* chore: use custom jammy paketo builder

Use our own modified jammy builder with additional buildpacks for
GoFunc and Rust. This enables on-cluster builds for Go and Rust functions.
Where possible (Go, Java) we use the "tiny" variant; other runtimes use the
"base" variant.

The updated task is a new file rather than a modification of the existing
task; this is done for the sake of keeping compatibility.

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fixup: remove unnecessary code per review request

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fixup

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fixup: podman test refers correct tkn task yamls

Signed-off-by: Matej Vasek <mvasek@redhat.com>

---------

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* doc: Go and Rust on cluster build is supported (#1923)

* doc: Go and Rust on cluster build is supported

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* doc: build envvars

Signed-off-by: Matej Vasek <mvasek@redhat.com>

---------

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fix: direct upload ppc64le, s390x (#1958)

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* fix: report correct error when task doesn't exist (#1915)

Signed-off-by: Matej Vasek <mvasek@redhat.com>

* feat: tekton task urls in the env sub-cmd output (#1925)

Signed-off-by: Matej Vasek <mvasek@redhat.com>

---------

Signed-off-by: Matej Vasek <mvasek@redhat.com>
Co-authored-by: Jefferson Ramos <jeramos@redhat.com>
Co-authored-by: Knative Automation <automation@knative.team>
Co-authored-by: Shashank Sharma <48708039+Shashankft9@users.noreply.github.com>
2023-10-02 12:01:49 +00:00
170 changed files with 15170 additions and 15660 deletions

@@ -9,6 +9,7 @@ jobs:
 matrix:
 go: [1.20.2]
 os: ["ubuntu-latest"]
+func_builder: ["pack", "s2i"]
 runs-on: ${{ matrix.os }}
 steps:
 - uses: actions/checkout@v3
@@ -29,6 +30,9 @@ jobs:
 - name: E2E On Cluster Test (Runtimes)
 env:
 TEST_TAGS: runtime
+FUNC_REPO_REF: ${{ github.event.pull_request.head.repo.full_name }}
+FUNC_REPO_BRANCH_REF: ${{ github.head_ref }}
+FUNC_BUILDER: ${{ matrix.func_builder }}
 run: make test-e2e-on-cluster
 - uses: codecov/codecov-action@v3
 with:

@@ -29,6 +29,8 @@ jobs:
 - name: E2E On Cluster Test
 env:
 E2E_RUNTIMES: ""
+FUNC_REPO_REF: ${{ github.event.pull_request.head.repo.full_name }}
+FUNC_REPO_BRANCH_REF: ${{ github.head_ref }}
 run: make test-e2e-on-cluster
 - uses: codecov/codecov-action@v3
 with:

@@ -11,7 +11,7 @@ jobs:
 name: E2E Test
 strategy:
 matrix:
-runtime: ["node", "go", "python", "quarkus", "springboot", "typescript"]
+runtime: ["node", "go", "python", "quarkus", "springboot", "typescript", "rust"]
 runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v3

@@ -42,6 +42,9 @@ jobs:
 - name: Patch Hosts
 run: ./hack/patch-hosts.sh
 - name: Integration Test
+env:
+FUNC_REPO_REF: ${{ github.event.pull_request.head.repo.full_name }}
+FUNC_REPO_BRANCH_REF: ${{ github.head_ref }}
 run: make test-integration
 - name: Dump Cluster Logs
 if: always()

@@ -37,5 +37,8 @@ jobs:
 - name: Local Registry
 run: ./hack/registry.sh
 - name: Integration Test Podman
+env:
+FUNC_REPO_REF: ${{ github.event.pull_request.head.repo.full_name }}
+FUNC_REPO_BRANCH_REF: ${{ github.head_ref }}
 run: ./hack/test-integration-podman.sh

@@ -33,6 +33,9 @@ jobs:
 java-version: ${{ matrix.java }}
 - name: Core Unit Tests
 run: make test
+env:
+FUNC_REPO_REF: ${{ github.event.pull_request.head.repo.full_name }}
+FUNC_REPO_BRANCH_REF: ${{ github.head_ref }}
 - name: Template Unit Tests
 run: make test-templates
 - uses: codecov/codecov-action@v3

@@ -27,7 +27,15 @@ VTAG := $(shell git tag --points-at HEAD | head -1)
 VTAG := $(shell [ -z $(VTAG) ] && echo $(ETAG) || echo $(VTAG))
 VERS ?= $(shell git describe --tags --match 'v*')
 KVER ?= $(shell git describe --tags --match 'knative-*')
-LDFLAGS := "-X main.date=$(DATE) -X main.vers=$(VERS) -X main.kver=$(KVER) -X main.hash=$(HASH)"
+LDFLAGS := -X main.date=$(DATE) -X main.vers=$(VERS) -X main.kver=$(KVER) -X main.hash=$(HASH)
+ifneq ($(FUNC_REPO_REF),)
+LDFLAGS += -X knative.dev/func/pkg/pipelines/tekton.FuncRepoRef=$(FUNC_REPO_REF)
+endif
+ifneq ($(FUNC_REPO_BRANCH_REF),)
+LDFLAGS += -X knative.dev/func/pkg/pipelines/tekton.FuncRepoBranchRef=$(FUNC_REPO_BRANCH_REF)
+endif
 MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
 # All Code prerequisites, including generated files, etc.
@@ -59,10 +67,10 @@ help:
 build: $(BIN) ## (default) Build binary for current OS
 $(BIN): $(CODE)
-env CGO_ENABLED=0 go build -ldflags $(LDFLAGS) ./cmd/$(BIN)
+env CGO_ENABLED=0 go build -ldflags "$(LDFLAGS)" ./cmd/$(BIN)
 test: $(CODE) ## Run core unit tests
-go test -race -cover -coverprofile=coverage.txt ./...
+go test -ldflags "$(LDFLAGS)" -race -cover -coverprofile=coverage.txt ./...
 .PHONY: check
 check: $(BIN_GOLANGCI_LINT) ## Check code quality (lint)
@@ -152,12 +160,12 @@ test-python: ## Test Python templates
 cd templates/python/http && python3 test_func.py && rm -rf __pycache__
 test-quarkus: ## Test Quarkus templates
-cd templates/quarkus/cloudevents && mvn -q test && mvn clean
-cd templates/quarkus/http && mvn -q test && mvn clean
+cd templates/quarkus/cloudevents && ./mvnw -q test && ./mvnw clean
+cd templates/quarkus/http && ./mvnw -q test && ./mvnw clean
 test-springboot: ## Test Spring Boot templates
-cd templates/springboot/cloudevents && mvn -q test && mvn clean
-cd templates/springboot/http && mvn -q test && mvn clean
+cd templates/springboot/cloudevents && ./mvnw -q test && ./mvnw clean
+cd templates/springboot/http && ./mvnw -q test && ./mvnw clean
 test-rust: ## Test Rust templates
 cd templates/rust/cloudevents && cargo -q test && cargo clean
@@ -193,12 +201,12 @@ templates/certs/ca-certificates.crt:
 ###################
 test-integration: ## Run integration tests using an available cluster.
-go test -tags integration -timeout 30m --coverprofile=coverage.txt ./... -v
+go test -ldflags "$(LDFLAGS)" -tags integration -timeout 30m --coverprofile=coverage.txt ./... -v
 .PHONY: func-instrumented
 func-instrumented: ## Func binary that is instrumented for e2e tests
-env CGO_ENABLED=1 go build -ldflags $(LDFLAGS) -cover -o func ./cmd/func
+env CGO_ENABLED=1 go build -ldflags "$(LDFLAGS)" -cover -o func ./cmd/func
 test-e2e: func-instrumented ## Run end-to-end tests using an available cluster.
 ./test/e2e_extended_tests.sh
@@ -218,37 +226,37 @@ cross-platform: darwin-arm64 darwin-amd64 linux-amd64 linux-arm64 linux-ppc64le
 darwin-arm64: $(BIN_DARWIN_ARM64) ## Build for mac M1
 $(BIN_DARWIN_ARM64): generate/zz_filesystem_generated.go
-env CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -o $(BIN_DARWIN_ARM64) -ldflags $(LDFLAGS) ./cmd/$(BIN)
+env CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -o $(BIN_DARWIN_ARM64) -trimpath -ldflags "$(LDFLAGS) -w -s" ./cmd/$(BIN)
 darwin-amd64: $(BIN_DARWIN_AMD64) ## Build for Darwin (macOS)
 $(BIN_DARWIN_AMD64): generate/zz_filesystem_generated.go
-env CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -o $(BIN_DARWIN_AMD64) -ldflags $(LDFLAGS) ./cmd/$(BIN)
+env CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -o $(BIN_DARWIN_AMD64) -trimpath -ldflags "$(LDFLAGS) -w -s" ./cmd/$(BIN)
 linux-amd64: $(BIN_LINUX_AMD64) ## Build for Linux amd64
 $(BIN_LINUX_AMD64): generate/zz_filesystem_generated.go
-env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o $(BIN_LINUX_AMD64) -ldflags $(LDFLAGS) ./cmd/$(BIN)
+env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o $(BIN_LINUX_AMD64) -trimpath -ldflags "$(LDFLAGS) -w -s" ./cmd/$(BIN)
 linux-arm64: $(BIN_LINUX_ARM64) ## Build for Linux arm64
 $(BIN_LINUX_ARM64): generate/zz_filesystem_generated.go
-env CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o $(BIN_LINUX_ARM64) -ldflags $(LDFLAGS) ./cmd/$(BIN)
+env CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o $(BIN_LINUX_ARM64) -trimpath -ldflags "$(LDFLAGS) -w -s" ./cmd/$(BIN)
 linux-ppc64le: $(BIN_LINUX_PPC64LE) ## Build for Linux ppc64le
 $(BIN_LINUX_PPC64LE): generate/zz_filesystem_generated.go
-env CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le go build -o $(BIN_LINUX_PPC64LE) -ldflags $(LDFLAGS) ./cmd/$(BIN)
+env CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le go build -o $(BIN_LINUX_PPC64LE) -trimpath -ldflags "$(LDFLAGS) -w -s" ./cmd/$(BIN)
 linux-s390x: $(BIN_LINUX_S390X) ## Build for Linux s390x
 $(BIN_LINUX_S390X): generate/zz_filesystem_generated.go
-env CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -o $(BIN_LINUX_S390X) -ldflags $(LDFLAGS) ./cmd/$(BIN)
+env CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -o $(BIN_LINUX_S390X) -trimpath -ldflags "$(LDFLAGS) -w -s" ./cmd/$(BIN)
 windows: $(BIN_WINDOWS) ## Build for Windows
 $(BIN_WINDOWS): generate/zz_filesystem_generated.go
-env CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -o $(BIN_WINDOWS) -ldflags $(LDFLAGS) ./cmd/$(BIN)
+env CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -o $(BIN_WINDOWS) -trimpath -ldflags "$(LDFLAGS) -w -s" ./cmd/$(BIN)
 ######################
 ##@ Schemas

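The Makefile hunk above threads `FUNC_REPO_REF` and `FUNC_REPO_BRANCH_REF` into the binary through Go's `-X` linker flag, which overwrites package-level string variables at link time; the quoting change (`-ldflags "$(LDFLAGS)"`) is needed once the flag list contains spaces. A minimal, generic sketch of the mechanism (hypothetical `main` package, not the actual `knative.dev/func/pkg/pipelines/tekton` source):

```go
// main.go: build with  go build -ldflags "-X main.funcRepoRef=myfork/func" .
package main

import "fmt"

// funcRepoRef holds a compile-time default; the linker's -X flag can
// replace it without touching the source, the same way the Makefile
// overrides knative.dev/func/pkg/pipelines/tekton.FuncRepoRef.
var funcRepoRef = "knative/func"

func main() {
	fmt.Println("tekton tasks are fetched from:", funcRepoRef)
}
```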
@@ -1198,7 +1198,7 @@ func testRegistryLoads(cmdFn commandConstructor, t *testing.T) {
 f := fn.Function{
 Root: root,
-Name: "myFunc",
+Name: "my-func",
 Runtime: "go",
 Registry: "example.com/alice",
 }
@@ -1219,7 +1219,7 @@ func testRegistryLoads(cmdFn commandConstructor, t *testing.T) {
 t.Fatal(err)
 }
-expected := "example.com/alice/myFunc:latest"
+expected := "example.com/alice/my-func:latest"
 if f.Image != expected {
 t.Fatalf("expected image name '%v'. got %v", expected, f.Image)
 }

@@ -15,6 +15,7 @@ import (
 "knative.dev/func/pkg/config"
 "knative.dev/func/pkg/functions"
 "knative.dev/func/pkg/k8s"
+"knative.dev/func/pkg/pipelines/tekton"
 )
 var format string = "json"
@@ -64,6 +65,7 @@ type Environment struct {
 Templates map[string][]string
 Environment []string
 Cluster string
+TektonTasks map[string]string
 Defaults config.Global
 }
@@ -131,6 +133,11 @@ func runEnvironment(cmd *cobra.Command, newClient ClientFactory, v *Version) (er
 Environment: envs,
 Cluster: host,
 Defaults: defaults,
+TektonTasks: map[string]string{
+"func-buildpack": tekton.BuildpackTaskURL,
+"func-s2i": tekton.S2ITaskURL,
+"func-deploy": tekton.DeployTaskURL,
+},
 }
 var s []byte

@@ -8,6 +8,10 @@ To build the core project, run `make` from the repository root. This will resul
 To remove built artifacts, use `make clean`.
+### Build affecting environment variables
+* `FUNC_REPO_REF` affects which github repo will be used to fetch tekton tasks for on cluster build. Default: `knative/func`.
+* `FUNC_REPO_BRANCH_REF` affects which github branch will be used to fetch tekton tasks for on cluster build. Default: `main`.
 ## Testing
 To run core unit tests, use `make test`.

@@ -2,9 +2,6 @@
 This guide describes how you can build a Function on Cluster with Tekton Pipelines. The on cluster build is enabled by fetching Function source code from a remote Git repository. Buildpacks or S2I builder strategy can be used to build the Function image.
-> **Note**
-> Not all runtimes support on cluster builds. **Go** and **Rust** are not currently supported.
 ## Prerequisite
 1. Install Tekton Pipelines on the cluster. Please refer to [Tekton Pipelines documentation](https://github.com/tektoncd/pipeline/blob/main/docs/install.md) or run the following command:
 ```bash
@@ -34,12 +31,14 @@ git remote add origin git@github.com:my-repo/my-function.git
 ```
 4. Update the Function configuration in `func.yaml` to enable on cluster builds for the Git repository:
 ```yaml
-build: git # required, specify `git` build type
+build:
 git:
 url: https://github.com/my-repo/my-function.git # required, git repository with the function source code
 revision: main # optional, git revision to be used (branch, tag, commit)
-# contextDir: myfunction # optional, needed only if the function is not located
-# in the repository root folder
+# contextDir: myfunction # optional, needed only if the function is not located in the repository root folder
+buildpacks: []
+builder: ""
+buildEnvs: []
 ```
 5. Implement the business logic of your Function, then commit and push changes
 ```bash

File diff suppressed because it is too large

go.mod
@@ -38,21 +38,21 @@ require (
 github.com/tektoncd/pipeline v0.47.0
 github.com/whilp/git-urls v1.0.0
 github.com/xanzy/go-gitlab v0.83.0
-golang.org/x/crypto v0.11.0
+golang.org/x/crypto v0.14.0
-golang.org/x/net v0.12.0
+golang.org/x/net v0.17.0
 golang.org/x/oauth2 v0.9.0
 golang.org/x/sync v0.3.0
-golang.org/x/term v0.10.0
+golang.org/x/term v0.13.0
 gopkg.in/yaml.v2 v2.4.0
 gotest.tools/v3 v3.4.0
 k8s.io/api v0.26.5
 k8s.io/apimachinery v0.26.5
 k8s.io/client-go v1.5.2
-knative.dev/client-pkg v0.0.0-20230726202841-bd83cf476909
+knative.dev/client-pkg v0.0.0-20231012115829-bcb06af7a827
-knative.dev/eventing v0.38.0
+knative.dev/eventing v0.38.5
 knative.dev/hack v0.0.0-20230712131415-ddae80293c43
-knative.dev/pkg v0.0.0-20230718152110-aef227e72ead
+knative.dev/pkg v0.0.0-20231023150739-56bfe0dd9626
-knative.dev/serving v0.38.0
+knative.dev/serving v0.38.2
 )
 require (
@@ -232,8 +232,8 @@ require (
 go.uber.org/zap v1.24.0 // indirect
 golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
 golang.org/x/mod v0.12.0 // indirect
-golang.org/x/sys v0.10.0 // indirect
+golang.org/x/sys v0.13.0 // indirect
-golang.org/x/text v0.11.0 // indirect
+golang.org/x/text v0.13.0 // indirect
 golang.org/x/time v0.3.0 // indirect
 golang.org/x/tools v0.9.1 // indirect
 gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
@@ -251,7 +251,7 @@ require (
 k8s.io/klog/v2 v2.100.1 // indirect
 k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
 k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
-knative.dev/networking v0.0.0-20230718160410-75dcd54d9510 // indirect
+knative.dev/networking v0.0.0-20231012062757-a5958051caf8 // indirect
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 sigs.k8s.io/kustomize/api v0.12.1 // indirect
 sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect

go.sum
@@ -1033,8 +1033,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
 golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
-golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
-golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
+golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1137,8 +1137,8 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
-golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1265,8 +1265,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
-golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1274,8 +1274,8 @@ golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
-golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
-golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
+golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1288,8 +1288,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
-golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1616,18 +1616,18 @@ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
 k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk=
 k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-knative.dev/client-pkg v0.0.0-20230726202841-bd83cf476909 h1:tn75M7CVM0eSg4nH0yxA0tqA/g9O7pT1518mZS3VCFI=
-knative.dev/client-pkg v0.0.0-20230726202841-bd83cf476909/go.mod h1:TuCXbcV3spDkZ3nGefq83OuLaICpfVpRJpegmYH9FfE=
+knative.dev/client-pkg v0.0.0-20231012115829-bcb06af7a827 h1:53PLMPZXF27yrfwR4lb9eMYp1g8BxrQDMj7A8tjTEFc=
+knative.dev/client-pkg v0.0.0-20231012115829-bcb06af7a827/go.mod h1:X1vX6B2s4wct9tkO2it3TeliLw+CbbUgeFlR61H7blI=
-knative.dev/eventing v0.38.0 h1:n6/k9IJ1kOvpZx4CMLqa1FG7g2iBiyKXwBu1Fy/81q4=
-knative.dev/eventing v0.38.0/go.mod h1:JUqEC0zoyfYqhRHFz8VUxjkxH9G1cQ/Y+UvhXTxUXgI=
+knative.dev/eventing v0.38.5 h1:NvSy3lek9IbLLWEot36NyAfNv7VkJNl38F1ItVL0D6s=
+knative.dev/eventing v0.38.5/go.mod h1:g+iAS+KBRSKULEPqoVnseMkObDeq3SJhqefbuIu8zY8=
 knative.dev/hack v0.0.0-20230712131415-ddae80293c43 h1:3SE06uNfSFGm/5XS+0trbyCUpgsOaBeyhPQU8FPNFz8=
 knative.dev/hack v0.0.0-20230712131415-ddae80293c43/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
-knative.dev/networking v0.0.0-20230718160410-75dcd54d9510 h1:eNbWjEkbtGjCqK5TB4S0CjSSDd4PvyXm+w7bM9/zce4=
-knative.dev/networking v0.0.0-20230718160410-75dcd54d9510/go.mod h1:Vngl91M++hqgoGNIjCii7MXnsEeN3kRbGC1aodhFqbk=
+knative.dev/networking v0.0.0-20231012062757-a5958051caf8 h1:oZMbAAXyiDZUYoQ4qEjL6uxXrL1xBVfEyQJx8sP0XEg=
+knative.dev/networking v0.0.0-20231012062757-a5958051caf8/go.mod h1:XwZwqJoLZR92N6F/sj8lHxttnry/Kiz+OgG8+uVuYn0=
-knative.dev/pkg v0.0.0-20230718152110-aef227e72ead h1:2dDzorpKuVZW3Qp7TbirMMq16FbId8f6bacQFX8jXLw=
-knative.dev/pkg v0.0.0-20230718152110-aef227e72ead/go.mod h1:WmrwRV/P+hGHoMraAEfwg6ec+fBTf+Obu41v354Iabc=
+knative.dev/pkg v0.0.0-20231023150739-56bfe0dd9626 h1:qFE+UDBRg6cpF5LbA0sv1XK4XZ36Z7aTRCa+HcuxnNQ=
+knative.dev/pkg v0.0.0-20231023150739-56bfe0dd9626/go.mod h1:g+UCgSKQ2f15kHYu/V3CPtoKo5F1x/2Y1ot0NSK7gA0=
-knative.dev/serving v0.38.0 h1:h2PLGbhXmE1P2jB5ozjnXT6CagadMykaJW4Q/dhv0os=
-knative.dev/serving v0.38.0/go.mod h1:3/KPMVdVOZSHdrRhvelLuxuO+Ftln2ZTKq8R3gUu6Gw=
+knative.dev/serving v0.38.2 h1:xxxC5JUwHzzssSeEWYs9DcuXlNtV9RHMX/VIlHlqa+M=
+knative.dev/serving v0.38.2/go.mod h1:/eWvWu4qo7+tkfc7FemNXcCtb3W8FNk1tCqAKC12WPo=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

@@ -21,11 +21,11 @@ VALIDATION_TESTS="make test"
 source $(dirname $0)/../vendor/knative.dev/hack/release.sh
-PIPELINE_ARTIFACTS="pkg/pipelines/resources/tekton/task/func-buildpacks/0.1/func-buildpacks.yaml pkg/pipelines/resources/tekton/task/func-deploy/0.1/func-deploy.yaml pkg/pipelines/resources/tekton/task/func-s2i/0.1/func-s2i.yaml"
+PIPELINE_ARTIFACTS="pkg/pipelines/resources/tekton/task/func-buildpacks/0.2/func-buildpacks.yaml pkg/pipelines/resources/tekton/task/func-deploy/0.1/func-deploy.yaml pkg/pipelines/resources/tekton/task/func-s2i/0.1/func-s2i.yaml"
 function build_release() {
 echo "🚧 🐧 Building cross platform binaries: Linux 🐧 (amd64 / arm64 / ppc64le / s390x), MacOS 🍏, and Windows 🎠"
-ETAG=${TAG} make cross-platform FUNC_REPO_BRANCH_REF="$(git branch --show-current)"
+ETAG=${TAG} make cross-platform
 ARTIFACTS_TO_PUBLISH="func_darwin_amd64 func_darwin_arm64 func_linux_amd64 func_linux_arm64 func_linux_ppc64le func_linux_s390x func_windows_amd64.exe"
 ARTIFACTS_TO_PUBLISH="${ARTIFACTS_TO_PUBLISH} ${PIPELINE_ARTIFACTS}"

@@ -17,7 +17,7 @@ podman_pid=$!
 DOCKER_HOST="unix://$(podman info -f '{{.Host.RemoteSocket.Path}}' 2> /dev/null)"
 export DOCKER_HOST
-go test -test.timeout=15m -tags integration ./... -v
+make test-integration
 e=$?
 kill -TERM "$podman_pid" > /dev/null 2>&1

@@ -27,18 +27,18 @@ import (
 // DefaultName when no WithName option is provided to NewBuilder
 const DefaultName = builders.Pack
-var DefaultBaseBuilder = "gcr.io/paketo-buildpacks/builder:base"
+var DefaultBaseBuilder = "ghcr.io/knative/builder-jammy-base:latest"
-var DefaultRustBuilder = "gcr.io/paketo-buildpacks/builder:full-cf"
+var DefaultTinyBuilder = "ghcr.io/knative/builder-jammy-tiny:latest"
 var (
 DefaultBuilderImages = map[string]string{
 "node": DefaultBaseBuilder,
 "nodejs": DefaultBaseBuilder,
 "typescript": DefaultBaseBuilder,
-"go": DefaultBaseBuilder,
+"go": DefaultTinyBuilder,
 "python": DefaultBaseBuilder,
-"quarkus": DefaultBaseBuilder,
+"quarkus": DefaultTinyBuilder,
-"rust": DefaultRustBuilder,
+"rust": DefaultBaseBuilder,
 "springboot": DefaultBaseBuilder,
 }
@@ -50,11 +50,10 @@ var (
 "docker.io/paketobuildpacks/",
 "ghcr.io/vmware-tanzu/function-buildpacks-for-knative/",
 "gcr.io/buildpacks/",
+"ghcr.io/knative/",
 }
-defaultBuildpacks = map[string][]string{
-"go": {"paketo-buildpacks/go-dist", "ghcr.io/boson-project/go-function-buildpack:tip"},
-}
+defaultBuildpacks = map[string][]string{}
 )
 // Builder will build Function using Pack.

@@ -24,7 +24,7 @@ import (
 const (
 // DefaultRegistry through which containers of functions will be shuttled.
-DefaultRegistry = "docker.io"
+DefaultRegistry = "index.docker.io"

 // DefaultTemplate is the default function signature / environmental context
 // of the resultant function. All runtimes are expected to have at least
@@ -50,7 +50,7 @@ var (
 // thus implicitly tests Create, Build and Deploy, which are exposed
 // by the client API for those who prefer manual transmissions.
 func TestClient_New(t *testing.T) {
-root := "testdata/example.com/testNew"
+root := "testdata/example.com/test-new"
 defer Using(t, root)()
 client := fn.New(fn.WithRegistry(TestRegistry), fn.WithVerbose(true))
@@ -210,7 +210,7 @@ func TestClient_New_RuntimeRequired(t *testing.T) {
 // TestClient_New_NameDefaults ensures that a newly created function has its name defaulted
 // to a name which can be derived from the last part of the given root path.
 func TestClient_New_NameDefaults(t *testing.T) {
-root := "testdata/example.com/testNameDefaults"
+root := "testdata/example.com/test-name-defaults"
 defer Using(t, root)()
 client := fn.New(fn.WithRegistry(TestRegistry))
@@ -230,7 +230,7 @@ func TestClient_New_NameDefaults(t *testing.T) {
 t.Fatal(err)
 }
-expected := "testNameDefaults"
+expected := "test-name-defaults"
 if f.Name != expected {
 t.Fatalf("name was not defaulted. expected '%v' got '%v'", expected, f.Name)
 }
@@ -239,7 +239,7 @@ func TestClient_New_NameDefaults(t *testing.T) {
 // TestClient_New_WritesTemplate ensures the config file and files from the template
 // are written on new.
 func TestClient_New_WritesTemplate(t *testing.T) {
-root := "testdata/example.com/testWritesTemplate"
+root := "testdata/example.com/test-writes-template"
 defer Using(t, root)()
 client := fn.New(fn.WithRegistry(TestRegistry))
@@ -262,7 +262,7 @@ func TestClient_New_WritesTemplate(t *testing.T) {
 // TestClient_New_ExtantAborts ensures that a directory which contains an extant
 // function does not reinitialize.
 func TestClient_New_ExtantAborts(t *testing.T) {
-root := "testdata/example.com/testExtantAborts"
+root := "testdata/example.com/test-extant-aborts"
 defer Using(t, root)()
 client := fn.New(fn.WithRegistry(TestRegistry))
@@ -305,7 +305,7 @@ func TestClient_New_NonemptyAborts(t *testing.T) {
 // conjunction with other tools (.envrc, etc)
 func TestClient_New_HiddenFilesIgnored(t *testing.T) {
 // Create a directory for the function
-root := "testdata/example.com/testHiddenFilesIgnored"
+root := "testdata/example.com/test-hidden-files-ignored"
 defer Using(t, root)()
 client := fn.New(fn.WithRegistry(TestRegistry))
@@ -332,7 +332,7 @@ func TestClient_New_HiddenFilesIgnored(t *testing.T) {
 // See the CLI for full details, but a standard default location is
 // $HOME/.config/func/repositories/boson/go/json
 func TestClient_New_RepositoriesExtensible(t *testing.T) {
-root := "testdata/example.com/testRepositoriesExtensible"
+root := "testdata/example.com/test-repositories-extensible"
 defer Using(t, root)()
 client := fn.New(
@@ -479,7 +479,7 @@ func TestClient_New_RegistryRequired(t *testing.T) {
 // resultant OCI container is populated.
 func TestClient_New_ImageNamePopulated(t *testing.T) {
 // Create the root function directory
-root := "testdata/example.com/testDeriveImage"
+root := "testdata/example.com/test-derive-image"
 defer Using(t, root)()
 // Create the function which calculates fields such as name and image.
@@ -508,7 +508,7 @@ func TestClient_New_ImageNamePopulated(t *testing.T) {
 // For example "alice" becomes "docker.io/alice"
 func TestClient_New_ImageRegistryDefaults(t *testing.T) {
 // Create the root function directory
-root := "testdata/example.com/testDeriveImageDefaultRegistry"
+root := "testdata/example.com/test-derive-image-default-registry"
 defer Using(t, root)()
 // Create the function which calculates fields such as name and image.
@@ -533,9 +533,9 @@ func TestClient_New_ImageRegistryDefaults(t *testing.T) {
 // Deploy (and confirms expected fields calculated).
 func TestClient_New_Delegation(t *testing.T) {
 var (
-root = "testdata/example.com/testNewDelegates" // .. in which to initialize
+root = "testdata/example.com/test-new-delegates" // .. in which to initialize
-expectedName = "testNewDelegates" // expected to be derived
+expectedName = "test-new-delegates" // expected to be derived
-expectedImage = "example.com/alice/testNewDelegates:latest"
+expectedImage = "example.com/alice/test-new-delegates:latest"
 builder = mock.NewBuilder()
 pusher = mock.NewPusher()
 deployer = mock.NewDeployer()
@@ -607,7 +607,7 @@ func TestClient_New_Delegation(t *testing.T) {
 // See TestClient_Runner for the test of the default runner implementation.
 func TestClient_Run(t *testing.T) {
 // Create the root function directory
-root := "testdata/example.com/testRun"
+root := "testdata/example.com/test-run"
 defer Using(t, root)()
 // client with the mock runner and the new test function
@@ -693,7 +693,7 @@ func TestClient_Runner(t *testing.T) {
 // .func to .gitignore is an important externally visible "feature", an explicit
 // test is warranted.
 func TestClient_Run_DataDir(t *testing.T) {
-root := "testdata/example.com/testRunDataDir"
+root := "testdata/example.com/test-run-data-dir"
 defer Using(t, root)()
 // Create a function at root
@@ -795,9 +795,9 @@ func TestClient_RunTimeout(t *testing.T) {
 // process, erroring if run on a directory uncreated.
 func TestClient_Update(t *testing.T) {
 var (
-root = "testdata/example.com/testUpdate"
+root = "testdata/example.com/test-update"
-expectedName = "testUpdate"
+expectedName = "test-update"
-expectedImage = "example.com/alice/testUpdate:latest"
+expectedImage = "example.com/alice/test-update:latest"
 builder = mock.NewBuilder()
 pusher = mock.NewPusher()
 deployer = mock.NewDeployerWithResult(fn.DeploymentResult{
@@ -954,8 +954,8 @@ func TestClient_Deploy_RegistryUpdate(t *testing.T) {
 // the function with the name of the function at the provided root.
 func TestClient_Remove_ByPath(t *testing.T) {
 var (
-root = "testdata/example.com/testRemoveByPath"
+root = "testdata/example.com/test-remove-by-path"
-expectedName = "testRemoveByPath"
+expectedName = "test-remove-by-path"
 remover = mock.NewRemover()
 )
@@ -993,8 +993,8 @@ func TestClient_Remove_ByPath(t *testing.T) {
 // the function with the name of the function at the provided root.
 func TestClient_Remove_DeleteAll(t *testing.T) {
 var (
-root = "testdata/example.com/testRemoveDeleteAll"
+root = "testdata/example.com/test-remove-delete-all"
-expectedName = "testRemoveDeleteAll"
+expectedName = "test-remove-delete-all"
 remover = mock.NewRemover()
 pipelinesProvider = mock.NewPipelinesProvider()
 deleteAll = true
@@ -1039,8 +1039,8 @@ func TestClient_Remove_DeleteAll(t *testing.T) {
 // the function with the name of the function at the provided root.
 func TestClient_Remove_Dont_DeleteAll(t *testing.T) {
 var (
-root = "testdata/example.com/testRemoveDontDeleteAll"
+root = "testdata/example.com/test-remove-dont-delete-all"
-expectedName = "testRemoveDontDeleteAll"
+expectedName = "test-remove-dont-delete-all"
 remover = mock.NewRemover()
 pipelinesProvider = mock.NewPipelinesProvider()
 deleteAll = false
@@ -1351,7 +1351,7 @@ func TestClient_Deploy_UnbuiltErrors(t *testing.T) {
 // TestClient_New_BuilderImagesPersisted Asserts that the client preserves user-
 // provided Builder Images
 func TestClient_New_BuildersPersisted(t *testing.T) {
-root := "testdata/example.com/testConfiguredBuilders" // Root from which to run the test
+root := "testdata/example.com/test-configured-builders" // Root from which to run the test
 defer Using(t, root)()
 client := fn.New(fn.WithRegistry(TestRegistry))
@@ -1388,7 +1388,7 @@ func TestClient_New_BuildersPersisted(t *testing.T) {
 // TestClient_New_BuildpacksPersisted ensures that provided buildpacks are
 // persisted on new functions.
 func TestClient_New_BuildpacksPersisted(t *testing.T) {
-root := "testdata/example.com/testConfiguredBuildpacks" // Root from which to run the test
+root := "testdata/example.com/test-configured-buildpacks" // Root from which to run the test
 defer Using(t, root)()
 buildpacks := []string{
@@ -1504,7 +1504,7 @@ func TestClient_Runtimes(t *testing.T) {
 // TestClient_New_Timestamp ensures that the creation timestamp is set on functions
 // which are successfully initialized using the client library.
 func TestClient_New_Timestamp(t *testing.T) {
-root := "testdata/example.com/testCreateStamp"
+root := "testdata/example.com/test-create-stamp"
 defer Using(t, root)()
 start := time.Now()
@@ -1526,7 +1526,7 @@ func TestClient_New_Timestamp(t *testing.T) {
 // function using a simple HTTP POST method with the invoke message as form
// field values (as though a simple form were posted). // field values (as though a simple form were posted).
func TestClient_Invoke_HTTP(t *testing.T) { func TestClient_Invoke_HTTP(t *testing.T) {
root := "testdata/example.com/testInvokeHTTP" root := "testdata/example.com/test-invoke-http"
defer Using(t, root)() defer Using(t, root)()
// Flag indicating the function was invoked // Flag indicating the function was invoked
@ -1639,7 +1639,7 @@ func TestClient_Invoke_HTTP(t *testing.T) {
// the invoker is sending the invocation message as a CloudEvent rather than // the invoker is sending the invocation message as a CloudEvent rather than
// a standard HTTP form POST. // a standard HTTP form POST.
func TestClient_Invoke_CloudEvent(t *testing.T) { func TestClient_Invoke_CloudEvent(t *testing.T) {
root := "testdata/example.com/testInvokeCloudEvent" root := "testdata/example.com/test-invoke-cloud-event"
defer Using(t, root)() defer Using(t, root)()
var ( var (
@ -1730,7 +1730,7 @@ func TestClient_Invoke_CloudEvent(t *testing.T) {
// TestClient_Instances ensures that when a function is run (locally) its metadata // TestClient_Instances ensures that when a function is run (locally) its metadata
// is available to other clients inspecting the same function using .Instances // is available to other clients inspecting the same function using .Instances
func TestClient_Instances(t *testing.T) { func TestClient_Instances(t *testing.T) {
root := "testdata/example.com/testInstances" root := "testdata/example.com/test-instances"
defer Using(t, root)() defer Using(t, root)()
// A mock runner // A mock runner
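The testdata roots above move from camelCase to kebab-case. Since the function name is derived from the directory and now ends up in an image reference validated by go-containerregistry (see the ImageName change below), uppercase characters, which are not legal in an image repository, would now be rejected. A minimal sketch of that validation, with illustrative names only:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	for _, ref := range []string{
		"example.com/alice/testNewDelegates:latest",   // old-style name: repository may not contain uppercase
		"example.com/alice/test-new-delegates:latest", // new-style name: parses cleanly
	} {
		_, err := name.ParseReference(ref)
		fmt.Printf("%-46s err=%v\n", ref, err)
	}
}
```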

View File

@ -10,6 +10,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/google/go-containerregistry/pkg/name"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
fnlabels "knative.dev/func/pkg/k8s/labels" fnlabels "knative.dev/func/pkg/k8s/labels"
"knative.dev/pkg/ptr" "knative.dev/pkg/ptr"
@@ -567,24 +568,19 @@ func (f Function) ImageName() (image string, err error) {
 	f.Registry = strings.Trim(f.Registry, "/") // too defensive?
 
-	registryTokens := strings.Split(f.Registry, "/")
-	if len(registryTokens) == 1 { // only namespace provided: ex. 'alice'
-		image = DefaultRegistry + "/" + f.Registry + "/" + f.Name
-	} else if len(registryTokens) == 2 || len(registryTokens) == 3 {
-		// registry/namespace ('quay.io/alice') or
-		// registry/parent-namespace/namespace ('quay.io/project/alice') provided
-		image = f.Registry + "/" + f.Name
-	} else if len(registryTokens) > 3 { // the name of the image is also provided `quay.io/alice/my.function.name`
-		return "", fmt.Errorf("registry should be either 'namespace', 'registry/namespace' or 'registry/parent/namespace', the name of the image will be derived from the function name")
-	}
-
 	// Explicitly append :latest tag. We expect source control to drive
 	// versioning, rather than rely on image tags with explicitly pinned version
 	// numbers, as is seen in many serverless solutions. This will be updated
 	// to branch name when we add source-driven canary/ bluegreen deployments.
 	// For pinning to an exact container image, see ImageWithDigest
-	return image + ":latest", nil
+	refStr := f.Registry + "/" + f.Name + ":latest"
+	ref, err := name.ParseReference(refStr)
+	if err != nil {
+		return "", fmt.Errorf("cannot determine function image: %w", err)
+	}
+	return ref.Name(), nil
 }
// Format yaml unmarshall error to be more human friendly. // Format yaml unmarshall error to be more human friendly.
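ImageName now delegates both validation and normalization to the go-containerregistry name package. A small sketch of what ref.Name() returns for registry values that mirror the updated test table (outputs noted in comments are indicative, based on the library's documented defaults):

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	registries := []string{
		"alice",           // bare namespace -> index.docker.io/alice/myfunc:latest
		"docker.io/alice", // docker.io is rewritten to index.docker.io
		"quay.io/alice",   // explicit registry is kept as-is
		"localhost:5000",  // host:port is treated as the registry itself
	}
	for _, r := range registries {
		ref, err := name.ParseReference(r + "/myfunc:latest")
		if err != nil {
			fmt.Println(r, "->", err)
			continue
		}
		fmt.Println(r, "->", ref.Name())
	}
}
```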

View File

@@ -140,29 +140,38 @@ func TestFunction_ImageName(t *testing.T) {
 		err  error
 	)
 	tests := []struct {
-		registry      string
 		name          string
+		registry      string
+		funcName      string
 		expectedImage string
 		expectError   bool
 	}{
-		{"alice", "myfunc", DefaultRegistry + "/alice/myfunc:latest", false},
-		{"quay.io/alice", "myfunc", "quay.io/alice/myfunc:latest", false},
-		{"docker.io/alice", "myfunc", "docker.io/alice/myfunc:latest", false},
-		{"docker.io/alice/sub", "myfunc", "docker.io/alice/sub/myfunc:latest", false},
-		{"alice", "", "", true},
-		{"", "myfunc", "", true},
+		{"short-name", "alice", "myfunc", DefaultRegistry + "/alice/myfunc:latest", false},
+		{"short-name-trailing-slash", "alice/", "myfunc", DefaultRegistry + "/alice/myfunc:latest", false},
+		{"full-name-quay-io", "quay.io/alice", "myfunc", "quay.io/alice/myfunc:latest", false},
+		{"full-name-docker-io", "docker.io/alice", "myfunc", DefaultRegistry + "/alice/myfunc:latest", false},
+		{"full-name-with-sub-path", "docker.io/alice/sub", "myfunc", DefaultRegistry + "/alice/sub/myfunc:latest", false},
+		{"localhost-direct", "localhost:5000", "myfunc", "localhost:5000/myfunc:latest", false},
+		{"full-name-with-sub-sub-path", "us-central1-docker.pkg.dev/my-gcpproject/team/user", "myfunc", "us-central1-docker.pkg.dev/my-gcpproject/team/user/myfunc:latest", false},
+		{"missing-func-name", "alice", "", "", true},
+		{"missing-registry", "", "myfunc", "", true},
 	}
 	for _, test := range tests {
-		f = Function{Registry: test.registry, Name: test.name}
-		got, err = f.ImageName()
-		if test.expectError && err == nil {
-			t.Errorf("registry '%v' and name '%v' did not yield the expected error",
-				test.registry, test.name)
-		}
-		if got != test.expectedImage {
-			t.Errorf("expected registry '%v' name '%v' to yield image '%v', got '%v'",
-				test.registry, test.name, test.expectedImage, got)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			f = Function{Registry: test.registry, Name: test.funcName}
+			got, err = f.ImageName()
+			if test.expectError && err == nil {
+				t.Errorf("registry '%v' and name '%v' did not yield the expected error",
+					test.registry, test.funcName)
+			}
+			if !test.expectError && err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			if got != test.expectedImage {
+				t.Errorf("expected registry '%v' name '%v' to yield image '%v', got '%v'",
+					test.registry, test.funcName, test.expectedImage, got)
+			}
+		})
 	}
 }

View File

@ -71,7 +71,7 @@ var TarImage = "quay.io/boson/alpine-socat:1.7.4.3-r1-non-root"
// UploadToVolume uploads files (passed in form of tar stream) into volume. // UploadToVolume uploads files (passed in form of tar stream) into volume.
func UploadToVolume(ctx context.Context, content io.Reader, claimName, namespace string) error { func UploadToVolume(ctx context.Context, content io.Reader, claimName, namespace string) error {
return runWithVolumeMounted(ctx, TarImage, []string{"tar", "-xmf", "-"}, content, claimName, namespace) return runWithVolumeMounted(ctx, TarImage, []string{"sh", "-c", "umask 0000 && exec tar -xmf -"}, content, claimName, namespace)
} }
// Runs a pod with given image, command and stdin // Runs a pod with given image, command and stdin
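The upload command now clears the umask before extracting the tar stream. A Unix-only sketch of the effect this relies on: permission bits requested at creation are masked by the process umask, so a 0777 directory from the archive would otherwise land as 0755 and be unwritable for a build step running under a different non-root UID. The pod image, shell, and tar flags are exactly as in the diff; the demo below only illustrates umask behaviour.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

func mode(p string) os.FileMode {
	fi, err := os.Stat(p)
	if err != nil {
		panic(err)
	}
	return fi.Mode().Perm()
}

func main() {
	base, _ := os.MkdirTemp("", "umask-demo")
	defer os.RemoveAll(base)

	masked := filepath.Join(base, "masked")
	_ = os.Mkdir(masked, 0777)
	fmt.Println("default umask:", mode(masked)) // typically -rwxr-xr-x

	old := syscall.Umask(0) // the equivalent of `umask 0000` in the new command
	defer syscall.Umask(old)

	unmasked := filepath.Join(base, "unmasked")
	_ = os.Mkdir(unmasked, 0777)
	fmt.Println("umask 0000:   ", mode(unmasked)) // -rwxrwxrwx
}
```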

View File

@ -13,8 +13,8 @@ func defaultPodSecurityContext() *corev1.PodSecurityContext {
if IsOpenShift() { if IsOpenShift() {
return nil return nil
} }
runAsUser := int64(1000) runAsUser := int64(1001)
runAsGroup := int64(1000) runAsGroup := int64(1002)
return &corev1.PodSecurityContext{ return &corev1.PodSecurityContext{
RunAsUser: &runAsUser, RunAsUser: &runAsUser,
RunAsGroup: &runAsGroup, RunAsGroup: &runAsGroup,

View File

@ -5,6 +5,7 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"strconv" "strconv"
"syscall" "syscall"
"time" "time"
@ -235,6 +236,9 @@ func processExists(pid string) bool {
if err != nil { if err != nil {
return false return false
} }
if runtime.GOOS == "windows" {
return true
}
err = process.Signal(syscall.Signal(0)) err = process.Signal(syscall.Signal(0))
return err == nil return err == nil
} }
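The new GOOS guard is needed because signal 0 is a Unix-only liveness probe: on Windows, os.FindProcess only succeeds when a handle to the process can actually be opened, so reaching that point already implies existence, while on Unix FindProcess always succeeds and the Signal(0) call does the real check. A standalone sketch of the resulting pattern (a simplified mirror of the function in the diff):

```go
package main

import (
	"fmt"
	"os"
	"runtime"
	"strconv"
	"syscall"
)

func processExists(pid string) bool {
	p, err := strconv.Atoi(pid)
	if err != nil {
		return false
	}
	process, err := os.FindProcess(p)
	if err != nil {
		return false // on Windows this already means "no such process"
	}
	if runtime.GOOS == "windows" {
		return true // a handle was opened, so the process exists
	}
	// On Unix, signal 0 checks existence/permissions without delivering anything.
	return process.Signal(syscall.Signal(0)) == nil
}

func main() {
	fmt.Println(processExists(strconv.Itoa(os.Getpid()))) // true: we exist
}
```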
@ -276,7 +280,11 @@ func updateLastLink(cfg *buildConfig) error {
fmt.Printf("ln -s %v %v\n", cfg.buildDir(), cfg.lastLink()) fmt.Printf("ln -s %v %v\n", cfg.buildDir(), cfg.lastLink())
} }
_ = os.RemoveAll(cfg.lastLink()) _ = os.RemoveAll(cfg.lastLink())
return os.Symlink(cfg.buildDir(), cfg.lastLink()) rp, err := filepath.Rel(filepath.Dir(cfg.lastLink()), cfg.buildDir())
if err != nil {
return err
}
return os.Symlink(rp, cfg.lastLink())
} }
// toPlatforms converts func's implementation-agnostic Platform struct // toPlatforms converts func's implementation-agnostic Platform struct
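The "last" link is now created relative to its own directory rather than as an absolute path, so it stays valid if the build cache is relocated or mounted under a different prefix. A minimal sketch with illustrative paths:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	base, _ := os.MkdirTemp("", "builds")
	defer os.RemoveAll(base)

	buildDir := filepath.Join(base, "by-hash", "abc123") // hypothetical build output
	lastLink := filepath.Join(base, "last")
	_ = os.MkdirAll(buildDir, 0755)

	// Target expressed relative to the directory holding the link.
	rel, err := filepath.Rel(filepath.Dir(lastLink), buildDir)
	if err != nil {
		panic(err)
	}
	_ = os.RemoveAll(lastLink)
	if err := os.Symlink(rel, lastLink); err != nil {
		panic(err)
	}

	target, _ := os.Readlink(lastLink)
	fmt.Println(target) // "by-hash/abc123", not an absolute path
}
```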

View File

@ -14,6 +14,7 @@ import (
"runtime" "runtime"
"sort" "sort"
"strings" "strings"
"sync"
"testing" "testing"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
@@ -81,7 +82,7 @@ func TestBuilder_Concurrency(t *testing.T) {
 	var (
 		pausedCh   = make(chan bool)
 		continueCh = make(chan bool)
-		doneCh     = make(chan bool)
+		wg         sync.WaitGroup
 	)
 
 	// Build A
@@ -93,12 +94,11 @@
 		}
 		return
 	}
-	builder1.onDone = func() {
-		doneCh <- true // Notify of being done
-	}
+	wg.Add(1)
 	go func() {
+		defer wg.Done()
 		if err := builder1.Build(context.Background(), f, TestPlatforms); err != nil {
-			fmt.Fprintf(os.Stderr, "test build error %v", err)
+			t.Errorf("test build error: %v", err)
 		}
 	}()
 
@@ -107,16 +107,21 @@
 	// Build B
 	builder2 := NewBuilder("builder2", true)
+	builder2.buildFn = func(config *buildConfig, platform v1.Platform) (v1.Descriptor, v1.Layer, error) {
+		return v1.Descriptor{}, nil, fmt.Errorf("the buildFn should not have been invoked")
+	}
+	wg.Add(1)
 	go func() {
+		defer wg.Done()
 		err = builder2.Build(context.Background(), f, TestPlatforms)
 		if !errors.As(err, &ErrBuildInProgress{}) {
-			fmt.Fprintf(os.Stderr, "test build error %v", err)
+			t.Errorf("test build error: %v", err)
 		}
 	}()
 
 	// Release the blocking Build A and wait until complete.
 	continueCh <- true
-	<-doneCh
+	wg.Wait()
 }
 
 func isFirstBuild(cfg *buildConfig, current v1.Platform) bool {
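The done-channel handshake is replaced by a sync.WaitGroup, which waits for both goroutines and lets failures surface through t.Errorf (safe to call from other goroutines) instead of writes to stderr. The coordination pattern, stripped of the builder types:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		wg         sync.WaitGroup
		continueCh = make(chan bool)
	)

	wg.Add(1)
	go func() { // "build A": holds the build until released
		defer wg.Done()
		<-continueCh
		fmt.Println("build A finished")
	}()

	wg.Add(1)
	go func() { // "build B": expected to bail out while A is in progress
		defer wg.Done()
		fmt.Println("build B rejected: build in progress")
	}()

	continueCh <- true // release A
	wg.Wait()          // replaces <-doneCh: both goroutines have returned
}
```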

View File

@ -0,0 +1,230 @@
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: func-buildpacks
labels:
app.kubernetes.io/version: "0.1"
annotations:
tekton.dev/categories: Image Build
tekton.dev/pipelines.minVersion: "0.17.0"
tekton.dev/tags: image-build
tekton.dev/displayName: "Knative Functions Buildpacks"
tekton.dev/platforms: "linux/amd64"
spec:
description: >-
The Knative Functions Buildpacks task builds source into a container image and pushes it to a registry,
using Cloud Native Buildpacks. This task is based on the Buildpacks Tekton task v 0.4.
workspaces:
- name: source
description: Directory where application source is located.
- name: cache
description: Directory where cache is stored (when no cache image is provided).
optional: true
- name: dockerconfig
description: >-
An optional workspace that allows providing a .docker/config.json file
for Buildpacks lifecycle binary to access the container registry.
The file should be placed at the root of the Workspace with name config.json.
optional: true
params:
- name: APP_IMAGE
description: The name of where to store the app image.
- name: REGISTRY
description: The registry associated with the function image.
- name: BUILDER_IMAGE
description: The image on which builds will run (must include lifecycle and compatible buildpacks).
- name: SOURCE_SUBPATH
description: A subpath within the `source` input where the source to build is located.
default: ""
- name: ENV_VARS
type: array
description: Environment variables to set during _build-time_.
default: []
- name: RUN_IMAGE
description: Reference to a run image to use.
default: ""
- name: CACHE_IMAGE
description: The name of the persistent app cache image (if no cache workspace is provided).
default: ""
- name: SKIP_RESTORE
description: Do not write layer metadata or restore cached layers.
default: "false"
- name: USER_ID
description: The user ID of the builder image user.
default: "1001"
- name: GROUP_ID
description: The group ID of the builder image user.
default: "0"
##############################################################
##### "default" has been changed to "0" for Knative Functions
- name: PLATFORM_DIR
description: The name of the platform directory.
default: empty-dir
results:
- name: IMAGE_DIGEST
description: The digest of the built `APP_IMAGE`.
stepTemplate:
env:
- name: CNB_PLATFORM_API
value: "0.10"
steps:
- name: prepare
image: docker.io/library/bash:5.1.4@sha256:b208215a4655538be652b2769d82e576bc4d0a2bb132144c060efc5be8c3f5d6
args:
- "--env-vars"
- "$(params.ENV_VARS[*])"
script: |
#!/usr/bin/env bash
set -e
if [[ "$(workspaces.cache.bound)" == "true" ]]; then
echo "> Setting permissions on '$(workspaces.cache.path)'..."
chown -R "$(params.USER_ID):$(params.GROUP_ID)" "$(workspaces.cache.path)"
fi
#######################################################
##### "/emptyDir" has been added for Knative Functions
for path in "/tekton/home" "/layers" "/emptyDir" "$(workspaces.source.path)"; do
echo "> Setting permissions on '$path'..."
chown -R "$(params.USER_ID):$(params.GROUP_ID)" "$path"
if [[ "$path" == "$(workspaces.source.path)" ]]; then
chmod 775 "$(workspaces.source.path)"
fi
done
echo "> Parsing additional configuration..."
parsing_flag=""
envs=()
for arg in "$@"; do
if [[ "$arg" == "--env-vars" ]]; then
echo "-> Parsing env variables..."
parsing_flag="env-vars"
elif [[ "$parsing_flag" == "env-vars" ]]; then
envs+=("$arg")
fi
done
echo "> Processing any environment variables..."
ENV_DIR="/platform/env"
echo "--> Creating 'env' directory: $ENV_DIR"
mkdir -p "$ENV_DIR"
for env in "${envs[@]}"; do
IFS='=' read -r key value <<< "$env"
if [[ "$key" != "" && "$value" != "" ]]; then
path="${ENV_DIR}/${key}"
echo "--> Writing ${path}..."
echo -n "$value" > "$path"
fi
done
############################################
##### Added part for Knative Functions #####
############################################
func_file="$(workspaces.source.path)/func.yaml"
if [ "$(params.SOURCE_SUBPATH)" != "" ]; then
func_file="$(workspaces.source.path)/$(params.SOURCE_SUBPATH)/func.yaml"
fi
echo "--> Saving 'func.yaml'"
cp $func_file /emptyDir/func.yaml
############################################
volumeMounts:
- name: layers-dir
mountPath: /layers
- name: $(params.PLATFORM_DIR)
mountPath: /platform
########################################################
##### "/emptyDir" has been added for Knative Functions
- name: empty-dir
mountPath: /emptyDir
- name: create
image: $(params.BUILDER_IMAGE)
imagePullPolicy: Always
command: ["/cnb/lifecycle/creator"]
env:
- name: DOCKER_CONFIG
value: $(workspaces.dockerconfig.path)
args:
- "-app=$(workspaces.source.path)/$(params.SOURCE_SUBPATH)"
- "-cache-dir=$(workspaces.cache.path)"
- "-cache-image=$(params.CACHE_IMAGE)"
- "-uid=$(params.USER_ID)"
- "-gid=$(params.GROUP_ID)"
- "-layers=/layers"
- "-platform=/platform"
- "-report=/layers/report.toml"
- "-skip-restore=$(params.SKIP_RESTORE)"
- "-previous-image=$(params.APP_IMAGE)"
- "-run-image=$(params.RUN_IMAGE)"
- "$(params.APP_IMAGE)"
volumeMounts:
- name: layers-dir
mountPath: /layers
- name: $(params.PLATFORM_DIR)
mountPath: /platform
securityContext:
runAsUser: 1001
#################################################################
##### "runAsGroup" has been changed to "0" for Knative Functions
runAsGroup: 0
- name: results
image: docker.io/library/bash:5.1.4@sha256:b208215a4655538be652b2769d82e576bc4d0a2bb132144c060efc5be8c3f5d6
script: |
#!/usr/bin/env bash
set -e
cat /layers/report.toml | grep "digest" | cut -d'"' -f2 | cut -d'"' -f2 | tr -d '\n' | tee $(results.IMAGE_DIGEST.path)
############################################
##### Added part for Knative Functions #####
############################################
digest=$(cat $(results.IMAGE_DIGEST.path))
func_file="$(workspaces.source.path)/func.yaml"
if [ "$(params.SOURCE_SUBPATH)" != "" ]; then
func_file="$(workspaces.source.path)/$(params.SOURCE_SUBPATH)/func.yaml"
fi
if [[ ! -f "$func_file" ]]; then
echo "--> Restoring 'func.yaml'"
mkdir -p "$(workspaces.source.path)/$(params.SOURCE_SUBPATH)"
cp /emptyDir/func.yaml $func_file
fi
echo ""
sed -i "s|^image:.*$|image: $(params.APP_IMAGE)|" "$func_file"
echo "Function image name: $(params.APP_IMAGE)"
sed -i "s/^imageDigest:.*$/imageDigest: $digest/" "$func_file"
echo "Function image digest: $digest"
sed -i "s|^registry:.*$|registry: $(params.REGISTRY)|" "$func_file"
echo "Function image registry: $(params.REGISTRY)"
############################################
volumeMounts:
- name: layers-dir
mountPath: /layers
########################################################
##### "/emptyDir" has been added for Knative Functions
- name: empty-dir
mountPath: /emptyDir
volumes:
- name: empty-dir
emptyDir: {}
- name: layers-dir
emptyDir: {}

View File

@ -60,7 +60,7 @@ spec:
description: Digest of the image just built. description: Digest of the image just built.
steps: steps:
- name: generate - name: generate
image: quay.io/openshift-pipeline/s2i:nightly image: quay.io/boson/s2i:latest
workingDir: $(workspaces.source.path) workingDir: $(workspaces.source.path)
args: ["$(params.ENV_VARS[*])"] args: ["$(params.ENV_VARS[*])"]
script: | script: |
@ -99,7 +99,7 @@ spec:
- mountPath: /env-vars - mountPath: /env-vars
name: env-vars name: env-vars
- name: build - name: build
image: quay.io/buildah/stable:v1.27.0 image: quay.io/buildah/stable:v1.31.0
workingDir: /gen-source workingDir: /gen-source
script: | script: |
TLS_VERIFY_FLAG="" TLS_VERIFY_FLAG=""

View File

@ -34,6 +34,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
"knative.dev/pkg/apis" "knative.dev/pkg/apis"
"knative.dev/func/pkg/builders/buildpacks"
"knative.dev/func/pkg/docker" "knative.dev/func/pkg/docker"
fn "knative.dev/func/pkg/functions" fn "knative.dev/func/pkg/functions"
"knative.dev/func/pkg/k8s" "knative.dev/func/pkg/k8s"
@ -79,7 +80,7 @@ func TestGitlab(t *testing.T) {
URL: strings.TrimSuffix(glabEnv.HTTPProjectURL, ".git"), URL: strings.TrimSuffix(glabEnv.HTTPProjectURL, ".git"),
Revision: "devel", Revision: "devel",
}, },
BuilderImages: map[string]string{"pack": "docker.io/paketobuildpacks/builder:tiny"}, BuilderImages: map[string]string{"pack": buildpacks.DefaultTinyBuilder},
Builder: "pack", Builder: "pack",
PVCSize: "256Mi", PVCSize: "256Mi",
}, },

View File

@ -16,6 +16,7 @@ import (
"testing" "testing"
"time" "time"
"knative.dev/func/pkg/builders/buildpacks"
"knative.dev/func/pkg/docker" "knative.dev/func/pkg/docker"
fn "knative.dev/func/pkg/functions" fn "knative.dev/func/pkg/functions"
"knative.dev/func/pkg/pipelines/tekton" "knative.dev/func/pkg/pipelines/tekton"
@ -153,12 +154,12 @@ func createSimpleGoProject(t *testing.T, ns string) fn.Function {
t.Fatal(err) t.Fatal(err)
} }
err = os.WriteFile(filepath.Join(projDir, "main.go"), []byte(simpleGOSvc), 0644) err = os.WriteFile(filepath.Join(projDir, "handle.go"), []byte(simpleGOSvc), 0644)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
err = os.WriteFile(filepath.Join(projDir, "go.mod"), []byte("module web\n\ngo 1.20\n"), 0644) err = os.WriteFile(filepath.Join(projDir, "go.mod"), []byte("module function\n\ngo 1.20\n"), 0644)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -173,7 +174,7 @@ func createSimpleGoProject(t *testing.T, ns string) fn.Function {
Invoke: "none", Invoke: "none",
Build: fn.BuildSpec{ Build: fn.BuildSpec{
BuilderImages: map[string]string{ BuilderImages: map[string]string{
"pack": "docker.io/paketobuildpacks/builder:base", "pack": buildpacks.DefaultTinyBuilder,
"s2i": "registry.access.redhat.com/ubi8/go-toolset", "s2i": "registry.access.redhat.com/ubi8/go-toolset",
}, },
}, },
@ -189,40 +190,16 @@ func createSimpleGoProject(t *testing.T, ns string) fn.Function {
return f return f
} }
const simpleGOSvc = `package main const simpleGOSvc = `package function
import ( import (
"context" "context"
"net"
"net/http" "net/http"
"os"
"os/signal"
"syscall"
) )
func main() { func Handle(ctx context.Context, resp http.ResponseWriter, req *http.Request) {
sigs := make(chan os.Signal, 5) resp.Header().Add("Content-Type", "text/plain")
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) resp.WriteHeader(200)
_, _ = resp.Write([]byte("Hello World!\n"))
s := http.Server{
Handler: http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
resp.Header().Add("Content-Type", "text/plain")
resp.WriteHeader(200)
_, _ = resp.Write([]byte("OK"))
}),
}
go func() {
<-sigs
_ = s.Shutdown(context.Background())
}()
port := "8080"
if p, ok := os.LookupEnv("PORT"); ok {
port = p
}
l, err := net.Listen("tcp4", ":"+port)
if err != nil {
panic(err)
}
_ = s.Serve(l)
} }
` `
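The fixture is no longer a standalone main package running its own HTTP server; it is a plain function with an exported Handle that the Go scaffolding is expected to wrap. A hedged sketch of exercising such a handler directly (the handler body is copied from the fixture; the test harness around it is illustrative):

```go
package function

import (
	"context"
	"net/http"
	"net/http/httptest"
	"testing"
)

func Handle(ctx context.Context, resp http.ResponseWriter, req *http.Request) {
	resp.Header().Add("Content-Type", "text/plain")
	resp.WriteHeader(200)
	_, _ = resp.Write([]byte("Hello World!\n"))
}

func TestHandle(t *testing.T) {
	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/", nil)

	Handle(context.Background(), rec, req)

	if rec.Code != http.StatusOK || rec.Body.String() != "Hello World!\n" {
		t.Fatalf("unexpected response: %d %q", rec.Code, rec.Body.String())
	}
}
```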

View File

@ -30,7 +30,8 @@ func (pp *PipelinesProvider) ConfigurePAC(ctx context.Context, f fn.Function, me
return fmt.Errorf("incorrect type of pipelines metadata: %T", metadata) return fmt.Errorf("incorrect type of pipelines metadata: %T", metadata)
} }
if err := validatePipeline(f); err != nil { var err error
if err = validatePipeline(f); err != nil {
return err return err
} }

View File

@ -118,8 +118,8 @@ func NewPipelinesProvider(opts ...Opt) *PipelinesProvider {
// After the PipelineRun is being initialized, the progress of the PipelineRun is being watched and printed to the output. // After the PipelineRun is being initialized, the progress of the PipelineRun is being watched and printed to the output.
func (pp *PipelinesProvider) Run(ctx context.Context, f fn.Function) error { func (pp *PipelinesProvider) Run(ctx context.Context, f fn.Function) error {
pp.progressListener.Increment("Creating Pipeline resources") pp.progressListener.Increment("Creating Pipeline resources")
var err error
if err := validatePipeline(f); err != nil { if err = validatePipeline(f); err != nil {
return err return err
} }
@@ -252,10 +252,26 @@ func sourcesAsTarStream(f fn.Function) *io.PipeReader {
 	pr, pw := io.Pipe()
 
+	const nobodyID = 65534
+
 	const up = ".." + string(os.PathSeparator)
 	go func() {
 		tw := tar.NewWriter(pw)
-		err := filepath.Walk(f.Root, func(p string, fi fs.FileInfo, err error) error {
+
+		err := tw.WriteHeader(&tar.Header{
+			Typeflag: tar.TypeDir,
+			Name:     "source/",
+			Mode:     0777,
+			Uid:      nobodyID,
+			Gid:      nobodyID,
+			Uname:    "nobody",
+			Gname:    "nobody",
+		})
+		if err != nil {
+			_ = pw.CloseWithError(fmt.Errorf("error while creating tar stream from sources: %w", err))
+		}
+
+		err = filepath.Walk(f.Root, func(p string, fi fs.FileInfo, err error) error {
 			if err != nil {
 				return fmt.Errorf("error traversing function directory: %w", err)
 			}
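The tar stream now begins with an explicit, world-writable "source/" directory entry owned by nobody, which, together with the umask change in the upload pod, appears intended to let the extraction and subsequent build steps (running as arbitrary non-root UIDs) create files beneath it. A self-contained sketch of writing and reading back such an entry:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
)

func main() {
	const nobodyID = 65534

	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{
		Typeflag: tar.TypeDir,
		Name:     "source/",
		Mode:     0777,
		Uid:      nobodyID,
		Gid:      nobodyID,
		Uname:    "nobody",
		Gname:    "nobody",
	}); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	hdr, err := tar.NewReader(&buf).Next()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s mode=%o uid=%d uname=%s\n", hdr.Name, hdr.Mode, hdr.Uid, hdr.Uname)
}
```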

View File

@ -27,10 +27,7 @@ const (
pipelineRunFilenamePAC = "pipeline-run-pac.yaml" pipelineRunFilenamePAC = "pipeline-run-pac.yaml"
// Tasks references for PAC PipelineRun that are defined in the annotations // Tasks references for PAC PipelineRun that are defined in the annotations
taskGitCloneRef = "git-clone" taskGitCloneRef = "git-clone"
taskFuncS2iPACPipelineRunRef = "https://raw.githubusercontent.com/%s/%s/pkg/pipelines/resources/tekton/task/func-s2i/0.1/func-s2i.yaml"
taskFuncBuildpacksPACPipelineRunRef = "https://raw.githubusercontent.com/%s/%s/pkg/pipelines/resources/tekton/task/func-buildpacks/0.1/func-buildpacks.yaml"
taskFuncDeployPACPipelineRunRef = "https://raw.githubusercontent.com/%s/%s/pkg/pipelines/resources/tekton/task/func-deploy/0.1/func-deploy.yaml"
// Following section contains references for Tasks to be used in Pipeline templates, // Following section contains references for Tasks to be used in Pipeline templates,
// there is a difference if we use PAC approach or standard Tekton approach. // there is a difference if we use PAC approach or standard Tekton approach.
@ -93,6 +90,12 @@ const (
var ( var (
FuncRepoRef = "knative/func" FuncRepoRef = "knative/func"
FuncRepoBranchRef = "main" FuncRepoBranchRef = "main"
taskBasePath = "https://raw.githubusercontent.com/" +
FuncRepoRef + "/" + FuncRepoBranchRef + "/pkg/pipelines/resources/tekton/task/"
BuildpackTaskURL = taskBasePath + "func-buildpacks/0.2/func-buildpacks.yaml"
S2ITaskURL = taskBasePath + "func-s2i/0.1/func-s2i.yaml"
DeployTaskURL = taskBasePath + "func-deploy/0.1/func-deploy.yaml"
) )
type templateData struct { type templateData struct {
@ -206,9 +209,9 @@ func createPipelineRunTemplatePAC(f fn.Function, labels map[string]string) error
PipelinesTargetBranch: pipelinesTargetBranch, PipelinesTargetBranch: pipelinesTargetBranch,
GitCloneTaskRef: taskGitCloneRef, GitCloneTaskRef: taskGitCloneRef,
FuncBuildpacksTaskRef: fmt.Sprintf(taskFuncBuildpacksPACPipelineRunRef, FuncRepoRef, FuncRepoBranchRef), FuncBuildpacksTaskRef: BuildpackTaskURL,
FuncS2iTaskRef: fmt.Sprintf(taskFuncS2iPACPipelineRunRef, FuncRepoRef, FuncRepoBranchRef), FuncS2iTaskRef: S2ITaskURL,
FuncDeployTaskRef: fmt.Sprintf(taskFuncDeployPACPipelineRunRef, FuncRepoRef, FuncRepoBranchRef), FuncDeployTaskRef: DeployTaskURL,
PipelineYamlURL: fmt.Sprintf("%s/%s", resourcesDirectory, pipelineFileNamePAC), PipelineYamlURL: fmt.Sprintf("%s/%s", resourcesDirectory, pipelineFileNamePAC),
@ -284,10 +287,13 @@ func deleteAllPipelineTemplates(f fn.Function) string {
} }
func getTaskSpec(taskUrlTemplate string) (string, error) { func getTaskSpec(taskUrlTemplate string) (string, error) {
resp, err := http.Get(fmt.Sprintf(taskUrlTemplate, FuncRepoRef, FuncRepoBranchRef)) resp, err := http.Get(taskUrlTemplate)
if err != nil { if err != nil {
return "", err return "", err
} }
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("cannot get task: %q bad http code: %d", taskUrlTemplate, resp.StatusCode)
}
defer resp.Body.Close() defer resp.Body.Close()
var data map[string]any var data map[string]any
dec := yaml.NewDecoder(resp.Body) dec := yaml.NewDecoder(resp.Body)
@ -337,9 +343,9 @@ func createAndApplyPipelineTemplate(f fn.Function, namespace string, labels map[
ref string ref string
field *string field *string
}{ }{
{taskFuncBuildpacksPACPipelineRunRef, &data.FuncBuildpacksTaskRef}, {BuildpackTaskURL, &data.FuncBuildpacksTaskRef},
{taskFuncS2iPACPipelineRunRef, &data.FuncS2iTaskRef}, {S2ITaskURL, &data.FuncS2iTaskRef},
{taskFuncDeployPACPipelineRunRef, &data.FuncDeployTaskRef}, {DeployTaskURL, &data.FuncDeployTaskRef},
} { } {
ts, err := getTaskSpec(val.ref) ts, err := getTaskSpec(val.ref)
if err != nil { if err != nil {
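getTaskSpec now rejects non-200 responses explicitly; http.Get only errors on transport failures, so a 404 body from raw.githubusercontent.com would otherwise be handed to the YAML decoder as if it were a task definition. A standalone sketch of the guard (the real function goes on to decode and re-serialize the YAML):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func fetchTask(taskURL string) (string, error) {
	resp, err := http.Get(taskURL)
	if err != nil {
		return "", err // transport-level failure only
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("cannot get task: %q bad http code: %d", taskURL, resp.StatusCode)
	}

	body, err := io.ReadAll(resp.Body)
	return string(body), err
}

func main() {
	// Illustrative URL: any endpoint returning 404 will trip the guard.
	if _, err := fetchTask("https://raw.githubusercontent.com/knative/func/main/does-not-exist.yaml"); err != nil {
		fmt.Println("refused:", err)
	}
}
```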

View File

@ -21,7 +21,7 @@ type ErrRuntimeNotSupported struct {
} }
func (e ErrRuntimeNotSupported) Error() string { func (e ErrRuntimeNotSupported) Error() string {
return fmt.Sprintf("runtime %q is not supported for on cluster build", e.Runtime) return fmt.Sprintf("runtime %q is not supported for on cluster build with default builders", e.Runtime)
} }
func validatePipeline(f fn.Function) error { func validatePipeline(f fn.Function) error {
@ -30,10 +30,6 @@ func validatePipeline(f fn.Function) error {
return ErrRuntimeRequired return ErrRuntimeRequired
} }
if f.Runtime == "go" || f.Runtime == "rust" {
return ErrRuntimeNotSupported{f.Runtime}
}
if len(f.Build.Buildpacks) > 0 { if len(f.Build.Buildpacks) > 0 {
return ErrBuilpacksNotSupported return ErrBuilpacksNotSupported
} }

View File

@ -70,9 +70,9 @@ func Test_validatePipeline(t *testing.T) {
wantErr: true, wantErr: true,
}, },
{ {
name: "Unsupported runtime - Go - pack builder - without additional Buildpacks", name: "Supported runtime - Go - pack builder",
function: fn.Function{Build: fn.BuildSpec{Builder: builders.Pack}, Runtime: "go"}, function: fn.Function{Build: fn.BuildSpec{Builder: builders.Pack}, Runtime: "go"},
wantErr: true, wantErr: false,
}, },
{ {
name: "Unsupported runtime - Go - pack builder - with additional Buildpacks", name: "Unsupported runtime - Go - pack builder - with additional Buildpacks",
@ -95,9 +95,9 @@ func Test_validatePipeline(t *testing.T) {
wantErr: false, wantErr: false,
}, },
{ {
name: "Unsupported runtime - Rust - pack builder - without additional Buildpacks", name: "Supported runtime - Rust - pack builder",
function: fn.Function{Build: fn.BuildSpec{Builder: builders.Pack}, Runtime: "rust"}, function: fn.Function{Build: fn.BuildSpec{Builder: builders.Pack}, Runtime: "rust"},
wantErr: true, wantErr: false,
}, },
{ {
name: "Unsupported runtime - Rust - s2i builder", name: "Unsupported runtime - Rust - s2i builder",

View File

@ -1,7 +1,7 @@
## ##
## Bundle of CA Root Certificates ## Bundle of CA Root Certificates
## ##
## Certificate data from Mozilla as of: Tue May 30 03:12:04 2023 GMT ## Certificate data from Mozilla as of: Tue Aug 22 03:12:04 2023 GMT
## ##
## This is a bundle of X.509 certificates of public Certificate Authorities ## This is a bundle of X.509 certificates of public Certificate Authorities
## (CA). These were automatically extracted from Mozilla's root certificates ## (CA). These were automatically extracted from Mozilla's root certificates
@ -14,7 +14,7 @@
## Just configure this file as the SSLCACertificateFile. ## Just configure this file as the SSLCACertificateFile.
## ##
## Conversion done with mk-ca-bundle.pl version 1.29. ## Conversion done with mk-ca-bundle.pl version 1.29.
## SHA256: c47475103fb05bb562bbadff0d1e72346b03236154e1448a6ca191b740f83507 ## SHA256: 0ff137babc6a5561a9cfbe9f29558972e5b528202681b7d3803d03a3e82922bd
## ##
@ -3222,55 +3222,6 @@ AwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozmut6Dacpps6kFtZaSF4fC0urQe87YQVt8
rgIwRt7qy12a7DLCZRawTDBcMPPaTnOGBtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR rgIwRt7qy12a7DLCZRawTDBcMPPaTnOGBtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR
-----END CERTIFICATE----- -----END CERTIFICATE-----
E-Tugra Global Root CA RSA v3
=============================
-----BEGIN CERTIFICATE-----
MIIF8zCCA9ugAwIBAgIUDU3FzRYilZYIfrgLfxUGNPt5EDQwDQYJKoZIhvcNAQELBQAwgYAxCzAJ
BgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUtVHVncmEgRUJHIEEuUy4xHTAb
BgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYwJAYDVQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290
IENBIFJTQSB2MzAeFw0yMDAzMTgwOTA3MTdaFw00NTAzMTIwOTA3MTdaMIGAMQswCQYDVQQGEwJU
UjEPMA0GA1UEBxMGQW5rYXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRF
LVR1Z3JhIFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBDQSBSU0Eg
djMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCiZvCJt3J77gnJY9LTQ91ew6aEOErx
jYG7FL1H6EAX8z3DeEVypi6Q3po61CBxyryfHUuXCscxuj7X/iWpKo429NEvx7epXTPcMHD4QGxL
sqYxYdE0PD0xesevxKenhOGXpOhL9hd87jwH7eKKV9y2+/hDJVDqJ4GohryPUkqWOmAalrv9c/SF
/YP9f4RtNGx/ardLAQO/rWm31zLZ9Vdq6YaCPqVmMbMWPcLzJmAy01IesGykNz709a/r4d+ABs8q
QedmCeFLl+d3vSFtKbZnwy1+7dZ5ZdHPOrbRsV5WYVB6Ws5OUDGAA5hH5+QYfERaxqSzO8bGwzrw
bMOLyKSRBfP12baqBqG3q+Sx6iEUXIOk/P+2UNOMEiaZdnDpwA+mdPy70Bt4znKS4iicvObpCdg6
04nmvi533wEKb5b25Y08TVJ2Glbhc34XrD2tbKNSEhhw5oBOM/J+JjKsBY04pOZ2PJ8QaQ5tndLB
eSBrW88zjdGUdjXnXVXHt6woq0bM5zshtQoK5EpZ3IE1S0SVEgpnpaH/WwAH0sDM+T/8nzPyAPiM
bIedBi3x7+PmBvrFZhNb/FAHnnGGstpvdDDPk1Po3CLW3iAfYY2jLqN4MpBs3KwytQXk9TwzDdbg
h3cXTJ2w2AmoDVf3RIXwyAS+XF1a4xeOVGNpf0l0ZAWMowIDAQABo2MwYTAPBgNVHRMBAf8EBTAD
AQH/MB8GA1UdIwQYMBaAFLK0ruYt9ybVqnUtdkvAG1Mh0EjvMB0GA1UdDgQWBBSytK7mLfcm1ap1
LXZLwBtTIdBI7zAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAImocn+M684uGMQQ
gC0QDP/7FM0E4BQ8Tpr7nym/Ip5XuYJzEmMmtcyQ6dIqKe6cLcwsmb5FJ+Sxce3kOJUxQfJ9emN4
38o2Fi+CiJ+8EUdPdk3ILY7r3y18Tjvarvbj2l0Upq7ohUSdBm6O++96SmotKygY/r+QLHUWnw/q
ln0F7psTpURs+APQ3SPh/QMSEgj0GDSz4DcLdxEBSL9htLX4GdnLTeqjjO/98Aa1bZL0SmFQhO3s
SdPkvmjmLuMxC1QLGpLWgti2omU8ZgT5Vdps+9u1FGZNlIM7zR6mK7L+d0CGq+ffCsn99t2HVhjY
sCxVYJb6CH5SkPVLpi6HfMsg2wY+oF0Dd32iPBMbKaITVaA9FCKvb7jQmhty3QUBjYZgv6Rn7rWl
DdF/5horYmbDB7rnoEgcOMPpRfunf/ztAmgayncSd6YAVSgU7NbHEqIbZULpkejLPoeJVF3Zr52X
nGnnCv8PWniLYypMfUeUP95L6VPQMPHF9p5J3zugkaOj/s1YzOrfr28oO6Bpm4/srK4rVJ2bBLFH
IK+WEj5jlB0E5y67hscMmoi/dkfv97ALl2bSRM9gUgfh1SxKOidhd8rXj+eHDjD/DLsE4mHDosiX
YY60MGo8bcIHX0pzLz/5FooBZu+6kcpSV3uu1OYP3Qt6f4ueJiDPO++BcYNZ
-----END CERTIFICATE-----
E-Tugra Global Root CA ECC v3
=============================
-----BEGIN CERTIFICATE-----
MIICpTCCAiqgAwIBAgIUJkYZdzHhT28oNt45UYbm1JeIIsEwCgYIKoZIzj0EAwMwgYAxCzAJBgNV
BAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUtVHVncmEgRUJHIEEuUy4xHTAbBgNV
BAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYwJAYDVQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENB
IEVDQyB2MzAeFw0yMDAzMTgwOTQ2NThaFw00NTAzMTIwOTQ2NThaMIGAMQswCQYDVQQGEwJUUjEP
MA0GA1UEBxMGQW5rYXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1
Z3JhIFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBDQSBFQ0MgdjMw
djAQBgcqhkjOPQIBBgUrgQQAIgNiAASOmCm/xxAeJ9urA8woLNheSBkQKczLWYHMjLiSF4mDKpL2
w6QdTGLVn9agRtwcvHbB40fQWxPa56WzZkjnIZpKT4YKfWzqTTKACrJ6CZtpS5iB4i7sAnCWH/31
Rs7K3IKjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAU/4Ixcj75xGZsrTie0bBRiKWQ
zPUwHQYDVR0OBBYEFP+CMXI++cRmbK04ntGwUYilkMz1MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjO
PQQDAwNpADBmAjEA5gVYaWHlLcoNy/EZCL3W/VGSGn5jVASQkZo1kTmZ+gepZpO6yGjUij/67W4W
Aie3AjEA3VoXK3YdZUKWpqxdinlW2Iob35reX8dQj7FbcQwm32pAAOwzkSFxvmjkI6TZraE3
-----END CERTIFICATE-----
Security Communication RootCA3 Security Communication RootCA3
============================== ==============================
-----BEGIN CERTIFICATE----- -----BEGIN CERTIFICATE-----
@ -3361,3 +3312,140 @@ SR9BIgmwUVJY1is0j8USRhTFiy8shP8sbqjV8QnjAyEUxEM9fMEsxEtqSs3ph+B99iK++kpRuDCK
W9f+qdJUDkpd0m2xQNz0Q9XSSpkZElaA94M04TVOSG0ED1cxMDAtsaqdAzjbBgIxAMvMh1PLet8g W9f+qdJUDkpd0m2xQNz0Q9XSSpkZElaA94M04TVOSG0ED1cxMDAtsaqdAzjbBgIxAMvMh1PLet8g
UXOQwKhbYdDFUDn9hf7B43j4ptZLvZuHjw/l1lOWqzzIQNph91Oj9w== UXOQwKhbYdDFUDn9hf7B43j4ptZLvZuHjw/l1lOWqzzIQNph91Oj9w==
-----END CERTIFICATE----- -----END CERTIFICATE-----
Sectigo Public Server Authentication Root E46
=============================================
-----BEGIN CERTIFICATE-----
MIICOjCCAcGgAwIBAgIQQvLM2htpN0RfFf51KBC49DAKBggqhkjOPQQDAzBfMQswCQYDVQQGEwJH
QjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1YmxpYyBTZXJ2
ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwHhcNMjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1OTU5
WjBfMQswCQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0
aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUr
gQQAIgNiAAR2+pmpbiDt+dd34wc7qNs9Xzjoq1WmVk/WSOrsfy2qw7LFeeyZYX8QeccCWvkEN/U0
NSt3zn8gj1KjAIns1aeibVvjS5KToID1AZTc8GgHHs3u/iVStSBDHBv+6xnOQ6OjQjBAMB0GA1Ud
DgQWBBTRItpMWfFLXyY4qp3W7usNw/upYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
/zAKBggqhkjOPQQDAwNnADBkAjAn7qRaqCG76UeXlImldCBteU/IvZNeWBj7LRoAasm4PdCkT0RH
lAFWovgzJQxC36oCMB3q4S6ILuH5px0CMk7yn2xVdOOurvulGu7t0vzCAxHrRVxgED1cf5kDW21U
SAGKcw==
-----END CERTIFICATE-----
Sectigo Public Server Authentication Root R46
=============================================
-----BEGIN CERTIFICATE-----
MIIFijCCA3KgAwIBAgIQdY39i658BwD6qSWn4cetFDANBgkqhkiG9w0BAQwFADBfMQswCQYDVQQG
EwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1YmxpYyBT
ZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYwHhcNMjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1
OTU5WjBfMQswCQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1T
ZWN0aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYwggIiMA0GCSqGSIb3
DQEBAQUAA4ICDwAwggIKAoICAQCTvtU2UnXYASOgHEdCSe5jtrch/cSV1UgrJnwUUxDaef0rty2k
1Cz66jLdScK5vQ9IPXtamFSvnl0xdE8H/FAh3aTPaE8bEmNtJZlMKpnzSDBh+oF8HqcIStw+Kxwf
GExxqjWMrfhu6DtK2eWUAtaJhBOqbchPM8xQljeSM9xfiOefVNlI8JhD1mb9nxc4Q8UBUQvX4yMP
FF1bFOdLvt30yNoDN9HWOaEhUTCDsG3XME6WW5HwcCSrv0WBZEMNvSE6Lzzpng3LILVCJ8zab5vu
ZDCQOc2TZYEhMbUjUDM3IuM47fgxMMxF/mL50V0yeUKH32rMVhlATc6qu/m1dkmU8Sf4kaWD5Qaz
Yw6A3OASVYCmO2a0OYctyPDQ0RTp5A1NDvZdV3LFOxxHVp3i1fuBYYzMTYCQNFu31xR13NgESJ/A
wSiItOkcyqex8Va3e0lMWeUgFaiEAin6OJRpmkkGj80feRQXEgyDet4fsZfu+Zd4KKTIRJLpfSYF
plhym3kT2BFfrsU4YjRosoYwjviQYZ4ybPUHNs2iTG7sijbt8uaZFURww3y8nDnAtOFr94MlI1fZ
EoDlSfB1D++N6xybVCi0ITz8fAr/73trdf+LHaAZBav6+CuBQug4urv7qv094PPK306Xlynt8xhW
6aWWrL3DkJiy4Pmi1KZHQ3xtzwIDAQABo0IwQDAdBgNVHQ4EFgQUVnNYZJX5khqwEioEYnmhQBWI
IUkwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAC9c
mTz8Bl6MlC5w6tIyMY208FHVvArzZJ8HXtXBc2hkeqK5Duj5XYUtqDdFqij0lgVQYKlJfp/imTYp
E0RHap1VIDzYm/EDMrraQKFz6oOht0SmDpkBm+S8f74TlH7Kph52gDY9hAaLMyZlbcp+nv4fjFg4
exqDsQ+8FxG75gbMY/qB8oFM2gsQa6H61SilzwZAFv97fRheORKkU55+MkIQpiGRqRxOF3yEvJ+M
0ejf5lG5Nkc/kLnHvALcWxxPDkjBJYOcCj+esQMzEhonrPcibCTRAUH4WAP+JWgiH5paPHxsnnVI
84HxZmduTILA7rpXDhjvLpr3Etiga+kFpaHpaPi8TD8SHkXoUsCjvxInebnMMTzD9joiFgOgyY9m
pFuiTdaBJQbpdqQACj7LzTWb4OE4y2BThihCQRxEV+ioratF4yUQvNs+ZUH7G6aXD+u5dHn5Hrwd
Vw1Hr8Mvn4dGp+smWg9WY7ViYG4A++MnESLn/pmPNPW56MORcr3Ywx65LvKRRFHQV80MNNVIIb/b
E/FmJUNS0nAiNs2fxBx1IK1jcmMGDw4nztJqDby1ORrp0XZ60Vzk50lJLVU3aPAaOpg+VBeHVOmm
J1CJeyAvP/+/oYtKR5j/K3tJPsMpRmAYQqszKbrAKbkTidOIijlBO8n9pu0f9GBj39ItVQGL
-----END CERTIFICATE-----
SSL.com TLS RSA Root CA 2022
============================
-----BEGIN CERTIFICATE-----
MIIFiTCCA3GgAwIBAgIQb77arXO9CEDii02+1PdbkTANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQG
EwJVUzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQDDBxTU0wuY29tIFRMUyBSU0Eg
Um9vdCBDQSAyMDIyMB4XDTIyMDgyNTE2MzQyMloXDTQ2MDgxOTE2MzQyMVowTjELMAkGA1UEBhMC
VVMxGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgUlNBIFJv
b3QgQ0EgMjAyMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANCkCXJPQIgSYT41I57u
9nTPL3tYPc48DRAokC+X94xI2KDYJbFMsBFMF3NQ0CJKY7uB0ylu1bUJPiYYf7ISf5OYt6/wNr/y
7hienDtSxUcZXXTzZGbVXcdotL8bHAajvI9AI7YexoS9UcQbOcGV0insS657Lb85/bRi3pZ7Qcac
oOAGcvvwB5cJOYF0r/c0WRFXCsJbwST0MXMwgsadugL3PnxEX4MN8/HdIGkWCVDi1FW24IBydm5M
R7d1VVm0U3TZlMZBrViKMWYPHqIbKUBOL9975hYsLfy/7PO0+r4Y9ptJ1O4Fbtk085zx7AGL0SDG
D6C1vBdOSHtRwvzpXGk3R2azaPgVKPC506QVzFpPulJwoxJF3ca6TvvC0PeoUidtbnm1jPx7jMEW
TO6Af77wdr5BUxIzrlo4QqvXDz5BjXYHMtWrifZOZ9mxQnUjbvPNQrL8VfVThxc7wDNY8VLS+YCk
8OjwO4s4zKTGkH8PnP2L0aPP2oOnaclQNtVcBdIKQXTbYxE3waWglksejBYSd66UNHsef8JmAOSq
g+qKkK3ONkRN0VHpvB/zagX9wHQfJRlAUW7qglFA35u5CCoGAtUjHBPW6dvbxrB6y3snm/vg1UYk
7RBLY0ulBY+6uB0rpvqR4pJSvezrZ5dtmi2fgTIFZzL7SAg/2SW4BCUvAgMBAAGjYzBhMA8GA1Ud
EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAU+y437uOEeicuzRk1sTN8/9REQrkwHQYDVR0OBBYEFPsu
N+7jhHonLs0ZNbEzfP/UREK5MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAjYlt
hEUY8U+zoO9opMAdrDC8Z2awms22qyIZZtM7QbUQnRC6cm4pJCAcAZli05bg4vsMQtfhWsSWTVTN
j8pDU/0quOr4ZcoBwq1gaAafORpR2eCNJvkLTqVTJXojpBzOCBvfR4iyrT7gJ4eLSYwfqUdYe5by
iB0YrrPRpgqU+tvT5TgKa3kSM/tKWTcWQA673vWJDPFs0/dRa1419dvAJuoSc06pkZCmF8NsLzjU
o3KUQyxi4U5cMj29TH0ZR6LDSeeWP4+a0zvkEdiLA9z2tmBVGKaBUfPhqBVq6+AL8BQx1rmMRTqo
ENjwuSfr98t67wVylrXEj5ZzxOhWc5y8aVFjvO9nHEMaX3cZHxj4HCUp+UmZKbaSPaKDN7Egkaib
MOlqbLQjk2UEqxHzDh1TJElTHaE/nUiSEeJ9DU/1172iWD54nR4fK/4huxoTtrEoZP2wAgDHbICi
vRZQIA9ygV/MlP+7mea6kMvq+cYMwq7FGc4zoWtcu358NFcXrfA/rs3qr5nsLFR+jM4uElZI7xc7
P0peYNLcdDa8pUNjyw9bowJWCZ4kLOGGgYz+qxcs+sjiMho6/4UIyYOf8kpIEFR3N+2ivEC+5BB0
9+Rbu7nzifmPQdjH5FCQNYA+HLhNkNPU98OwoX6EyneSMSy4kLGCenROmxMmtNVQZlR4rmA=
-----END CERTIFICATE-----
SSL.com TLS ECC Root CA 2022
============================
-----BEGIN CERTIFICATE-----
MIICOjCCAcCgAwIBAgIQFAP1q/s3ixdAW+JDsqXRxDAKBggqhkjOPQQDAzBOMQswCQYDVQQGEwJV
UzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQDDBxTU0wuY29tIFRMUyBFQ0MgUm9v
dCBDQSAyMDIyMB4XDTIyMDgyNTE2MzM0OFoXDTQ2MDgxOTE2MzM0N1owTjELMAkGA1UEBhMCVVMx
GDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgRUNDIFJvb3Qg
Q0EgMjAyMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABEUpNXP6wrgjzhR9qLFNoFs27iosU8NgCTWy
JGYmacCzldZdkkAZDsalE3D07xJRKF3nzL35PIXBz5SQySvOkkJYWWf9lCcQZIxPBLFNSeR7T5v1
5wj4A4j3p8OSSxlUgaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBSJjy+j6CugFFR7
81a4Jl9nOAuc0DAdBgNVHQ4EFgQUiY8vo+groBRUe/NWuCZfZzgLnNAwDgYDVR0PAQH/BAQDAgGG
MAoGCCqGSM49BAMDA2gAMGUCMFXjIlbp15IkWE8elDIPDAI2wv2sdDJO4fscgIijzPvX6yv/N33w
7deedWo1dlJF4AIxAMeNb0Igj762TVntd00pxCAgRWSGOlDGxK0tk/UYfXLtqc/ErFc2KAhl3zx5
Zn6g6g==
-----END CERTIFICATE-----
Atos TrustedRoot Root CA ECC TLS 2021
=====================================
-----BEGIN CERTIFICATE-----
MIICFTCCAZugAwIBAgIQPZg7pmY9kGP3fiZXOATvADAKBggqhkjOPQQDAzBMMS4wLAYDVQQDDCVB
dG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgRUNDIFRMUyAyMDIxMQ0wCwYDVQQKDARBdG9zMQswCQYD
VQQGEwJERTAeFw0yMTA0MjIwOTI2MjNaFw00MTA0MTcwOTI2MjJaMEwxLjAsBgNVBAMMJUF0b3Mg
VHJ1c3RlZFJvb3QgUm9vdCBDQSBFQ0MgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNVBAYT
AkRFMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEloZYKDcKZ9Cg3iQZGeHkBQcfl+3oZIK59sRxUM6K
DP/XtXa7oWyTbIOiaG6l2b4siJVBzV3dscqDY4PMwL502eCdpO5KTlbgmClBk1IQ1SQ4AjJn8ZQS
b+/Xxd4u/RmAo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR2KCXWfeBmmnoJsmo7jjPX
NtNPojAOBgNVHQ8BAf8EBAMCAYYwCgYIKoZIzj0EAwMDaAAwZQIwW5kp85wxtolrbNa9d+F851F+
uDrNozZffPc8dz7kUK2o59JZDCaOMDtuCCrCp1rIAjEAmeMM56PDr9NJLkaCI2ZdyQAUEv049OGY
a3cpetskz2VAv9LcjBHo9H1/IISpQuQo
-----END CERTIFICATE-----
Atos TrustedRoot Root CA RSA TLS 2021
=====================================
-----BEGIN CERTIFICATE-----
MIIFZDCCA0ygAwIBAgIQU9XP5hmTC/srBRLYwiqipDANBgkqhkiG9w0BAQwFADBMMS4wLAYDVQQD
DCVBdG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgUlNBIFRMUyAyMDIxMQ0wCwYDVQQKDARBdG9zMQsw
CQYDVQQGEwJERTAeFw0yMTA0MjIwOTIxMTBaFw00MTA0MTcwOTIxMDlaMEwxLjAsBgNVBAMMJUF0
b3MgVHJ1c3RlZFJvb3QgUm9vdCBDQSBSU0EgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNV
BAYTAkRFMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAtoAOxHm9BYx9sKOdTSJNy/BB
l01Z4NH+VoyX8te9j2y3I49f1cTYQcvyAh5x5en2XssIKl4w8i1mx4QbZFc4nXUtVsYvYe+W/CBG
vevUez8/fEc4BKkbqlLfEzfTFRVOvV98r61jx3ncCHvVoOX3W3WsgFWZkmGbzSoXfduP9LVq6hdK
ZChmFSlsAvFr1bqjM9xaZ6cF4r9lthawEO3NUDPJcFDsGY6wx/J0W2tExn2WuZgIWWbeKQGb9Cpt
0xU6kGpn8bRrZtkh68rZYnxGEFzedUlnnkL5/nWpo63/dgpnQOPF943HhZpZnmKaau1Fh5hnstVK
PNe0OwANwI8f4UDErmwh3El+fsqyjW22v5MvoVw+j8rtgI5Y4dtXz4U2OLJxpAmMkokIiEjxQGMY
sluMWuPD0xeqqxmjLBvk1cbiZnrXghmmOxYsL3GHX0WelXOTwkKBIROW1527k2gV+p2kHYzygeBY
Br3JtuP2iV2J+axEoctr+hbxx1A9JNr3w+SH1VbxT5Aw+kUJWdo0zuATHAR8ANSbhqRAvNncTFd+
rrcztl524WWLZt+NyteYr842mIycg5kDcPOvdO3GDjbnvezBc6eUWsuSZIKmAMFwoW4sKeFYV+xa
fJlrJaSQOoD0IJ2azsct+bJLKZWD6TWNp0lIpw9MGZHQ9b8Q4HECAwEAAaNCMEAwDwYDVR0TAQH/
BAUwAwEB/zAdBgNVHQ4EFgQUdEmZ0f+0emhFdcN+tNzMzjkz2ggwDgYDVR0PAQH/BAQDAgGGMA0G
CSqGSIb3DQEBDAUAA4ICAQAjQ1MkYlxt/T7Cz1UAbMVWiLkO3TriJQ2VSpfKgInuKs1l+NsW4AmS
4BjHeJi78+xCUvuppILXTdiK/ORO/auQxDh1MoSf/7OwKwIzNsAQkG8dnK/haZPso0UvFJ/1TCpl
Q3IM98P4lYsU84UgYt1UU90s3BiVaU+DR3BAM1h3Egyi61IxHkzJqM7F78PRreBrAwA0JrRUITWX
AdxfG/F851X6LWh3e9NpzNMOa7pNdkTWwhWaJuywxfW70Xp0wmzNxbVe9kzmWy2B27O3Opee7c9G
slA9hGCZcbUztVdF5kJHdWoOsAgMrr3e97sPWD2PAzHoPYJQyi9eDF20l74gNAf0xBLh7tew2Vkt
afcxBPTy+av5EzH4AXcOPUIjJsyacmdRIXrMPIWo6iFqO9taPKU0nprALN+AnCng33eU0aKAQv9q
TFsR0PXNor6uzFFcw9VUewyu1rkGd4Di7wcaaMxZUa1+XGdrudviB0JbuAEFWDlN5LuYo7Ey7Nmj
1m+UI/87tyll5gfp77YZ6ufCOB0yiJA8EytuzO+rdwY0d4RPcuSBhPm5dDTedk+SKlOxJTnbPP/l
PqYO5Wue/9vsL3SD3460s6neFE3/MaNFcyT6lSnMEpcEoji2jbDwN/zIIX8/syQbPYtuzE2wFg2W
HYMfRsCbvUOZ58SWLs5fyQ==
-----END CERTIFICATE-----

View File

@ -1,117 +1,98 @@
/* /*
* Copyright 2007-present the original author or authors. * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * http://www.apache.org/licenses/LICENSE-2.0
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing,
* * software distributed under the License is distributed on an
* Unless required by applicable law or agreed to in writing, software * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* distributed under the License is distributed on an "AS IS" BASIS, * KIND, either express or implied. See the License for the
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * specific language governing permissions and limitations
* See the License for the specific language governing permissions and * under the License.
* limitations under the License.
*/ */
import java.net.*;
import java.io.*;
import java.nio.channels.*;
import java.util.Properties;
public class MavenWrapperDownloader { import java.io.IOException;
import java.io.InputStream;
import java.net.Authenticator;
import java.net.PasswordAuthentication;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
private static final String WRAPPER_VERSION = "0.5.6"; public final class MavenWrapperDownloader
/** {
* Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. private static final String WRAPPER_VERSION = "3.2.0";
*/
private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
+ WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
/** private static final boolean VERBOSE = Boolean.parseBoolean( System.getenv( "MVNW_VERBOSE" ) );
* Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
* use instead of the default one.
*/
private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
".mvn/wrapper/maven-wrapper.properties";
/** public static void main( String[] args )
* Path where the maven-wrapper.jar will be saved to. {
*/ log( "Apache Maven Wrapper Downloader " + WRAPPER_VERSION );
private static final String MAVEN_WRAPPER_JAR_PATH =
".mvn/wrapper/maven-wrapper.jar";
/** if ( args.length != 2 )
* Name of the property which should be used to override the default download url for the wrapper. {
*/ System.err.println( " - ERROR wrapperUrl or wrapperJarPath parameter missing" );
private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; System.exit( 1 );
public static void main(String args[]) {
System.out.println("- Downloader started");
File baseDirectory = new File(args[0]);
System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
// If the maven-wrapper.properties exists, read it and check if it contains a custom
// wrapperUrl parameter.
File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
String url = DEFAULT_DOWNLOAD_URL;
if(mavenWrapperPropertyFile.exists()) {
FileInputStream mavenWrapperPropertyFileInputStream = null;
try {
mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
Properties mavenWrapperProperties = new Properties();
mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
} catch (IOException e) {
System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
} finally {
try {
if(mavenWrapperPropertyFileInputStream != null) {
mavenWrapperPropertyFileInputStream.close();
}
} catch (IOException e) {
// Ignore ...
}
}
} }
System.out.println("- Downloading from: " + url);
File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); try
if(!outputFile.getParentFile().exists()) { {
if(!outputFile.getParentFile().mkdirs()) { log( " - Downloader started" );
System.out.println( final URL wrapperUrl = new URL( args[0] );
"- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); final String jarPath = args[1].replace( "..", "" ); // Sanitize path
} final Path wrapperJarPath = Paths.get( jarPath ).toAbsolutePath().normalize();
downloadFileFromURL( wrapperUrl, wrapperJarPath );
log( "Done" );
} }
System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); catch ( IOException e )
try { {
downloadFileFromURL(url, outputFile); System.err.println( "- Error downloading: " + e.getMessage() );
System.out.println("Done"); if ( VERBOSE )
System.exit(0); {
} catch (Throwable e) { e.printStackTrace();
System.out.println("- Error downloading"); }
e.printStackTrace(); System.exit( 1 );
System.exit(1);
} }
} }
private static void downloadFileFromURL(String urlString, File destination) throws Exception { private static void downloadFileFromURL( URL wrapperUrl, Path wrapperJarPath )
if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { throws IOException
String username = System.getenv("MVNW_USERNAME"); {
char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); log( " - Downloading to: " + wrapperJarPath );
Authenticator.setDefault(new Authenticator() { if ( System.getenv( "MVNW_USERNAME" ) != null && System.getenv( "MVNW_PASSWORD" ) != null )
{
final String username = System.getenv( "MVNW_USERNAME" );
final char[] password = System.getenv( "MVNW_PASSWORD" ).toCharArray();
Authenticator.setDefault( new Authenticator()
{
@Override @Override
protected PasswordAuthentication getPasswordAuthentication() { protected PasswordAuthentication getPasswordAuthentication()
return new PasswordAuthentication(username, password); {
return new PasswordAuthentication( username, password );
} }
}); } );
}
try ( InputStream inStream = wrapperUrl.openStream() )
{
Files.copy( inStream, wrapperJarPath, StandardCopyOption.REPLACE_EXISTING );
}
log( " - Downloader complete" );
}
private static void log( String msg )
{
if ( VERBOSE )
{
System.out.println( msg );
} }
URL website = new URL(urlString);
ReadableByteChannel rbc;
rbc = Channels.newChannel(website.openStream());
FileOutputStream fos = new FileOutputStream(destination);
fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
fos.close();
rbc.close();
} }
} }

View File

@ -1,2 +1,18 @@
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip # Licensed to the Apache Software Foundation (ASF) under one
wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar # or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.3/apache-maven-3.9.3-bin.zip
wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar

View File

@ -12,7 +12,7 @@
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding> <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<quarkus.platform.artifact-id>quarkus-bom</quarkus.platform.artifact-id> <quarkus.platform.artifact-id>quarkus-bom</quarkus.platform.artifact-id>
<quarkus.platform.group-id>io.quarkus.platform</quarkus.platform.group-id> <quarkus.platform.group-id>io.quarkus.platform</quarkus.platform.group-id>
<quarkus.platform.version>3.2.2.Final</quarkus.platform.version> <quarkus.platform.version>3.4.1</quarkus.platform.version>
<skipITs>true</skipITs> <skipITs>true</skipITs>
<surefire-plugin.version>3.0.0-M7</surefire-plugin.version> <surefire-plugin.version>3.0.0-M7</surefire-plugin.version>
</properties> </properties>

View File

@ -1,117 +1,98 @@
/* /*
* Copyright 2007-present the original author or authors. * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * http://www.apache.org/licenses/LICENSE-2.0
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing,
* * software distributed under the License is distributed on an
* Unless required by applicable law or agreed to in writing, software * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* distributed under the License is distributed on an "AS IS" BASIS, * KIND, either express or implied. See the License for the
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * specific language governing permissions and limitations
* See the License for the specific language governing permissions and * under the License.
* limitations under the License.
*/ */
import java.net.*;
import java.io.*;
import java.nio.channels.*;
import java.util.Properties;
public class MavenWrapperDownloader { import java.io.IOException;
import java.io.InputStream;
import java.net.Authenticator;
import java.net.PasswordAuthentication;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
private static final String WRAPPER_VERSION = "0.5.6"; public final class MavenWrapperDownloader
/** {
* Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. private static final String WRAPPER_VERSION = "3.2.0";
*/
private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
+ WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
/** private static final boolean VERBOSE = Boolean.parseBoolean( System.getenv( "MVNW_VERBOSE" ) );
* Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
* use instead of the default one.
*/
private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
".mvn/wrapper/maven-wrapper.properties";
/** public static void main( String[] args )
* Path where the maven-wrapper.jar will be saved to. {
*/ log( "Apache Maven Wrapper Downloader " + WRAPPER_VERSION );
private static final String MAVEN_WRAPPER_JAR_PATH =
".mvn/wrapper/maven-wrapper.jar";
/** if ( args.length != 2 )
* Name of the property which should be used to override the default download url for the wrapper. {
*/ System.err.println( " - ERROR wrapperUrl or wrapperJarPath parameter missing" );
private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; System.exit( 1 );
public static void main(String args[]) {
System.out.println("- Downloader started");
File baseDirectory = new File(args[0]);
System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
// If the maven-wrapper.properties exists, read it and check if it contains a custom
// wrapperUrl parameter.
File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
String url = DEFAULT_DOWNLOAD_URL;
if(mavenWrapperPropertyFile.exists()) {
FileInputStream mavenWrapperPropertyFileInputStream = null;
try {
mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
Properties mavenWrapperProperties = new Properties();
mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
} catch (IOException e) {
System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
} finally {
try {
if(mavenWrapperPropertyFileInputStream != null) {
mavenWrapperPropertyFileInputStream.close();
}
} catch (IOException e) {
// Ignore ...
}
}
} }
System.out.println("- Downloading from: " + url);
File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); try
if(!outputFile.getParentFile().exists()) { {
if(!outputFile.getParentFile().mkdirs()) { log( " - Downloader started" );
System.out.println( final URL wrapperUrl = new URL( args[0] );
"- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); final String jarPath = args[1].replace( "..", "" ); // Sanitize path
} final Path wrapperJarPath = Paths.get( jarPath ).toAbsolutePath().normalize();
downloadFileFromURL( wrapperUrl, wrapperJarPath );
log( "Done" );
} }
System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); catch ( IOException e )
try { {
downloadFileFromURL(url, outputFile); System.err.println( "- Error downloading: " + e.getMessage() );
System.out.println("Done"); if ( VERBOSE )
System.exit(0); {
} catch (Throwable e) { e.printStackTrace();
System.out.println("- Error downloading"); }
e.printStackTrace(); System.exit( 1 );
System.exit(1);
} }
} }
private static void downloadFileFromURL(String urlString, File destination) throws Exception { private static void downloadFileFromURL( URL wrapperUrl, Path wrapperJarPath )
if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { throws IOException
String username = System.getenv("MVNW_USERNAME"); {
char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); log( " - Downloading to: " + wrapperJarPath );
Authenticator.setDefault(new Authenticator() { if ( System.getenv( "MVNW_USERNAME" ) != null && System.getenv( "MVNW_PASSWORD" ) != null )
{
final String username = System.getenv( "MVNW_USERNAME" );
final char[] password = System.getenv( "MVNW_PASSWORD" ).toCharArray();
Authenticator.setDefault( new Authenticator()
{
@Override @Override
protected PasswordAuthentication getPasswordAuthentication() { protected PasswordAuthentication getPasswordAuthentication()
return new PasswordAuthentication(username, password); {
return new PasswordAuthentication( username, password );
} }
}); } );
}
try ( InputStream inStream = wrapperUrl.openStream() )
{
Files.copy( inStream, wrapperJarPath, StandardCopyOption.REPLACE_EXISTING );
}
log( " - Downloader complete" );
}
private static void log( String msg )
{
if ( VERBOSE )
{
System.out.println( msg );
} }
URL website = new URL(urlString);
ReadableByteChannel rbc;
rbc = Channels.newChannel(website.openStream());
FileOutputStream fos = new FileOutputStream(destination);
fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
fos.close();
rbc.close();
} }
} }

View File

@ -1,2 +1,18 @@
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip # Licensed to the Apache Software Foundation (ASF) under one
wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar # or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.3/apache-maven-3.9.3-bin.zip
wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar

View File

@ -12,7 +12,7 @@
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding> <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<quarkus.platform.artifact-id>quarkus-bom</quarkus.platform.artifact-id> <quarkus.platform.artifact-id>quarkus-bom</quarkus.platform.artifact-id>
<quarkus.platform.group-id>io.quarkus.platform</quarkus.platform.group-id> <quarkus.platform.group-id>io.quarkus.platform</quarkus.platform.group-id>
<quarkus.platform.version>3.2.2.Final</quarkus.platform.version> <quarkus.platform.version>3.4.1</quarkus.platform.version>
<skipITs>true</skipITs> <skipITs>true</skipITs>
<surefire-plugin.version>3.0.0-M7</surefire-plugin.version> <surefire-plugin.version>3.0.0-M7</surefire-plugin.version>
</properties> </properties>

View File

@ -1,2 +0,0 @@
buildpacks:
- docker.io/paketocommunity/rust

View File

@ -6,7 +6,7 @@
<parent> <parent>
<groupId>org.springframework.boot</groupId> <groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId> <artifactId>spring-boot-starter-parent</artifactId>
<version>3.1.0</version> <version>3.1.4</version>
<relativePath/> <relativePath/>
</parent> </parent>
@ -18,7 +18,7 @@
<properties> <properties>
<java.version>17</java.version> <java.version>17</java.version>
<spring-cloud.version>2022.0.3</spring-cloud.version> <spring-cloud.version>2022.0.4</spring-cloud.version>
<compiler-plugin.version>3.11.0</compiler-plugin.version> <compiler-plugin.version>3.11.0</compiler-plugin.version>
<surefire-plugin.version>3.0.0</surefire-plugin.version> <surefire-plugin.version>3.0.0</surefire-plugin.version>
<maven.compiler.parameters>true</maven.compiler.parameters> <maven.compiler.parameters>true</maven.compiler.parameters>

View File

@ -6,7 +6,7 @@
<parent> <parent>
<groupId>org.springframework.boot</groupId> <groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId> <artifactId>spring-boot-starter-parent</artifactId>
<version>3.1.0</version> <version>3.1.4</version>
<relativePath/> <relativePath/>
</parent> </parent>
@ -18,7 +18,7 @@
<properties> <properties>
<java.version>17</java.version> <java.version>17</java.version>
<spring-cloud.version>2022.0.3</spring-cloud.version> <spring-cloud.version>2022.0.4</spring-cloud.version>
<compiler-plugin.version>3.11.0</compiler-plugin.version> <compiler-plugin.version>3.11.0</compiler-plugin.version>
<surefire-plugin.version>3.0.0</surefire-plugin.version> <surefire-plugin.version>3.0.0</surefire-plugin.version>
<maven.compiler.parameters>true</maven.compiler.parameters> <maven.compiler.parameters>true</maven.compiler.parameters>

View File

@ -18,6 +18,7 @@ import (
var runtimeSupportMap = map[string][]string{ var runtimeSupportMap = map[string][]string{
"node": {"pack", "s2i"}, "node": {"pack", "s2i"},
"go": {"pack"}, "go": {"pack"},
"rust": {"pack"},
"python": {"pack", "s2i"}, "python": {"pack", "s2i"},
"quarkus": {"pack", "s2i"}, "quarkus": {"pack", "s2i"},
"springboot": {"pack"}, "springboot": {"pack"},

View File

@ -4,6 +4,7 @@ package oncluster
import ( import (
"path/filepath" "path/filepath"
"regexp"
"strings" "strings"
"testing" "testing"
@ -32,10 +33,14 @@ Notes:
func resolveGitVars() (gitRepoUrl string, gitRef string) { func resolveGitVars() (gitRepoUrl string, gitRef string) {
// On a GitHub Action (Pull Request) these variables will be set // On a GitHub Action (Pull Request) these variables will be set
// https://docs.github.com/en/actions/learn-github-actions/variables // https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables
gitRepo := common.GetOsEnvOrDefault("GITHUB_REPOSITORY", "knative/func") gitRepo := common.GetOsEnvOrDefault("GITHUB_REPOSITORY", "knative/func")
gitRef = common.GetOsEnvOrDefault("GITHUB_REF", "main")
gitRepoUrl = "https://github.com/" + gitRepo + ".git" gitRepoUrl = "https://github.com/" + gitRepo + ".git"
gitRef = common.GetOsEnvOrDefault("GITHUB_REF", "main")
// GitHub uses 2 refs per merge request (refs/pull/ID/head and refs/pull/ID/merge), ensure using */head
exp := regexp.MustCompile("^refs/pull/(.*?)/merge$")
gitRef = exp.ReplaceAllString(gitRef, "refs/pull/${1}/head")
return return
} }
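
The ref rewrite above matters because GitHub creates two refs per pull request, refs/pull/<id>/merge and refs/pull/<id>/head, and only the head ref reliably points at the contributor's commits. A minimal standalone Go sketch of the same substitution (the sample refs are illustrative):

package main

import (
	"fmt"
	"regexp"
)

// normalizeRef mirrors the substitution in resolveGitVars: GitHub's synthetic
// merge ref (refs/pull/<id>/merge) is rewritten to the corresponding head ref
// (refs/pull/<id>/head); any other ref is returned unchanged.
func normalizeRef(ref string) string {
	exp := regexp.MustCompile(`^refs/pull/(.*?)/merge$`)
	return exp.ReplaceAllString(ref, "refs/pull/${1}/head")
}

func main() {
	fmt.Println(normalizeRef("refs/pull/1234/merge")) // refs/pull/1234/head
	fmt.Println(normalizeRef("refs/heads/main"))      // unchanged
}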

View File

@ -15,7 +15,8 @@ import (
var runtimeSupportMap = map[string][]string{ var runtimeSupportMap = map[string][]string{
"node": {"pack", "s2i"}, "node": {"pack", "s2i"},
"go": {}, "go": {"pack"},
"rust": {"pack"},
"python": {"pack", "s2i"}, "python": {"pack", "s2i"},
"quarkus": {"pack", "s2i"}, "quarkus": {"pack", "s2i"},
"springboot": {"pack"}, "springboot": {"pack"},
@ -28,6 +29,7 @@ func TestRuntime(t *testing.T) {
var runtimeList = []string{} var runtimeList = []string{}
runtimes, present := os.LookupEnv("E2E_RUNTIMES") runtimes, present := os.LookupEnv("E2E_RUNTIMES")
targetBuilder, _ := os.LookupEnv("FUNC_BUILDER")
if present { if present {
if runtimes != "" { if runtimes != "" {
@ -41,9 +43,11 @@ func TestRuntime(t *testing.T) {
for _, lang := range runtimeList { for _, lang := range runtimeList {
for _, builder := range runtimeSupportMap[lang] { for _, builder := range runtimeSupportMap[lang] {
t.Run(fmt.Sprintf("%v_%v_test", lang, builder), func(t *testing.T) { if targetBuilder == "" || builder == targetBuilder {
runtimeImpl(t, lang, builder) t.Run(fmt.Sprintf("%v_%v_test", lang, builder), func(t *testing.T) {
}) runtimeImpl(t, lang, builder)
})
}
} }
} }

View File

@ -25,7 +25,10 @@ export NODE_DISTRO=linux-x64
export KNATIVE_SERVING_VERSION=${KNATIVE_SERVING_VERSION:-latest} export KNATIVE_SERVING_VERSION=${KNATIVE_SERVING_VERSION:-latest}
export KNATIVE_EVENTING_VERSION=${KNATIVE_EVENTING_VERSION:-latest} export KNATIVE_EVENTING_VERSION=${KNATIVE_EVENTING_VERSION:-latest}
source $(dirname $0)/../vendor/knative.dev/hack/presubmit-tests.sh source "$(dirname "$0")/../vendor/knative.dev/hack/presubmit-tests.sh"
FUNC_REPO_BRANCH_REF="${PULL_PULL_SHA}"
export FUNC_REPO_BRANCH_REF
function post_build_tests() { function post_build_tests() {
local failed=0 local failed=0
@ -71,7 +74,7 @@ function unit_tests() {
make test || failed=1 make test || failed=1
if (( failed )); then if (( failed )); then
results_banner "Unit tests failed" results_banner "Unit tests failed"
exit ${failed} exit "${failed}"
fi fi
template_tests template_tests
} }
@ -81,7 +84,7 @@ function template_tests() {
make test-templates || failed=2 make test-templates || failed=2
if (( failed )); then if (( failed )); then
results_banner "Built-in template tests failed" results_banner "Built-in template tests failed"
exit ${failed} exit "${failed}"
fi fi
} }

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build go1.11 && gc && !purego //go:build gc && !purego
// +build go1.11,gc,!purego // +build gc,!purego
package chacha20 package chacha20

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build go1.11 && gc && !purego //go:build gc && !purego
// +build go1.11,gc,!purego // +build gc,!purego
#include "textflag.h" #include "textflag.h"

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build (!arm64 && !s390x && !ppc64le) || (arm64 && !go1.11) || !gc || purego //go:build (!arm64 && !s390x && !ppc64le) || !gc || purego
// +build !arm64,!s390x,!ppc64le arm64,!go1.11 !gc purego // +build !arm64,!s390x,!ppc64le !gc purego
package chacha20 package chacha20

View File

@ -1,71 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
//
// Beginning with Go 1.13, the functionality of this package was moved to the
// standard library as crypto/ed25519. This package only acts as a compatibility
// wrapper.
package ed25519
import (
"crypto/ed25519"
"io"
)
const (
// PublicKeySize is the size, in bytes, of public keys as used in this package.
PublicKeySize = 32
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
SeedSize = 32
)
// PublicKey is the type of Ed25519 public keys.
//
// This type is an alias for crypto/ed25519's PublicKey type.
// See the crypto/ed25519 package for the methods on this type.
type PublicKey = ed25519.PublicKey
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
//
// This type is an alias for crypto/ed25519's PrivateKey type.
// See the crypto/ed25519 package for the methods on this type.
type PrivateKey = ed25519.PrivateKey
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
return ed25519.GenerateKey(rand)
}
// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
return ed25519.NewKeyFromSeed(seed)
}
// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
return ed25519.Sign(privateKey, message)
}
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
return ed25519.Verify(publicKey, message, sig)
}
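
With this compatibility wrapper deleted from the vendor tree, callers use the standard library crypto/ed25519 directly; on Go 1.13+ the wrapper's types were already aliases for it. A minimal sketch of the equivalent usage:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	// crypto/ed25519 is a drop-in replacement for the removed wrapper: the
	// PublicKey and PrivateKey types are identical (the wrapper used type
	// aliases), so existing call sites only need the import changed.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("hello")
	sig := ed25519.Sign(priv, msg)
	fmt.Println("signature valid:", ed25519.Verify(pub, msg, sig))
}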

View File

@ -121,11 +121,11 @@ func (d *state) padAndPermute(dsbyte byte) {
copyOut(d, d.buf) copyOut(d, d.buf)
} }
// Write absorbs more data into the hash's state. It produces an error // Write absorbs more data into the hash's state. It panics if any
// if more data is written to the ShakeHash after writing // output has already been read.
func (d *state) Write(p []byte) (written int, err error) { func (d *state) Write(p []byte) (written int, err error) {
if d.state != spongeAbsorbing { if d.state != spongeAbsorbing {
panic("sha3: write to sponge after read") panic("sha3: Write after Read")
} }
if d.buf == nil { if d.buf == nil {
d.buf = d.storage.asBytes()[:0] d.buf = d.storage.asBytes()[:0]
@ -182,12 +182,16 @@ func (d *state) Read(out []byte) (n int, err error) {
} }
// Sum applies padding to the hash state and then squeezes out the desired // Sum applies padding to the hash state and then squeezes out the desired
// number of output bytes. // number of output bytes. It panics if any output has already been read.
func (d *state) Sum(in []byte) []byte { func (d *state) Sum(in []byte) []byte {
if d.state != spongeAbsorbing {
panic("sha3: Sum after Read")
}
// Make a copy of the original hash so that caller can keep writing // Make a copy of the original hash so that caller can keep writing
// and summing. // and summing.
dup := d.clone() dup := d.clone()
hash := make([]byte, dup.outputLen) hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation
dup.Read(hash) dup.Read(hash)
return append(in, hash...) return append(in, hash...)
} }
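
The new panics are observable through the public SHAKE API: once output has been squeezed with Read, the sponge can no longer absorb, so Write and Sum fail loudly instead of silently corrupting state. A hedged sketch against the updated golang.org/x/crypto/sha3 package (the input strings are arbitrary):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	h := sha3.NewShake256()
	h.Write([]byte("some input"))

	// Read squeezes arbitrary-length output and moves the sponge out of the
	// absorbing state.
	out := make([]byte, 16)
	h.Read(out)
	fmt.Printf("%x\n", out)

	// After Read, both Write and Sum panic ("sha3: Write after Read",
	// "sha3: Sum after Read") rather than producing bogus output.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	h.Write([]byte("more input")) // panics here
}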

View File

@ -49,7 +49,7 @@ type asmState struct {
buf []byte // care must be taken to ensure cap(buf) is a multiple of rate buf []byte // care must be taken to ensure cap(buf) is a multiple of rate
rate int // equivalent to block size rate int // equivalent to block size
storage [3072]byte // underlying storage for buf storage [3072]byte // underlying storage for buf
outputLen int // output length if fixed, 0 if not outputLen int // output length for full security
function code // KIMD/KLMD function code function code // KIMD/KLMD function code
state spongeDirection // whether the sponge is absorbing or squeezing state spongeDirection // whether the sponge is absorbing or squeezing
} }
@ -72,8 +72,10 @@ func newAsmState(function code) *asmState {
s.outputLen = 64 s.outputLen = 64
case shake_128: case shake_128:
s.rate = 168 s.rate = 168
s.outputLen = 32
case shake_256: case shake_256:
s.rate = 136 s.rate = 136
s.outputLen = 64
default: default:
panic("sha3: unrecognized function code") panic("sha3: unrecognized function code")
} }
@ -108,7 +110,7 @@ func (s *asmState) resetBuf() {
// It never returns an error. // It never returns an error.
func (s *asmState) Write(b []byte) (int, error) { func (s *asmState) Write(b []byte) (int, error) {
if s.state != spongeAbsorbing { if s.state != spongeAbsorbing {
panic("sha3: write to sponge after read") panic("sha3: Write after Read")
} }
length := len(b) length := len(b)
for len(b) > 0 { for len(b) > 0 {
@ -192,8 +194,8 @@ func (s *asmState) Read(out []byte) (n int, err error) {
// Sum appends the current hash to b and returns the resulting slice. // Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state. // It does not change the underlying hash state.
func (s *asmState) Sum(b []byte) []byte { func (s *asmState) Sum(b []byte) []byte {
if s.outputLen == 0 { if s.state != spongeAbsorbing {
panic("sha3: cannot call Sum on SHAKE functions") panic("sha3: Sum after Read")
} }
// Copy the state to preserve the original. // Copy the state to preserve the original.

View File

@ -17,26 +17,25 @@ package sha3
import ( import (
"encoding/binary" "encoding/binary"
"hash"
"io" "io"
) )
// ShakeHash defines the interface to hash functions that // ShakeHash defines the interface to hash functions that support
// support arbitrary-length output. // arbitrary-length output. When used as a plain [hash.Hash], it
// produces minimum-length outputs that provide full-strength generic
// security.
type ShakeHash interface { type ShakeHash interface {
// Write absorbs more data into the hash's state. It panics if input is hash.Hash
// written to it after output has been read from it.
io.Writer
// Read reads more output from the hash; reading affects the hash's // Read reads more output from the hash; reading affects the hash's
// state. (ShakeHash.Read is thus very different from Hash.Sum) // state. (ShakeHash.Read is thus very different from Hash.Sum)
// It never returns an error. // It never returns an error, but subsequent calls to Write or Sum
// will panic.
io.Reader io.Reader
// Clone returns a copy of the ShakeHash in its current state. // Clone returns a copy of the ShakeHash in its current state.
Clone() ShakeHash Clone() ShakeHash
// Reset resets the ShakeHash to its initial state.
Reset()
} }
// cSHAKE specific context // cSHAKE specific context
@ -81,8 +80,8 @@ func leftEncode(value uint64) []byte {
return b[i-1:] return b[i-1:]
} }
func newCShake(N, S []byte, rate int, dsbyte byte) ShakeHash { func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash {
c := cshakeState{state: &state{rate: rate, dsbyte: dsbyte}} c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}}
// leftEncode returns max 9 bytes // leftEncode returns max 9 bytes
c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) c.initBlock = make([]byte, 0, 9*2+len(N)+len(S))
@ -119,7 +118,7 @@ func NewShake128() ShakeHash {
if h := newShake128Asm(); h != nil { if h := newShake128Asm(); h != nil {
return h return h
} }
return &state{rate: rate128, dsbyte: dsbyteShake} return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake}
} }
// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. // NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
@ -129,7 +128,7 @@ func NewShake256() ShakeHash {
if h := newShake256Asm(); h != nil { if h := newShake256Asm(); h != nil {
return h return h
} }
return &state{rate: rate256, dsbyte: dsbyteShake} return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake}
} }
// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, // NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash,
@ -142,7 +141,7 @@ func NewCShake128(N, S []byte) ShakeHash {
if len(N) == 0 && len(S) == 0 { if len(N) == 0 && len(S) == 0 {
return NewShake128() return NewShake128()
} }
return newCShake(N, S, rate128, dsbyteCShake) return newCShake(N, S, rate128, 32, dsbyteCShake)
} }
// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, // NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash,
@ -155,7 +154,7 @@ func NewCShake256(N, S []byte) ShakeHash {
if len(N) == 0 && len(S) == 0 { if len(N) == 0 && len(S) == 0 {
return NewShake256() return NewShake256()
} }
return newCShake(N, S, rate256, dsbyteCShake) return newCShake(N, S, rate256, 64, dsbyteCShake)
} }
// ShakeSum128 writes an arbitrary-length digest of data into hash. // ShakeSum128 writes an arbitrary-length digest of data into hash.
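
Because ShakeHash now embeds hash.Hash and the SHAKE constructors set a non-zero outputLen, a SHAKE instance can be dropped into any API that expects a hash.Hash; Sum then yields the minimum-length full-security digest (32 bytes for SHAKE128, 64 for SHAKE256). A minimal sketch, assuming the updated package:

package main

import (
	"fmt"
	"hash"

	"golang.org/x/crypto/sha3"
)

// digest accepts any hash.Hash; a ShakeHash can now be passed in directly
// because the interface embeds hash.Hash after this change.
func digest(h hash.Hash, data []byte) []byte {
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// Sum on SHAKE256 returns the 64-byte full-security output; previously
	// SHAKE states carried outputLen 0, so Sum returned nothing (or panicked
	// in the s390x assembly path).
	d := digest(sha3.NewShake256(), []byte("some input"))
	fmt.Printf("%d bytes: %x\n", len(d), d)
}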

View File

@ -16,6 +16,7 @@ import (
"bytes" "bytes"
"crypto/dsa" "crypto/dsa"
"crypto/ecdsa" "crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic" "crypto/elliptic"
"crypto/rsa" "crypto/rsa"
"encoding/base64" "encoding/base64"
@ -26,7 +27,6 @@ import (
"math/big" "math/big"
"sync" "sync"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh"
) )

View File

@ -7,6 +7,7 @@ package agent
import ( import (
"crypto/dsa" "crypto/dsa"
"crypto/ecdsa" "crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic" "crypto/elliptic"
"crypto/rsa" "crypto/rsa"
"encoding/binary" "encoding/binary"
@ -16,7 +17,6 @@ import (
"log" "log"
"math/big" "math/big"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh"
) )

View File

@ -16,8 +16,9 @@ import (
// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear // Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear
// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms. // in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms.
// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't // Unlike key algorithm names, these are not passed to AlgorithmSigner nor
// appear in the Signature.Format field. // returned by MultiAlgorithmSigner and don't appear in the Signature.Format
// field.
const ( const (
CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
@ -255,10 +256,17 @@ func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) {
return nil, errors.New("ssh: signer and cert have different public key") return nil, errors.New("ssh: signer and cert have different public key")
} }
if algorithmSigner, ok := signer.(AlgorithmSigner); ok { switch s := signer.(type) {
case MultiAlgorithmSigner:
return &multiAlgorithmSigner{
AlgorithmSigner: &algorithmOpenSSHCertSigner{
&openSSHCertSigner{cert, signer}, s},
supportedAlgorithms: s.Algorithms(),
}, nil
case AlgorithmSigner:
return &algorithmOpenSSHCertSigner{ return &algorithmOpenSSHCertSigner{
&openSSHCertSigner{cert, signer}, algorithmSigner}, nil &openSSHCertSigner{cert, signer}, s}, nil
} else { default:
return &openSSHCertSigner{cert, signer}, nil return &openSSHCertSigner{cert, signer}, nil
} }
} }
@ -432,7 +440,9 @@ func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
} }
// SignCert signs the certificate with an authority, setting the Nonce, // SignCert signs the certificate with an authority, setting the Nonce,
// SignatureKey, and Signature fields. // SignatureKey, and Signature fields. If the authority implements the
// MultiAlgorithmSigner interface the first algorithm in the list is used. This
// is useful if you want to sign with a specific algorithm.
func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
c.Nonce = make([]byte, 32) c.Nonce = make([]byte, 32)
if _, err := io.ReadFull(rand, c.Nonce); err != nil { if _, err := io.ReadFull(rand, c.Nonce); err != nil {
@ -440,8 +450,20 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
} }
c.SignatureKey = authority.PublicKey() c.SignatureKey = authority.PublicKey()
// Default to KeyAlgoRSASHA512 for ssh-rsa signers. if v, ok := authority.(MultiAlgorithmSigner); ok {
if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { if len(v.Algorithms()) == 0 {
return errors.New("the provided authority has no signature algorithm")
}
// Use the first algorithm in the list.
sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), v.Algorithms()[0])
if err != nil {
return err
}
c.Signature = sig
return nil
} else if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA {
// Default to KeyAlgoRSASHA512 for ssh-rsa signers.
// TODO: consider using KeyAlgoRSASHA256 as default.
sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512) sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512)
if err != nil { if err != nil {
return err return err
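
Taken together, SignCert now honors a restricted authority: when the Signer implements MultiAlgorithmSigner, the first entry of Algorithms() is used instead of the rsa-sha2-512 default. A hedged sketch (self-signed certificate purely for illustration; error handling abbreviated):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Illustrative CA key; real code would load an existing authority key.
	caKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	caSigner, err := ssh.NewSignerFromKey(caKey)
	if err != nil {
		panic(err)
	}

	// Restrict the authority to rsa-sha2-256; SignCert uses the first (and
	// here only) entry of Algorithms() instead of defaulting to rsa-sha2-512.
	authority, err := ssh.NewSignerWithAlgorithms(
		caSigner.(ssh.AlgorithmSigner), []string{ssh.KeyAlgoRSASHA256})
	if err != nil {
		panic(err)
	}

	cert := &ssh.Certificate{
		Key:         caSigner.PublicKey(), // self-signed, illustration only
		CertType:    ssh.UserCert,
		ValidBefore: ssh.CertTimeInfinity,
	}
	if err := cert.SignCert(rand.Reader, authority); err != nil {
		panic(err)
	}
	fmt.Println("signature format:", cert.Signature.Format) // rsa-sha2-256
}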

View File

@ -71,7 +71,9 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
for auth := AuthMethod(new(noneAuth)); auth != nil; { for auth := AuthMethod(new(noneAuth)); auth != nil; {
ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions)
if err != nil { if err != nil {
return err // We return the error later if there is no other method left to
// try.
ok = authFailure
} }
if ok == authSuccess { if ok == authSuccess {
// success // success
@ -101,6 +103,12 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
} }
} }
} }
if auth == nil && err != nil {
// We have an error and there are no other authentication methods to
// try, so we return it.
return err
}
} }
return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried)
} }
@ -217,21 +225,45 @@ func (cb publicKeyCallback) method() string {
return "publickey" return "publickey"
} }
func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) { func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiAlgorithmSigner, string, error) {
var as MultiAlgorithmSigner
keyFormat := signer.PublicKey().Type() keyFormat := signer.PublicKey().Type()
// Like in sendKexInit, if the public key implements AlgorithmSigner we // If the signer implements MultiAlgorithmSigner we use the algorithms it
// assume it supports all algorithms, otherwise only the key format one. // supports, if it implements AlgorithmSigner we assume it supports all
as, ok := signer.(AlgorithmSigner) // algorithms, otherwise only the key format one.
if !ok { switch s := signer.(type) {
return algorithmSignerWrapper{signer}, keyFormat case MultiAlgorithmSigner:
as = s
case AlgorithmSigner:
as = &multiAlgorithmSigner{
AlgorithmSigner: s,
supportedAlgorithms: algorithmsForKeyFormat(underlyingAlgo(keyFormat)),
}
default:
as = &multiAlgorithmSigner{
AlgorithmSigner: algorithmSignerWrapper{signer},
supportedAlgorithms: []string{underlyingAlgo(keyFormat)},
}
}
getFallbackAlgo := func() (string, error) {
// Fallback to use if there is no "server-sig-algs" extension or a
// common algorithm cannot be found. We use the public key format if the
// MultiAlgorithmSigner supports it, otherwise we return an error.
if !contains(as.Algorithms(), underlyingAlgo(keyFormat)) {
return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v",
underlyingAlgo(keyFormat), keyFormat, as.Algorithms())
}
return keyFormat, nil
} }
extPayload, ok := extensions["server-sig-algs"] extPayload, ok := extensions["server-sig-algs"]
if !ok { if !ok {
// If there is no "server-sig-algs" extension, fall back to the key // If there is no "server-sig-algs" extension use the fallback
// format algorithm. // algorithm.
return as, keyFormat algo, err := getFallbackAlgo()
return as, algo, err
} }
// The server-sig-algs extension only carries underlying signature // The server-sig-algs extension only carries underlying signature
@ -245,15 +277,22 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as Alg
} }
} }
keyAlgos := algorithmsForKeyFormat(keyFormat) // Filter algorithms based on those supported by MultiAlgorithmSigner.
var keyAlgos []string
for _, algo := range algorithmsForKeyFormat(keyFormat) {
if contains(as.Algorithms(), underlyingAlgo(algo)) {
keyAlgos = append(keyAlgos, algo)
}
}
algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos)
if err != nil { if err != nil {
// If there is no overlap, try the key anyway with the key format // If there is no overlap, return the fallback algorithm to support
// algorithm, to support servers that fail to list all supported // servers that fail to list all supported algorithms.
// algorithms. algo, err := getFallbackAlgo()
return as, keyFormat return as, algo, err
} }
return as, algo return as, algo, nil
} }
func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) { func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) {
@ -267,10 +306,17 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
return authFailure, nil, err return authFailure, nil, err
} }
var methods []string var methods []string
var errSigAlgo error
for _, signer := range signers { for _, signer := range signers {
pub := signer.PublicKey() pub := signer.PublicKey()
as, algo := pickSignatureAlgorithm(signer, extensions) as, algo, err := pickSignatureAlgorithm(signer, extensions)
if err != nil && errSigAlgo == nil {
// If we cannot negotiate a signature algorithm store the first
// error so we can return it to provide a more meaningful message if
// no other signers work.
errSigAlgo = err
continue
}
ok, err := validateKey(pub, algo, user, c) ok, err := validateKey(pub, algo, user, c)
if err != nil { if err != nil {
return authFailure, nil, err return authFailure, nil, err
@ -317,22 +363,12 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
// contain the "publickey" method, do not attempt to authenticate with any // contain the "publickey" method, do not attempt to authenticate with any
// other keys. According to RFC 4252 Section 7, the latter can occur when // other keys. According to RFC 4252 Section 7, the latter can occur when
// additional authentication methods are required. // additional authentication methods are required.
if success == authSuccess || !containsMethod(methods, cb.method()) { if success == authSuccess || !contains(methods, cb.method()) {
return success, methods, err return success, methods, err
} }
} }
return authFailure, methods, nil return authFailure, methods, errSigAlgo
}
func containsMethod(methods []string, method string) bool {
for _, m := range methods {
if m == method {
return true
}
}
return false
} }
// validateKey validates the key provided is acceptable to the server. // validateKey validates the key provided is acceptable to the server.
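
On the client side the same restriction flows into publickey authentication: pickSignatureAlgorithm filters the negotiated algorithm against Algorithms(), and the stored error is surfaced if no signer can agree with the server. A minimal sketch of wiring a restricted signer into a ClientConfig (the user name and key are placeholders; the config is only constructed, not dialed):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Placeholder client key; in practice this is loaded from disk or an agent.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.NewSignerFromKey(key)
	if err != nil {
		log.Fatal(err)
	}

	// Offer only rsa-sha2-512 for this key. If the server's server-sig-algs
	// extension has no overlap, the publickey attempt now fails with a
	// descriptive "no common public key signature algorithm" error instead
	// of silently trying ssh-rsa.
	restricted, err := ssh.NewSignerWithAlgorithms(
		signer.(ssh.AlgorithmSigner), []string{ssh.KeyAlgoRSASHA512})
	if err != nil {
		log.Fatal(err)
	}

	cfg := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(restricted)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // illustration only
	}
	fmt.Printf("config prepared with %d auth method(s)\n", len(cfg.Auth))
	// ssh.Dial("tcp", "example.com:22", cfg) would perform the handshake.
}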

View File

@ -49,7 +49,8 @@ var supportedKexAlgos = []string{
// P384 and P521 are not constant-time yet, but since we don't // P384 and P521 are not constant-time yet, but since we don't
// reuse ephemeral keys, using them for ECDH should be OK. // reuse ephemeral keys, using them for ECDH should be OK.
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
kexAlgoDH14SHA256, kexAlgoDH14SHA1, kexAlgoDH1SHA1, kexAlgoDH14SHA256, kexAlgoDH16SHA512, kexAlgoDH14SHA1,
kexAlgoDH1SHA1,
} }
// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden // serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden
@ -59,8 +60,9 @@ var serverForbiddenKexAlgos = map[string]struct{}{
kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests
} }
// preferredKexAlgos specifies the default preference for key-exchange algorithms // preferredKexAlgos specifies the default preference for key-exchange
// in preference order. // algorithms in preference order. The diffie-hellman-group16-sha512 algorithm
// is disabled by default because it is a bit slower than the others.
var preferredKexAlgos = []string{ var preferredKexAlgos = []string{
kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH,
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
@ -70,12 +72,12 @@ var preferredKexAlgos = []string{
// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods // supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
// of authenticating servers) in preference order. // of authenticating servers) in preference order.
var supportedHostKeyAlgos = []string{ var supportedHostKeyAlgos = []string{
CertAlgoRSASHA512v01, CertAlgoRSASHA256v01, CertAlgoRSASHA256v01, CertAlgoRSASHA512v01,
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
KeyAlgoRSASHA512, KeyAlgoRSASHA256, KeyAlgoRSASHA256, KeyAlgoRSASHA512,
KeyAlgoRSA, KeyAlgoDSA, KeyAlgoRSA, KeyAlgoDSA,
KeyAlgoED25519, KeyAlgoED25519,
@ -85,7 +87,7 @@ var supportedHostKeyAlgos = []string{
// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed // This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
// because they have reached the end of their useful life. // because they have reached the end of their useful life.
var supportedMACs = []string{ var supportedMACs = []string{
"hmac-sha2-512-etm@openssh.com", "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96", "hmac-sha2-256-etm@openssh.com", "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96",
} }
var supportedCompressions = []string{compressionNone} var supportedCompressions = []string{compressionNone}
@ -119,6 +121,13 @@ func algorithmsForKeyFormat(keyFormat string) []string {
} }
} }
// isRSA returns whether algo is a supported RSA algorithm, including certificate
// algorithms.
func isRSA(algo string) bool {
algos := algorithmsForKeyFormat(KeyAlgoRSA)
return contains(algos, underlyingAlgo(algo))
}
// supportedPubKeyAuthAlgos specifies the supported client public key // supportedPubKeyAuthAlgos specifies the supported client public key
// authentication algorithms. Note that this doesn't include certificate types // authentication algorithms. Note that this doesn't include certificate types
// since those use the underlying algorithm. This list is sent to the client if // since those use the underlying algorithm. This list is sent to the client if
@ -262,16 +271,16 @@ type Config struct {
// unspecified, a size suitable for the chosen cipher is used. // unspecified, a size suitable for the chosen cipher is used.
RekeyThreshold uint64 RekeyThreshold uint64
// The allowed key exchanges algorithms. If unspecified then a // The allowed key exchanges algorithms. If unspecified then a default set
// default set of algorithms is used. // of algorithms is used. Unsupported values are silently ignored.
KeyExchanges []string KeyExchanges []string
// The allowed cipher algorithms. If unspecified then a sensible // The allowed cipher algorithms. If unspecified then a sensible default is
// default is used. // used. Unsupported values are silently ignored.
Ciphers []string Ciphers []string
// The allowed MAC algorithms. If unspecified then a sensible default // The allowed MAC algorithms. If unspecified then a sensible default is
// is used. // used. Unsupported values are silently ignored.
MACs []string MACs []string
} }
@ -288,7 +297,7 @@ func (c *Config) SetDefaults() {
var ciphers []string var ciphers []string
for _, c := range c.Ciphers { for _, c := range c.Ciphers {
if cipherModes[c] != nil { if cipherModes[c] != nil {
// reject the cipher if we have no cipherModes definition // Ignore the cipher if we have no cipherModes definition.
ciphers = append(ciphers, c) ciphers = append(ciphers, c)
} }
} }
@ -297,10 +306,26 @@ func (c *Config) SetDefaults() {
if c.KeyExchanges == nil { if c.KeyExchanges == nil {
c.KeyExchanges = preferredKexAlgos c.KeyExchanges = preferredKexAlgos
} }
var kexs []string
for _, k := range c.KeyExchanges {
if kexAlgoMap[k] != nil {
// Ignore the KEX if we have no kexAlgoMap definition.
kexs = append(kexs, k)
}
}
c.KeyExchanges = kexs
if c.MACs == nil { if c.MACs == nil {
c.MACs = supportedMACs c.MACs = supportedMACs
} }
var macs []string
for _, m := range c.MACs {
if macModes[m] != nil {
// Ignore the MAC if we have no macModes definition.
macs = append(macs, m)
}
}
c.MACs = macs
if c.RekeyThreshold == 0 { if c.RekeyThreshold == 0 {
// cipher specific default // cipher specific default
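
Two practical consequences of the changes above: diffie-hellman-group16-sha512 is implemented but must be requested explicitly because it is not in the preferred list, and SetDefaults now silently drops key exchange and MAC names it does not recognize instead of letting them fail later in negotiation. A hedged sketch ("future-kex" is a deliberately unknown name):

package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := ssh.ClientConfig{
		User:            "demo",
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // illustration only
	}
	// diffie-hellman-group16-sha512 has to be opted into; it is supported but
	// intentionally left out of the preferred defaults. "future-kex" is a
	// made-up name to show the new filtering behavior.
	cfg.KeyExchanges = []string{
		"curve25519-sha256",
		"diffie-hellman-group16-sha512",
		"future-kex",
	}
	// SetDefaults is normally invoked during the handshake; calling it here
	// just shows that unknown algorithm names are dropped rather than kept.
	cfg.SetDefaults()
	fmt.Println(cfg.KeyExchanges) // [curve25519-sha256 diffie-hellman-group16-sha512]
}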

View File

@ -13,6 +13,7 @@ others.
References: References:
[PROTOCOL]: https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=HEAD
[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1

View File

@ -461,19 +461,24 @@ func (t *handshakeTransport) sendKexInit() error {
isServer := len(t.hostKeys) > 0 isServer := len(t.hostKeys) > 0
if isServer { if isServer {
for _, k := range t.hostKeys { for _, k := range t.hostKeys {
// If k is an AlgorithmSigner, presume it supports all signature algorithms // If k is a MultiAlgorithmSigner, we restrict the signature
// associated with the key format. (Ideally AlgorithmSigner would have a // algorithms. If k is an AlgorithmSigner, presume it supports all
// method to advertise supported algorithms, but it doesn't. This means that // signature algorithms associated with the key format. If k is not
// adding support for a new algorithm is a breaking change, as we will // an AlgorithmSigner, we can only assume it only supports the
// immediately negotiate it even if existing implementations don't support // algorithms that match the key format. (This means that Sign
// it. If that ever happens, we'll have to figure something out.) // can't pick a different default).
// If k is not an AlgorithmSigner, we can only assume it only supports the
// algorithms that matches the key format. (This means that Sign can't pick
// a different default.)
keyFormat := k.PublicKey().Type() keyFormat := k.PublicKey().Type()
if _, ok := k.(AlgorithmSigner); ok {
switch s := k.(type) {
case MultiAlgorithmSigner:
for _, algo := range algorithmsForKeyFormat(keyFormat) {
if contains(s.Algorithms(), underlyingAlgo(algo)) {
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo)
}
}
case AlgorithmSigner:
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...) msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...)
} else { default:
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat)
} }
} }
@ -642,16 +647,20 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
// On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO // On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO
// message with the server-sig-algs extension if the client supports it. See // message with the server-sig-algs extension if the client supports it. See
// RFC 8308, Sections 2.4 and 3.1. // RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9.
if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") { if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") {
extInfo := &extInfoMsg{ extInfo := &extInfoMsg{
NumExtensions: 1, NumExtensions: 2,
Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)), Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)+4+16+4+1),
} }
extInfo.Payload = appendInt(extInfo.Payload, len("server-sig-algs")) extInfo.Payload = appendInt(extInfo.Payload, len("server-sig-algs"))
extInfo.Payload = append(extInfo.Payload, "server-sig-algs"...) extInfo.Payload = append(extInfo.Payload, "server-sig-algs"...)
extInfo.Payload = appendInt(extInfo.Payload, len(supportedPubKeyAuthAlgosList)) extInfo.Payload = appendInt(extInfo.Payload, len(supportedPubKeyAuthAlgosList))
extInfo.Payload = append(extInfo.Payload, supportedPubKeyAuthAlgosList...) extInfo.Payload = append(extInfo.Payload, supportedPubKeyAuthAlgosList...)
extInfo.Payload = appendInt(extInfo.Payload, len("ping@openssh.com"))
extInfo.Payload = append(extInfo.Payload, "ping@openssh.com"...)
extInfo.Payload = appendInt(extInfo.Payload, 1)
extInfo.Payload = append(extInfo.Payload, "0"...)
if err := t.conn.writePacket(Marshal(extInfo)); err != nil { if err := t.conn.writePacket(Marshal(extInfo)); err != nil {
return err return err
} }
@ -685,9 +694,16 @@ func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, a
func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner {
for _, k := range hostKeys { for _, k := range hostKeys {
if s, ok := k.(MultiAlgorithmSigner); ok {
if !contains(s.Algorithms(), underlyingAlgo(algo)) {
continue
}
}
if algo == k.PublicKey().Type() { if algo == k.PublicKey().Type() {
return algorithmSignerWrapper{k} return algorithmSignerWrapper{k}
} }
k, ok := k.(AlgorithmSigner) k, ok := k.(AlgorithmSigner)
if !ok { if !ok {
continue continue

View File

@ -23,6 +23,7 @@ const (
kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256"
kexAlgoDH16SHA512 = "diffie-hellman-group16-sha512"
kexAlgoECDH256 = "ecdh-sha2-nistp256" kexAlgoECDH256 = "ecdh-sha2-nistp256"
kexAlgoECDH384 = "ecdh-sha2-nistp384" kexAlgoECDH384 = "ecdh-sha2-nistp384"
kexAlgoECDH521 = "ecdh-sha2-nistp521" kexAlgoECDH521 = "ecdh-sha2-nistp521"
@ -430,6 +431,17 @@ func init() {
hashFunc: crypto.SHA256, hashFunc: crypto.SHA256,
} }
// This is the group called diffie-hellman-group16-sha512 in RFC
// 8268 and Oakley Group 16 in RFC 3526.
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF", 16)
kexAlgoMap[kexAlgoDH16SHA512] = &dhGroup{
g: new(big.Int).SetInt64(2),
p: p,
pMinus1: new(big.Int).Sub(p, bigOne),
hashFunc: crypto.SHA512,
}
kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()}
kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()}

View File

@ -11,13 +11,16 @@ import (
"crypto/cipher" "crypto/cipher"
"crypto/dsa" "crypto/dsa"
"crypto/ecdsa" "crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic" "crypto/elliptic"
"crypto/md5" "crypto/md5"
"crypto/rand"
"crypto/rsa" "crypto/rsa"
"crypto/sha256" "crypto/sha256"
"crypto/x509" "crypto/x509"
"encoding/asn1" "encoding/asn1"
"encoding/base64" "encoding/base64"
"encoding/binary"
"encoding/hex" "encoding/hex"
"encoding/pem" "encoding/pem"
"errors" "errors"
@ -26,7 +29,6 @@ import (
"math/big" "math/big"
"strings" "strings"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf"
) )
@ -295,6 +297,18 @@ func MarshalAuthorizedKey(key PublicKey) []byte {
return b.Bytes() return b.Bytes()
} }
// MarshalPrivateKey returns a PEM block with the private key serialized in the
// OpenSSH format.
func MarshalPrivateKey(key crypto.PrivateKey, comment string) (*pem.Block, error) {
return marshalOpenSSHPrivateKey(key, comment, unencryptedOpenSSHMarshaler)
}
// MarshalPrivateKeyWithPassphrase returns a PEM block holding the encrypted
// private key serialized in the OpenSSH format.
func MarshalPrivateKeyWithPassphrase(key crypto.PrivateKey, comment string, passphrase []byte) (*pem.Block, error) {
return marshalOpenSSHPrivateKey(key, comment, passphraseProtectedOpenSSHMarshaler(passphrase))
}
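
The two new marshaling helpers are the write-side counterparts of ParsePrivateKey: they serialize a crypto.PrivateKey into the OpenSSH PEM format, optionally encrypting the key block with bcrypt_pbkdf and aes256-ctr. A minimal sketch using an ed25519 key (the comment string and passphrase are arbitrary):

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/pem"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Generate a key and serialize it in the OpenSSH private key format.
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	block, err := ssh.MarshalPrivateKey(priv, "demo key")
	if err != nil {
		panic(err)
	}
	fmt.Print(string(pem.EncodeToMemory(block)))

	// The encrypted variant derives the cipher key from the passphrase with
	// bcrypt_pbkdf and encrypts the key block with aes256-ctr.
	encBlock, err := ssh.MarshalPrivateKeyWithPassphrase(priv, "demo key", []byte("s3cret"))
	if err != nil {
		panic(err)
	}
	fmt.Print(string(pem.EncodeToMemory(encBlock)))
}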
// PublicKey represents a public key using an unspecified algorithm. // PublicKey represents a public key using an unspecified algorithm.
// //
// Some PublicKeys provided by this package also implement CryptoPublicKey. // Some PublicKeys provided by this package also implement CryptoPublicKey.
@ -321,7 +335,7 @@ type CryptoPublicKey interface {
// A Signer can create signatures that verify against a public key. // A Signer can create signatures that verify against a public key.
// //
// Some Signers provided by this package also implement AlgorithmSigner. // Some Signers provided by this package also implement MultiAlgorithmSigner.
type Signer interface { type Signer interface {
// PublicKey returns the associated PublicKey. // PublicKey returns the associated PublicKey.
PublicKey() PublicKey PublicKey() PublicKey
@ -336,9 +350,9 @@ type Signer interface {
// An AlgorithmSigner is a Signer that also supports specifying an algorithm to // An AlgorithmSigner is a Signer that also supports specifying an algorithm to
// use for signing. // use for signing.
// //
// An AlgorithmSigner can't advertise the algorithms it supports, so it should // An AlgorithmSigner can't advertise the algorithms it supports, unless it also
// be prepared to be invoked with every algorithm supported by the public key // implements MultiAlgorithmSigner, so it should be prepared to be invoked with
// format. // every algorithm supported by the public key format.
type AlgorithmSigner interface { type AlgorithmSigner interface {
Signer Signer
@ -349,6 +363,75 @@ type AlgorithmSigner interface {
SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error)
} }
// MultiAlgorithmSigner is an AlgorithmSigner that also reports the algorithms
// supported by that signer.
type MultiAlgorithmSigner interface {
AlgorithmSigner
// Algorithms returns the available algorithms in preference order. The list
// must not be empty, and it must not include certificate types.
Algorithms() []string
}
// NewSignerWithAlgorithms returns a signer restricted to the specified
// algorithms. The algorithms must be set in preference order. The list must not
// be empty, and it must not include certificate types. An error is returned if
// the specified algorithms are incompatible with the public key type.
func NewSignerWithAlgorithms(signer AlgorithmSigner, algorithms []string) (MultiAlgorithmSigner, error) {
if len(algorithms) == 0 {
return nil, errors.New("ssh: please specify at least one valid signing algorithm")
}
var signerAlgos []string
supportedAlgos := algorithmsForKeyFormat(underlyingAlgo(signer.PublicKey().Type()))
if s, ok := signer.(*multiAlgorithmSigner); ok {
signerAlgos = s.Algorithms()
} else {
signerAlgos = supportedAlgos
}
for _, algo := range algorithms {
if !contains(supportedAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q",
algo, signer.PublicKey().Type())
}
if !contains(signerAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo)
}
}
return &multiAlgorithmSigner{
AlgorithmSigner: signer,
supportedAlgorithms: algorithms,
}, nil
}
type multiAlgorithmSigner struct {
AlgorithmSigner
supportedAlgorithms []string
}
func (s *multiAlgorithmSigner) Algorithms() []string {
return s.supportedAlgorithms
}
func (s *multiAlgorithmSigner) isAlgorithmSupported(algorithm string) bool {
if algorithm == "" {
algorithm = underlyingAlgo(s.PublicKey().Type())
}
for _, algo := range s.supportedAlgorithms {
if algorithm == algo {
return true
}
}
return false
}
func (s *multiAlgorithmSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
if !s.isAlgorithmSupported(algorithm) {
return nil, fmt.Errorf("ssh: algorithm %q is not supported: %v", algorithm, s.supportedAlgorithms)
}
return s.AlgorithmSigner.SignWithAlgorithm(rand, data, algorithm)
}
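
NewSignerWithAlgorithms is the public entry point for the MultiAlgorithmSigner machinery used throughout this change set; it validates the requested algorithms against both the key format and any restriction already present on the signer. A hedged sketch of the inspection and error paths (error handling abbreviated):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	signer, err := ssh.NewSignerFromKey(key)
	if err != nil {
		panic(err)
	}

	// Signers built by this package now also implement MultiAlgorithmSigner,
	// so the supported algorithms can be inspected directly.
	if m, ok := signer.(ssh.MultiAlgorithmSigner); ok {
		fmt.Println("supported:", m.Algorithms()) // [ecdsa-sha2-nistp256]
	}

	// Requesting an algorithm the key type cannot produce is rejected.
	_, err = ssh.NewSignerWithAlgorithms(
		signer.(ssh.AlgorithmSigner), []string{ssh.KeyAlgoRSASHA256})
	fmt.Println("expected error:", err)
}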
type rsaPublicKey rsa.PublicKey type rsaPublicKey rsa.PublicKey
func (r *rsaPublicKey) Type() string { func (r *rsaPublicKey) Type() string {
@ -512,6 +595,10 @@ func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
return k.SignWithAlgorithm(rand, data, k.PublicKey().Type()) return k.SignWithAlgorithm(rand, data, k.PublicKey().Type())
} }
func (k *dsaPrivateKey) Algorithms() []string {
return []string{k.PublicKey().Type()}
}
func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
if algorithm != "" && algorithm != k.PublicKey().Type() { if algorithm != "" && algorithm != k.PublicKey().Type() {
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
@ -961,13 +1048,16 @@ func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
return s.SignWithAlgorithm(rand, data, s.pubKey.Type()) return s.SignWithAlgorithm(rand, data, s.pubKey.Type())
} }
func (s *wrappedSigner) Algorithms() []string {
return algorithmsForKeyFormat(s.pubKey.Type())
}
func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
if algorithm == "" { if algorithm == "" {
algorithm = s.pubKey.Type() algorithm = s.pubKey.Type()
} }
supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type()) if !contains(s.Algorithms(), algorithm) {
if !contains(supportedAlgos, algorithm) {
return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type())
} }
@ -1241,28 +1331,106 @@ func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc {
} }
} }
func unencryptedOpenSSHMarshaler(privKeyBlock []byte) ([]byte, string, string, string, error) {
key := generateOpenSSHPadding(privKeyBlock, 8)
return key, "none", "none", "", nil
}
func passphraseProtectedOpenSSHMarshaler(passphrase []byte) openSSHEncryptFunc {
return func(privKeyBlock []byte) ([]byte, string, string, string, error) {
salt := make([]byte, 16)
if _, err := rand.Read(salt); err != nil {
return nil, "", "", "", err
}
opts := struct {
Salt []byte
Rounds uint32
}{salt, 16}
// Derive key to encrypt the private key block.
k, err := bcrypt_pbkdf.Key(passphrase, salt, int(opts.Rounds), 32+aes.BlockSize)
if err != nil {
return nil, "", "", "", err
}
// Add padding matching the block size of AES.
keyBlock := generateOpenSSHPadding(privKeyBlock, aes.BlockSize)
// Encrypt the private key using the derived secret.
dst := make([]byte, len(keyBlock))
key, iv := k[:32], k[32:]
block, err := aes.NewCipher(key)
if err != nil {
return nil, "", "", "", err
}
stream := cipher.NewCTR(block, iv)
stream.XORKeyStream(dst, keyBlock)
return dst, "aes256-ctr", "bcrypt", string(Marshal(opts)), nil
}
}
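passphraseProtectedOpenSSHMarshaler is the encryption half used when writing OpenSSH keys: a bcrypt_pbkdf-derived key and IV feed aes256-ctr over the padded private-key block. A short usage sketch, assuming the exported entry point is MarshalPrivateKeyWithPassphrase as in the corresponding upstream ssh release:

package main

import (
    "crypto/ed25519"
    "crypto/rand"
    "encoding/pem"
    "log"
    "os"

    "golang.org/x/crypto/ssh"
)

func main() {
    _, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        log.Fatal(err)
    }
    // Produces an "OPENSSH PRIVATE KEY" PEM block whose private section is
    // encrypted exactly as in passphraseProtectedOpenSSHMarshaler above.
    block, err := ssh.MarshalPrivateKeyWithPassphrase(priv, "demo key", []byte("correct horse"))
    if err != nil {
        log.Fatal(err)
    }
    if err := pem.Encode(os.Stdout, block); err != nil {
        log.Fatal(err)
    }
}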
const privateKeyAuthMagic = "openssh-key-v1\x00"
type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error)
type openSSHEncryptFunc func(PrivKeyBlock []byte) (ProtectedKeyBlock []byte, cipherName, kdfName, kdfOptions string, err error)
type openSSHEncryptedPrivateKey struct {
CipherName string
KdfName string
KdfOpts string
NumKeys uint32
PubKey []byte
PrivKeyBlock []byte
}
type openSSHPrivateKey struct {
Check1 uint32
Check2 uint32
Keytype string
Rest []byte `ssh:"rest"`
}
type openSSHRSAPrivateKey struct {
N *big.Int
E *big.Int
D *big.Int
Iqmp *big.Int
P *big.Int
Q *big.Int
Comment string
Pad []byte `ssh:"rest"`
}
type openSSHEd25519PrivateKey struct {
Pub []byte
Priv []byte
Comment string
Pad []byte `ssh:"rest"`
}
type openSSHECDSAPrivateKey struct {
Curve string
Pub []byte
D *big.Int
Comment string
Pad []byte `ssh:"rest"`
}
// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt
// function to unwrap the encrypted portion. unencryptedOpenSSHKey can be used
// as the decrypt function to parse an unencrypted private key. See
// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key.
func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) {
const magic = "openssh-key-v1\x00"
if len(key) < len(magic) || string(key[:len(magic)]) != magic {
if len(key) < len(privateKeyAuthMagic) || string(key[:len(privateKeyAuthMagic)]) != privateKeyAuthMagic {
return nil, errors.New("ssh: invalid openssh private key format")
}
remaining := key[len(magic):]
remaining := key[len(privateKeyAuthMagic):]
var w struct {
CipherName string
KdfName string
KdfOpts string
NumKeys uint32
PubKey []byte
PrivKeyBlock []byte
}
var w openSSHEncryptedPrivateKey
if err := Unmarshal(remaining, &w); err != nil { if err := Unmarshal(remaining, &w); err != nil {
return nil, err return nil, err
} }
@ -1284,13 +1452,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv
return nil, err return nil, err
} }
pk1 := struct {
Check1 uint32
Check2 uint32
Keytype string
Rest []byte `ssh:"rest"`
}{}
var pk1 openSSHPrivateKey
if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 {
if w.CipherName != "none" { if w.CipherName != "none" {
return nil, x509.IncorrectPasswordError return nil, x509.IncorrectPasswordError
@ -1300,18 +1462,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv
switch pk1.Keytype { switch pk1.Keytype {
case KeyAlgoRSA: case KeyAlgoRSA:
// https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773
key := struct {
N *big.Int
E *big.Int
D *big.Int
Iqmp *big.Int
P *big.Int
Q *big.Int
Comment string
Pad []byte `ssh:"rest"`
}{}
var key openSSHRSAPrivateKey
if err := Unmarshal(pk1.Rest, &key); err != nil { if err := Unmarshal(pk1.Rest, &key); err != nil {
return nil, err return nil, err
} }
@ -1337,13 +1488,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv
return pk, nil return pk, nil
case KeyAlgoED25519: case KeyAlgoED25519:
key := struct {
Pub []byte
Priv []byte
Comment string
Pad []byte `ssh:"rest"`
}{}
var key openSSHEd25519PrivateKey
if err := Unmarshal(pk1.Rest, &key); err != nil { if err := Unmarshal(pk1.Rest, &key); err != nil {
return nil, err return nil, err
} }
@ -1360,14 +1505,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv
copy(pk, key.Priv) copy(pk, key.Priv)
return &pk, nil return &pk, nil
case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
key := struct {
Curve string
Pub []byte
D *big.Int
Comment string
Pad []byte `ssh:"rest"`
}{}
var key openSSHECDSAPrivateKey
if err := Unmarshal(pk1.Rest, &key); err != nil { if err := Unmarshal(pk1.Rest, &key); err != nil {
return nil, err return nil, err
} }
@ -1415,6 +1553,131 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv
} }
} }
func marshalOpenSSHPrivateKey(key crypto.PrivateKey, comment string, encrypt openSSHEncryptFunc) (*pem.Block, error) {
var w openSSHEncryptedPrivateKey
var pk1 openSSHPrivateKey
// Random check bytes.
var check uint32
if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil {
return nil, err
}
pk1.Check1 = check
pk1.Check2 = check
w.NumKeys = 1
// Use a []byte directly on ed25519 keys.
if k, ok := key.(*ed25519.PrivateKey); ok {
key = *k
}
switch k := key.(type) {
case *rsa.PrivateKey:
E := new(big.Int).SetInt64(int64(k.PublicKey.E))
// Marshal public key:
// E and N are in reversed order in the public and private key.
pubKey := struct {
KeyType string
E *big.Int
N *big.Int
}{
KeyAlgoRSA,
E, k.PublicKey.N,
}
w.PubKey = Marshal(pubKey)
// Marshal private key.
key := openSSHRSAPrivateKey{
N: k.PublicKey.N,
E: E,
D: k.D,
Iqmp: k.Precomputed.Qinv,
P: k.Primes[0],
Q: k.Primes[1],
Comment: comment,
}
pk1.Keytype = KeyAlgoRSA
pk1.Rest = Marshal(key)
case ed25519.PrivateKey:
pub := make([]byte, ed25519.PublicKeySize)
priv := make([]byte, ed25519.PrivateKeySize)
copy(pub, k[32:])
copy(priv, k)
// Marshal public key.
pubKey := struct {
KeyType string
Pub []byte
}{
KeyAlgoED25519, pub,
}
w.PubKey = Marshal(pubKey)
// Marshal private key.
key := openSSHEd25519PrivateKey{
Pub: pub,
Priv: priv,
Comment: comment,
}
pk1.Keytype = KeyAlgoED25519
pk1.Rest = Marshal(key)
case *ecdsa.PrivateKey:
var curve, keyType string
switch name := k.Curve.Params().Name; name {
case "P-256":
curve = "nistp256"
keyType = KeyAlgoECDSA256
case "P-384":
curve = "nistp384"
keyType = KeyAlgoECDSA384
case "P-521":
curve = "nistp521"
keyType = KeyAlgoECDSA521
default:
return nil, errors.New("ssh: unhandled elliptic curve " + name)
}
pub := elliptic.Marshal(k.Curve, k.PublicKey.X, k.PublicKey.Y)
// Marshal public key.
pubKey := struct {
KeyType string
Curve string
Pub []byte
}{
keyType, curve, pub,
}
w.PubKey = Marshal(pubKey)
// Marshal private key.
key := openSSHECDSAPrivateKey{
Curve: curve,
Pub: pub,
D: k.D,
Comment: comment,
}
pk1.Keytype = keyType
pk1.Rest = Marshal(key)
default:
return nil, fmt.Errorf("ssh: unsupported key type %T", k)
}
var err error
// Add padding and encrypt the key if necessary.
w.PrivKeyBlock, w.CipherName, w.KdfName, w.KdfOpts, err = encrypt(Marshal(pk1))
if err != nil {
return nil, err
}
b := Marshal(w)
block := &pem.Block{
Type: "OPENSSH PRIVATE KEY",
Bytes: append([]byte(privateKeyAuthMagic), b...),
}
return block, nil
}
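marshalOpenSSHPrivateKey mirrors parseOpenSSHPrivateKey, so an unencrypted key should round-trip through the two. A sketch, assuming the exported wrapper is MarshalPrivateKey:

package main

import (
    "crypto/ed25519"
    "crypto/rand"
    "encoding/pem"
    "fmt"
    "log"

    "golang.org/x/crypto/ssh"
)

func main() {
    pub, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        log.Fatal(err)
    }
    block, err := ssh.MarshalPrivateKey(priv, "round-trip test")
    if err != nil {
        log.Fatal(err)
    }
    // Parse the freshly marshaled PEM block back into a Signer.
    signer, err := ssh.ParsePrivateKey(pem.EncodeToMemory(block))
    if err != nil {
        log.Fatal(err)
    }
    sshPub, _ := ssh.NewPublicKey(pub)
    fmt.Println(signer.PublicKey().Type() == sshPub.Type()) // true
}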
func checkOpenSSHKeyPadding(pad []byte) error {
for i, b := range pad {
if int(b) != i+1 {
@ -1424,6 +1687,13 @@ func checkOpenSSHKeyPadding(pad []byte) error {
return nil
}
func generateOpenSSHPadding(block []byte, blockSize int) []byte {
for i, l := 0, len(block); (l+i)%blockSize != 0; i++ {
block = append(block, byte(i+1))
}
return block
}
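A tiny self-contained illustration of the padding convention (re-implemented here, since generateOpenSSHPadding is unexported): bytes 1, 2, 3, ... are appended until the length is a multiple of the block size, which is exactly what checkOpenSSHKeyPadding verifies on the parsing side.

package main

import "fmt"

// pad mirrors generateOpenSSHPadding above (illustrative copy).
func pad(block []byte, blockSize int) []byte {
    for i, l := 0, len(block); (l+i)%blockSize != 0; i++ {
        block = append(block, byte(i+1))
    }
    return block
}

func main() {
    // len("hello") == 5, so three pad bytes (1, 2, 3) bring it to 8.
    fmt.Println(pad([]byte("hello"), 8)) // [104 101 108 108 111 1 2 3]
}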
// FingerprintLegacyMD5 returns the user presentation of the key's // FingerprintLegacyMD5 returns the user presentation of the key's
// fingerprint as described by RFC 4716 section 4. // fingerprint as described by RFC 4716 section 4.
func FingerprintLegacyMD5(pubKey PublicKey) string { func FingerprintLegacyMD5(pubKey PublicKey) string {

View File

@ -349,6 +349,20 @@ type userAuthGSSAPIError struct {
LanguageTag string LanguageTag string
} }
// Transport layer OpenSSH extension. See [PROTOCOL], section 1.9
const msgPing = 192
type pingMsg struct {
Data string `sshtype:"192"`
}
// Transport layer OpenSSH extension. See [PROTOCOL], section 1.9
const msgPong = 193
type pongMsg struct {
Data string `sshtype:"193"`
}
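For reference, the ping message marshals to the type byte 192 followed by a length-prefixed string. A sketch using a local copy of the struct (the real one is unexported), assuming ssh.Marshal honors the sshtype tag here as it does for the package's other message structs:

package main

import (
    "fmt"

    "golang.org/x/crypto/ssh"
)

// Local copy of pingMsg for illustration; the real type is unexported.
type pingMsg struct {
    Data string `sshtype:"192"`
}

func main() {
    // Expected wire form: c0 (192), then uint32 length 2, then "hi".
    fmt.Printf("% x\n", ssh.Marshal(pingMsg{Data: "hi"}))
}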
// typeTags returns the possible type bytes for the given reflect.Type, which // typeTags returns the possible type bytes for the given reflect.Type, which
// should be a struct. The possible values are separated by a '|' character. // should be a struct. The possible values are separated by a '|' character.
func typeTags(structType reflect.Type) (tags []byte) { func typeTags(structType reflect.Type) (tags []byte) {

View File

@ -231,6 +231,12 @@ func (m *mux) onePacket() error {
return m.handleChannelOpen(packet) return m.handleChannelOpen(packet)
case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: case msgGlobalRequest, msgRequestSuccess, msgRequestFailure:
return m.handleGlobalPacket(packet) return m.handleGlobalPacket(packet)
case msgPing:
var msg pingMsg
if err := Unmarshal(packet, &msg); err != nil {
return fmt.Errorf("failed to unmarshal ping@openssh.com message: %w", err)
}
return m.sendMessage(pongMsg(msg))
} }
// assume a channel packet. // assume a channel packet.

View File

@ -370,6 +370,25 @@ func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *c
return authErr, perms, nil return authErr, perms, nil
} }
// isAlgoCompatible checks if the signature format is compatible with the
// selected algorithm taking into account edge cases that occur with old
// clients.
func isAlgoCompatible(algo, sigFormat string) bool {
// Compatibility for old clients.
//
// For certificate authentication with OpenSSH 7.2-7.7 signature format can
// be rsa-sha2-256 or rsa-sha2-512 for the algorithm
// ssh-rsa-cert-v01@openssh.com.
//
// With gpg-agent < 2.2.6 the algorithm can be rsa-sha2-256 or rsa-sha2-512
// for signature format ssh-rsa.
if isRSA(algo) && isRSA(sigFormat) {
return true
}
// Standard case: the underlying algorithm must match the signature format.
return underlyingAlgo(algo) == sigFormat
}
// ServerAuthError represents server authentication errors and is // ServerAuthError represents server authentication errors and is
// sometimes returned by NewServerConn. It appends any authentication // sometimes returned by NewServerConn. It appends any authentication
// errors that may occur, and is returned if all of the authentication // errors that may occur, and is returned if all of the authentication
@ -557,7 +576,16 @@ userAuthLoop:
if !ok || len(payload) > 0 { if !ok || len(payload) > 0 {
return nil, parseError(msgUserAuthRequest) return nil, parseError(msgUserAuthRequest)
} }
// Ensure the declared public key algo is compatible with the
// decoded one. This check will ensure we don't accept e.g.
// ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public
// key type. The algorithm and public key type must be
// consistent: both must be certificate algorithms, or neither.
if !contains(algorithmsForKeyFormat(pubKey.Type()), algo) {
authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q",
pubKey.Type(), algo)
break
}
// Ensure the public key algo and signature algo // Ensure the public key algo and signature algo
// are supported. Compare the private key // are supported. Compare the private key
// algorithm name that corresponds to algo with // algorithm name that corresponds to algo with
@ -567,7 +595,7 @@ userAuthLoop:
authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
break break
} }
if underlyingAlgo(algo) != sig.Format { if !isAlgoCompatible(algo, sig.Format) {
authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo) authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo)
break break
} }

View File

@ -194,9 +194,8 @@ func render1(w writer, n *Node) error {
} }
} }
// Render any child nodes. // Render any child nodes
switch n.Data { if childTextNodesAreLiteral(n) {
case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
for c := n.FirstChild; c != nil; c = c.NextSibling { for c := n.FirstChild; c != nil; c = c.NextSibling {
if c.Type == TextNode { if c.Type == TextNode {
if _, err := w.WriteString(c.Data); err != nil { if _, err := w.WriteString(c.Data); err != nil {
@ -213,7 +212,7 @@ func render1(w writer, n *Node) error {
// last element in the file, with no closing tag. // last element in the file, with no closing tag.
return plaintextAbort return plaintextAbort
} }
default: } else {
for c := n.FirstChild; c != nil; c = c.NextSibling { for c := n.FirstChild; c != nil; c = c.NextSibling {
if err := render1(w, c); err != nil { if err := render1(w, c); err != nil {
return err return err
@ -231,6 +230,27 @@ func render1(w writer, n *Node) error {
return w.WriteByte('>') return w.WriteByte('>')
} }
func childTextNodesAreLiteral(n *Node) bool {
// Per WHATWG HTML 13.3, if the parent of the current node is a style,
// script, xmp, iframe, noembed, noframes, or plaintext element, and the
// current node is a text node, append the value of the node's data
// literally. The specification is not explicit about it, but we only
// enforce this if we are in the HTML namespace (i.e. when the namespace is
// "").
// NOTE: we also always include noscript elements, although the
// specification states that they should only be rendered as such if
// scripting is enabled for the node (which is not something we track).
if n.Namespace != "" {
return false
}
switch n.Data {
case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
return true
default:
return false
}
}
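The effect of childTextNodesAreLiteral is easiest to see by rendering two parents: text under a script element is written verbatim, while text under a p element is escaped. A minimal sketch:

package main

import (
    "log"
    "os"
    "strings"

    "golang.org/x/net/html"
)

func main() {
    doc, err := html.Parse(strings.NewReader(
        `<script>if (a < b) {}</script><p>if (a < b) {}</p>`))
    if err != nil {
        log.Fatal(err)
    }
    // The <script> body keeps "<" as-is; the <p> body comes out as "&lt;".
    if err := html.Render(os.Stdout, doc); err != nil {
        log.Fatal(err)
    }
}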
// writeQuoted writes s to w surrounded by quotes. Normally it will use double // writeQuoted writes s to w surrounded by quotes. Normally it will use double
// quotes, but if s contains a double quote, it will use single quotes. // quotes, but if s contains a double quote, it will use single quotes.
// It is used for writing the identifiers in a doctype declaration. // It is used for writing the identifiers in a doctype declaration.

View File

@ -1,51 +0,0 @@
#
# This Dockerfile builds a recent curl with HTTP/2 client support, using
# a recent nghttp2 build.
#
# See the Makefile for how to tag it. If Docker and that image is found, the
# Go tests use this curl binary for integration tests.
#
FROM ubuntu:trusty
RUN apt-get update && \
apt-get upgrade -y && \
apt-get install -y git-core build-essential wget
RUN apt-get install -y --no-install-recommends \
autotools-dev libtool pkg-config zlib1g-dev \
libcunit1-dev libssl-dev libxml2-dev libevent-dev \
automake autoconf
# The list of packages nghttp2 recommends for h2load:
RUN apt-get install -y --no-install-recommends make binutils \
autoconf automake autotools-dev \
libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
libev-dev libevent-dev libjansson-dev libjemalloc-dev \
cython python3.4-dev python-setuptools
# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
ENV NGHTTP2_VER 895da9a
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
WORKDIR /root/nghttp2
RUN git reset --hard $NGHTTP2_VER
RUN autoreconf -i
RUN automake
RUN autoconf
RUN ./configure
RUN make
RUN make install
WORKDIR /root
RUN wget https://curl.se/download/curl-7.45.0.tar.gz
RUN tar -zxvf curl-7.45.0.tar.gz
WORKDIR /root/curl-7.45.0
RUN ./configure --with-ssl --with-nghttp2=/usr/local
RUN make
RUN make install
RUN ldconfig
CMD ["-h"]
ENTRYPOINT ["/usr/local/bin/curl"]

View File

@ -1,3 +0,0 @@
curlimage:
docker build -t gohttp2/curl .

View File

@ -581,9 +581,11 @@ type serverConn struct {
advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
curClientStreams uint32 // number of open streams initiated by the client
curPushedStreams uint32 // number of open streams initiated by server push
curHandlers uint32 // number of running handler goroutines
maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
streams map[uint32]*stream
unstartedHandlers []unstartedHandler
initialStreamSendWindowSize int32
maxFrameSize int32
peerMaxHeaderListSize uint32 // zero means unknown (default)
@ -981,6 +983,8 @@ func (sc *serverConn) serve() {
return return
case gracefulShutdownMsg: case gracefulShutdownMsg:
sc.startGracefulShutdownInternal() sc.startGracefulShutdownInternal()
case handlerDoneMsg:
sc.handlerDone()
default: default:
panic("unknown timer") panic("unknown timer")
} }
@ -1012,14 +1016,6 @@ func (sc *serverConn) serve() {
} }
} }
func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
select {
case <-sc.doneServing:
case <-sharedCh:
close(privateCh)
}
}
type serverMessage int type serverMessage int
// Message values sent to serveMsgCh. // Message values sent to serveMsgCh.
@ -1028,6 +1024,7 @@ var (
idleTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage)
handlerDoneMsg = new(serverMessage)
) )
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
@ -1900,9 +1897,11 @@ func (st *stream) copyTrailersToHandlerRequest() {
// onReadTimeout is run on its own goroutine (from time.AfterFunc) // onReadTimeout is run on its own goroutine (from time.AfterFunc)
// when the stream's ReadTimeout has fired. // when the stream's ReadTimeout has fired.
func (st *stream) onReadTimeout() {
if st.body != nil {
// Wrap the ErrDeadlineExceeded to avoid callers depending on us
// returning the bare error.
st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
}
}
// onWriteTimeout is run on its own goroutine (from time.AfterFunc) // onWriteTimeout is run on its own goroutine (from time.AfterFunc)
@ -2020,13 +2019,10 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// (in Go 1.8), though. That's a more sane option anyway. // (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout != 0 {
sc.conn.SetReadDeadline(time.Time{})
if st.body != nil {
st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
go sc.runHandler(rw, req, handler)
return nil
return sc.scheduleHandler(id, rw, req, handler)
}
func (sc *serverConn) upgradeRequest(req *http.Request) { func (sc *serverConn) upgradeRequest(req *http.Request) {
@ -2046,6 +2042,10 @@ func (sc *serverConn) upgradeRequest(req *http.Request) {
sc.conn.SetReadDeadline(time.Time{}) sc.conn.SetReadDeadline(time.Time{})
} }
// This is the first request on the connection,
// so start the handler directly rather than going
// through scheduleHandler.
sc.curHandlers++
go sc.runHandler(rw, req, sc.handler.ServeHTTP) go sc.runHandler(rw, req, sc.handler.ServeHTTP)
} }
@ -2286,8 +2286,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response
return &responseWriter{rws: rws} return &responseWriter{rws: rws}
} }
type unstartedHandler struct {
streamID uint32
rw *responseWriter
req *http.Request
handler func(http.ResponseWriter, *http.Request)
}
// scheduleHandler starts a handler goroutine,
// or schedules one to start as soon as an existing handler finishes.
func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
sc.serveG.check()
maxHandlers := sc.advMaxStreams
if sc.curHandlers < maxHandlers {
sc.curHandlers++
go sc.runHandler(rw, req, handler)
return nil
}
if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
}
sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
streamID: streamID,
rw: rw,
req: req,
handler: handler,
})
return nil
}
func (sc *serverConn) handlerDone() {
sc.serveG.check()
sc.curHandlers--
i := 0
maxHandlers := sc.advMaxStreams
for ; i < len(sc.unstartedHandlers); i++ {
u := sc.unstartedHandlers[i]
if sc.streams[u.streamID] == nil {
// This stream was reset before its goroutine had a chance to start.
continue
}
if sc.curHandlers >= maxHandlers {
break
}
sc.curHandlers++
go sc.runHandler(u.rw, u.req, u.handler)
sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
}
sc.unstartedHandlers = sc.unstartedHandlers[i:]
if len(sc.unstartedHandlers) == 0 {
sc.unstartedHandlers = nil
}
}
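With this scheduling path, at most advMaxStreams handler goroutines run per connection; further streams are queued as unstartedHandlers, and a connection that resets too many queued streams is torn down with ENHANCE_YOUR_CALM. The cap comes from the server's MaxConcurrentStreams setting, e.g. in this sketch (certificate paths are placeholders):

package main

import (
    "log"
    "net/http"

    "golang.org/x/net/http2"
)

func main() {
    srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
    // MaxConcurrentStreams now also bounds concurrently running handlers.
    if err := http2.ConfigureServer(srv, &http2.Server{MaxConcurrentStreams: 100}); err != nil {
        log.Fatal(err)
    }
    log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}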
// Run on its own goroutine. // Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true didPanic := true
defer func() { defer func() {
rw.rws.stream.cancelCtx() rw.rws.stream.cancelCtx()

View File

@ -19,6 +19,7 @@ import (
"io/fs" "io/fs"
"log" "log"
"math" "math"
"math/bits"
mathrand "math/rand" mathrand "math/rand"
"net" "net"
"net/http" "net/http"
@ -290,8 +291,7 @@ func (t *Transport) initConnPool() {
// HTTP/2 server. // HTTP/2 server.
type ClientConn struct { type ClientConn struct {
t *Transport t *Transport
tconn net.Conn // usually *tls.Conn, except specialized impls tconn net.Conn // usually *tls.Conn, except specialized impls
tconnClosed bool
tlsState *tls.ConnectionState // nil only for specialized impls tlsState *tls.ConnectionState // nil only for specialized impls
reused uint32 // whether conn is being reused; atomic reused uint32 // whether conn is being reused; atomic
singleUse bool // whether being used for a single http.Request singleUse bool // whether being used for a single http.Request
@ -518,11 +518,14 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
func authorityAddr(scheme string, authority string) (addr string) {
host, port, err := net.SplitHostPort(authority)
if err != nil { // authority didn't have a port
host = authority
port = ""
}
if port == "" { // authority's port was empty
port = "443"
if scheme == "http" {
port = "80"
}
host = authority
}
if a, err := idna.ToASCII(host); err == nil {
host = a
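The fix also covers an authority with an empty port (a trailing colon), which net.SplitHostPort accepts without error. A self-contained re-implementation of just the port-defaulting logic, for illustration:

package main

import (
    "fmt"
    "net"
)

func defaultPort(scheme, authority string) string {
    host, port, err := net.SplitHostPort(authority)
    if err != nil { // authority didn't have a port
        host = authority
        port = ""
    }
    if port == "" { // authority's port was empty
        port = "443"
        if scheme == "http" {
            port = "80"
        }
    }
    return net.JoinHostPort(host, port)
}

func main() {
    fmt.Println(defaultPort("https", "example.com"))  // example.com:443
    fmt.Println(defaultPort("https", "example.com:")) // example.com:443
    fmt.Println(defaultPort("http", "example.com"))   // example.com:80
}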
@ -1677,7 +1680,27 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
return int(n) // doesn't truncate; max is 512K return int(n) // doesn't truncate; max is 512K
} }
var bufPool sync.Pool // of *[]byte
// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running
// streaming requests using small frame sizes occupy large buffers initially allocated for prior
// requests needing big buffers. The size ranges are as follows:
// {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB],
// {256 KB, 512 KB], {512 KB, infinity}
// In practice, the maximum scratch buffer size should not exceed 512 KB due to
// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used.
// It exists mainly as a safety measure, for potential future increases in max buffer size.
var bufPools [7]sync.Pool // of *[]byte
func bufPoolIndex(size int) int {
if size <= 16384 {
return 0
}
size -= 1
bits := bits.Len(uint(size))
index := bits - 14
if index >= len(bufPools) {
return len(bufPools) - 1
}
return index
}
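A quick worked check of the pool selection (same arithmetic, re-implemented here for illustration): sizes up to 16 KB land in pool 0, each doubling of the size moves up one pool, and anything above 512 KB falls into the last, "infinity" pool.

package main

import (
    "fmt"
    "math/bits"
)

func poolIndex(size int) int { // mirrors bufPoolIndex above
    if size <= 16384 {
        return 0
    }
    size -= 1
    index := bits.Len(uint(size)) - 14
    if index >= 7 {
        return 6
    }
    return index
}

func main() {
    for _, n := range []int{8 << 10, 16 << 10, 16<<10 + 1, 64 << 10, 512 << 10, 1 << 20} {
        fmt.Printf("%7d bytes -> pool %d\n", n, poolIndex(n))
    }
    // 8192 -> 0, 16384 -> 0, 16385 -> 1, 65536 -> 2, 524288 -> 5, 1048576 -> 6
}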
func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
cc := cs.cc cc := cs.cc
@ -1695,12 +1718,13 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
// Scratch buffer for reading into & writing from. // Scratch buffer for reading into & writing from.
scratchLen := cs.frameScratchBufferLen(maxFrameSize) scratchLen := cs.frameScratchBufferLen(maxFrameSize)
var buf []byte var buf []byte
if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen { index := bufPoolIndex(scratchLen)
defer bufPool.Put(bp) if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen {
defer bufPools[index].Put(bp)
buf = *bp buf = *bp
} else { } else {
buf = make([]byte, scratchLen) buf = make([]byte, scratchLen)
defer bufPool.Put(&buf) defer bufPools[index].Put(&buf)
} }
var sawEOF bool var sawEOF bool

vendor/golang.org/x/sys/cpu/cpu.go generated vendored
View File

@ -38,7 +38,7 @@ var X86 struct {
HasAVX512F bool // Advanced vector extension 512 Foundation Instructions HasAVX512F bool // Advanced vector extension 512 Foundation Instructions
HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions
HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions
HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions Instructions
HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions
HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions
HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions
HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions
@ -54,6 +54,9 @@ var X86 struct {
HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2
HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms
HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions
HasAMXTile bool // Advanced Matrix Extension Tile instructions
HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions
HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions
HasBMI1 bool // Bit manipulation instruction set 1 HasBMI1 bool // Bit manipulation instruction set 1
HasBMI2 bool // Bit manipulation instruction set 2 HasBMI2 bool // Bit manipulation instruction set 2
HasCX16 bool // Compare and exchange 16 Bytes HasCX16 bool // Compare and exchange 16 Bytes

View File

@ -7,6 +7,6 @@
package cpu package cpu
const cacheLineSize = 32
const cacheLineSize = 64
func initOptions() {} func initOptions() {}

View File

@ -37,6 +37,9 @@ func initOptions() {
{Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2},
{Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG},
{Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, {Name: "avx512bf16", Feature: &X86.HasAVX512BF16},
{Name: "amxtile", Feature: &X86.HasAMXTile},
{Name: "amxint8", Feature: &X86.HasAMXInt8},
{Name: "amxbf16", Feature: &X86.HasAMXBF16},
{Name: "bmi1", Feature: &X86.HasBMI1}, {Name: "bmi1", Feature: &X86.HasBMI1},
{Name: "bmi2", Feature: &X86.HasBMI2}, {Name: "bmi2", Feature: &X86.HasBMI2},
{Name: "cx16", Feature: &X86.HasCX16}, {Name: "cx16", Feature: &X86.HasCX16},
@ -138,6 +141,10 @@ func archInit() {
eax71, _, _, _ := cpuid(7, 1) eax71, _, _, _ := cpuid(7, 1)
X86.HasAVX512BF16 = isSet(5, eax71) X86.HasAVX512BF16 = isSet(5, eax71)
} }
X86.HasAMXTile = isSet(24, edx7)
X86.HasAMXInt8 = isSet(25, edx7)
X86.HasAMXBF16 = isSet(22, edx7)
} }
func isSet(bitpos uint, value uint32) bool { func isSet(bitpos uint, value uint32) bool {
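Once vendored, the new AMX bits are visible through the package's exported feature struct; a one-off check might look like this sketch:

package main

import (
    "fmt"

    "golang.org/x/sys/cpu"
)

func main() {
    // Populated from CPUID leaf 7 (EDX bits 24, 25 and 22) on x86.
    fmt.Println("AMX tile:", cpu.X86.HasAMXTile)
    fmt.Println("AMX int8:", cpu.X86.HasAMXInt8)
    fmt.Println("AMX bf16:", cpu.X86.HasAMXBF16)
}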

View File

@ -5,7 +5,7 @@
package cpu package cpu
import ( import (
"io/ioutil" "os"
) )
const ( const (
@ -39,7 +39,7 @@ func readHWCAP() error {
return nil return nil
} }
buf, err := ioutil.ReadFile(procAuxv)
buf, err := os.ReadFile(procAuxv)
if err != nil { if err != nil {
// e.g. on android /proc/self/auxv is not accessible, so silently // e.g. on android /proc/self/auxv is not accessible, so silently
// ignore the error and leave Initialized = false. On some // ignore the error and leave Initialized = false. On some

View File

@ -1,30 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package unsafeheader contains header declarations for the Go runtime's
// slice and string implementations.
//
// This package allows x/sys to use types equivalent to
// reflect.SliceHeader and reflect.StringHeader without introducing
// a dependency on the (relatively heavy) "reflect" package.
package unsafeheader
import (
"unsafe"
)
// Slice is the runtime representation of a slice.
// It cannot be used safely or portably and its representation may change in a later release.
type Slice struct {
Data unsafe.Pointer
Len int
Cap int
}
// String is the runtime representation of a string.
// It cannot be used safely or portably and its representation may change in a later release.
type String struct {
Data unsafe.Pointer
Len int
}

View File

@ -583,6 +583,7 @@ ccflags="$@"
$2 ~ /^PERF_/ || $2 ~ /^PERF_/ ||
$2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SECCOMP_MODE_/ ||
$2 ~ /^SEEK_/ || $2 ~ /^SEEK_/ ||
$2 ~ /^SCHED_/ ||
$2 ~ /^SPLICE_/ || $2 ~ /^SPLICE_/ ||
$2 ~ /^SYNC_FILE_RANGE_/ || $2 ~ /^SYNC_FILE_RANGE_/ ||
$2 !~ /IOC_MAGIC/ && $2 !~ /IOC_MAGIC/ &&
@ -624,7 +625,7 @@ ccflags="$@"
$2 ~ /^MEM/ || $2 ~ /^MEM/ ||
$2 ~ /^WG/ || $2 ~ /^WG/ ||
$2 ~ /^FIB_RULE_/ || $2 ~ /^FIB_RULE_/ ||
$2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE|IOMIN$|IOOPT$|ALIGNOFF$|DISCARD|ROTATIONAL$|ZEROOUT$|GETDISKSEQ$)/ {printf("\t%s = C.%s\n", $2, $2)}
$2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__WCOREFLAG$/ {next}
$2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}

vendor/golang.org/x/sys/unix/mmap_nomremap.go generated vendored Normal file
View File

@ -0,0 +1,14 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris
// +build aix darwin dragonfly freebsd openbsd solaris
package unix
var mapper = &mmapper{
active: make(map[*byte][]byte),
mmap: mmap,
munmap: munmap,
}

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build linux //go:build linux || netbsd
// +build linux // +build linux netbsd
package unix package unix
@ -14,8 +14,17 @@ type mremapMmapper struct {
mremap func(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) mremap func(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error)
} }
var mapper = &mremapMmapper{
mmapper: mmapper{
active: make(map[*byte][]byte),
mmap: mmap,
munmap: munmap,
},
mremap: mremap,
}
func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&MREMAP_FIXED != 0 { if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&mremapFixed != 0 {
return nil, EINVAL return nil, EINVAL
} }
@ -32,9 +41,13 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [
} }
bNew := unsafe.Slice((*byte)(unsafe.Pointer(newAddr)), newLength) bNew := unsafe.Slice((*byte)(unsafe.Pointer(newAddr)), newLength)
pNew := &bNew[cap(bNew)-1] pNew := &bNew[cap(bNew)-1]
if flags&MREMAP_DONTUNMAP == 0 { if flags&mremapDontunmap == 0 {
delete(m.active, pOld) delete(m.active, pOld)
} }
m.active[pNew] = bNew m.active[pNew] = bNew
return bNew, nil return bNew, nil
} }
func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
return mapper.Mremap(oldData, newLength, flags)
}
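With the mapper now defined here for linux and netbsd (and in mmap_nomremap.go for the other Unix platforms), the exported Mmap, Mremap and Munmap wrappers behave as before. A Linux-only usage sketch:

package main

import (
    "log"

    "golang.org/x/sys/unix"
)

func main() {
    // Anonymous one-page mapping, grown (or moved) to two pages, then unmapped.
    b, err := unix.Mmap(-1, 0, 4096, unix.PROT_READ|unix.PROT_WRITE,
        unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
    if err != nil {
        log.Fatal(err)
    }
    b, err = unix.Mremap(b, 8192, unix.MREMAP_MAYMOVE)
    if err != nil {
        log.Fatal(err)
    }
    defer unix.Munmap(b)
    log.Printf("mapping is now %d bytes", len(b))
}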

View File

@ -7,12 +7,6 @@
package unix package unix
import "unsafe"
func ptrace(request int, pid int, addr uintptr, data uintptr) error { func ptrace(request int, pid int, addr uintptr, data uintptr) error {
return ptrace1(request, pid, addr, data) return ptrace1(request, pid, addr, data)
} }
func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error {
return ptrace1Ptr(request, pid, addr, data)
}

View File

@ -7,12 +7,6 @@
package unix package unix
import "unsafe"
func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
return ENOTSUP return ENOTSUP
} }
func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) {
return ENOTSUP
}

View File

@ -487,8 +487,6 @@ func Fsync(fd int) error {
//sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unlinkat(dirfd int, path string, flags int) (err error)
//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error)
//sys write(fd int, p []byte) (n int, err error) //sys write(fd int, p []byte) (n int, err error)
//sys readlen(fd int, p *byte, np int) (n int, err error) = read
//sys writelen(fd int, p *byte, np int) (n int, err error) = write
//sys Dup2(oldfd int, newfd int) (err error) //sys Dup2(oldfd int, newfd int) (err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = posix_fadvise64 //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = posix_fadvise64
@ -535,21 +533,6 @@ func Fsync(fd int) error {
//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = nsendmsg //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = nsendmsg
//sys munmap(addr uintptr, length uintptr) (err error) //sys munmap(addr uintptr, length uintptr) (err error)
var mapper = &mmapper{
active: make(map[*byte][]byte),
mmap: mmap,
munmap: munmap,
}
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
return mapper.Mmap(fd, offset, length, prot, flags)
}
func Munmap(b []byte) (err error) {
return mapper.Munmap(b)
}
//sys Madvise(b []byte, advice int) (err error) //sys Madvise(b []byte, advice int) (err error)
//sys Mprotect(b []byte, prot int) (err error) //sys Mprotect(b []byte, prot int) (err error)
//sys Mlock(b []byte) (err error) //sys Mlock(b []byte) (err error)

View File

@ -601,20 +601,6 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
// Gethostuuid(uuid *byte, timeout *Timespec) (err error) // Gethostuuid(uuid *byte, timeout *Timespec) (err error)
// Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error) // Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error)
var mapper = &mmapper{
active: make(map[*byte][]byte),
mmap: mmap,
munmap: munmap,
}
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
return mapper.Mmap(fd, offset, length, prot, flags)
}
func Munmap(b []byte) (err error) {
return mapper.Munmap(b)
}
//sys Madvise(b []byte, behav int) (err error) //sys Madvise(b []byte, behav int) (err error)
//sys Mlock(b []byte) (err error) //sys Mlock(b []byte) (err error)
//sys Mlockall(flags int) (err error) //sys Mlockall(flags int) (err error)

View File

@ -510,30 +510,36 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
return nil, err return nil, err
} }
for {
// Find size.
n := uintptr(0)
if err := sysctl(mib, nil, &n, nil, 0); err != nil {
return nil, err
}
if n == 0 {
return nil, nil
}
if n%SizeofKinfoProc != 0 {
return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc)
}
// Read into buffer of that size.
buf := make([]KinfoProc, n/SizeofKinfoProc)
if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil {
if err == ENOMEM {
// Process table grew. Try again.
continue
}
return nil, err
}
if n%SizeofKinfoProc != 0 {
return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc)
}
// The actual call may return less than the original reported required
// size so ensure we deal with that.
return buf[:n/SizeofKinfoProc], nil
}
}
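The retry handles the process table growing between the size probe and the read, in which case the second sysctl fails with ENOMEM. Typical use on macOS, as a sketch:

package main

import (
    "fmt"
    "log"

    "golang.org/x/sys/unix"
)

func main() {
    // Darwin only: list every process on the system.
    procs, err := unix.SysctlKinfoProcSlice("kern.proc.all")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("running processes:", len(procs))
}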
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
@ -638,189 +644,3 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
//sys write(fd int, p []byte) (n int, err error) //sys write(fd int, p []byte) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
//sys munmap(addr uintptr, length uintptr) (err error) //sys munmap(addr uintptr, length uintptr) (err error)
//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
/*
* Unimplemented
*/
// Profil
// Sigaction
// Sigprocmask
// Getlogin
// Sigpending
// Sigaltstack
// Ioctl
// Reboot
// Execve
// Vfork
// Sbrk
// Sstk
// Ovadvise
// Mincore
// Setitimer
// Swapon
// Select
// Sigsuspend
// Readv
// Writev
// Nfssvc
// Getfh
// Quotactl
// Csops
// Waitid
// Add_profil
// Kdebug_trace
// Sigreturn
// Atsocket
// Kqueue_from_portset_np
// Kqueue_portset
// Getattrlist
// Getdirentriesattr
// Searchfs
// Delete
// Copyfile
// Watchevent
// Waitevent
// Modwatch
// Fsctl
// Initgroups
// Posix_spawn
// Nfsclnt
// Fhopen
// Minherit
// Semsys
// Msgsys
// Shmsys
// Semctl
// Semget
// Semop
// Msgctl
// Msgget
// Msgsnd
// Msgrcv
// Shm_open
// Shm_unlink
// Sem_open
// Sem_close
// Sem_unlink
// Sem_wait
// Sem_trywait
// Sem_post
// Sem_getvalue
// Sem_init
// Sem_destroy
// Open_extended
// Umask_extended
// Stat_extended
// Lstat_extended
// Fstat_extended
// Chmod_extended
// Fchmod_extended
// Access_extended
// Settid
// Gettid
// Setsgroups
// Getsgroups
// Setwgroups
// Getwgroups
// Mkfifo_extended
// Mkdir_extended
// Identitysvc
// Shared_region_check_np
// Shared_region_map_np
// __pthread_mutex_destroy
// __pthread_mutex_init
// __pthread_mutex_lock
// __pthread_mutex_trylock
// __pthread_mutex_unlock
// __pthread_cond_init
// __pthread_cond_destroy
// __pthread_cond_broadcast
// __pthread_cond_signal
// Setsid_with_pid
// __pthread_cond_timedwait
// Aio_fsync
// Aio_return
// Aio_suspend
// Aio_cancel
// Aio_error
// Aio_read
// Aio_write
// Lio_listio
// __pthread_cond_wait
// Iopolicysys
// __pthread_kill
// __pthread_sigmask
// __sigwait
// __disable_threadsignal
// __pthread_markcancel
// __pthread_canceled
// __semwait_signal
// Proc_info
// sendfile
// Stat64_extended
// Lstat64_extended
// Fstat64_extended
// __pthread_chdir
// __pthread_fchdir
// Audit
// Auditon
// Getauid
// Setauid
// Getaudit
// Setaudit
// Getaudit_addr
// Setaudit_addr
// Auditctl
// Bsdthread_create
// Bsdthread_terminate
// Stack_snapshot
// Bsdthread_register
// Workq_open
// Workq_ops
// __mac_execve
// __mac_syscall
// __mac_get_file
// __mac_set_file
// __mac_get_link
// __mac_set_link
// __mac_get_proc
// __mac_set_proc
// __mac_get_fd
// __mac_set_fd
// __mac_get_pid
// __mac_get_lcid
// __mac_get_lctx
// __mac_set_lctx
// Setlcid
// Read_nocancel
// Write_nocancel
// Open_nocancel
// Close_nocancel
// Wait4_nocancel
// Recvmsg_nocancel
// Sendmsg_nocancel
// Recvfrom_nocancel
// Accept_nocancel
// Fcntl_nocancel
// Select_nocancel
// Fsync_nocancel
// Connect_nocancel
// Sigsuspend_nocancel
// Readv_nocancel
// Writev_nocancel
// Sendto_nocancel
// Pread_nocancel
// Pwrite_nocancel
// Waitid_nocancel
// Poll_nocancel
// Msgsnd_nocancel
// Msgrcv_nocancel
// Sem_wait_nocancel
// Aio_suspend_nocancel
// __sigwait_nocancel
// __semwait_signal_nocancel
// __mac_mount
// __mac_get_mount
// __mac_getfsstat

View File

@ -47,6 +47,5 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
//sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64
//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
//sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace
//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
//sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64

View File

@ -47,6 +47,5 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
//sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT
//sys Lstat(path string, stat *Stat_t) (err error) //sys Lstat(path string, stat *Stat_t) (err error)
//sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace
//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace
//sys Stat(path string, stat *Stat_t) (err error) //sys Stat(path string, stat *Stat_t) (err error)
//sys Statfs(path string, stat *Statfs_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error)

View File

@ -343,203 +343,5 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys write(fd int, p []byte) (n int, err error) //sys write(fd int, p []byte) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
//sys munmap(addr uintptr, length uintptr) (err error) //sys munmap(addr uintptr, length uintptr) (err error)
//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
//sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error)
//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
/*
* Unimplemented
* TODO(jsing): Update this list for DragonFly.
*/
// Profil
// Sigaction
// Sigprocmask
// Getlogin
// Sigpending
// Sigaltstack
// Reboot
// Execve
// Vfork
// Sbrk
// Sstk
// Ovadvise
// Mincore
// Setitimer
// Swapon
// Select
// Sigsuspend
// Readv
// Writev
// Nfssvc
// Getfh
// Quotactl
// Mount
// Csops
// Waitid
// Add_profil
// Kdebug_trace
// Sigreturn
// Atsocket
// Kqueue_from_portset_np
// Kqueue_portset
// Getattrlist
// Setattrlist
// Getdirentriesattr
// Searchfs
// Delete
// Copyfile
// Watchevent
// Waitevent
// Modwatch
// Getxattr
// Fgetxattr
// Setxattr
// Fsetxattr
// Removexattr
// Fremovexattr
// Listxattr
// Flistxattr
// Fsctl
// Initgroups
// Posix_spawn
// Nfsclnt
// Fhopen
// Minherit
// Semsys
// Msgsys
// Shmsys
// Semctl
// Semget
// Semop
// Msgctl
// Msgget
// Msgsnd
// Msgrcv
// Shmat
// Shmctl
// Shmdt
// Shmget
// Shm_open
// Shm_unlink
// Sem_open
// Sem_close
// Sem_unlink
// Sem_wait
// Sem_trywait
// Sem_post
// Sem_getvalue
// Sem_init
// Sem_destroy
// Open_extended
// Umask_extended
// Stat_extended
// Lstat_extended
// Fstat_extended
// Chmod_extended
// Fchmod_extended
// Access_extended
// Settid
// Gettid
// Setsgroups
// Getsgroups
// Setwgroups
// Getwgroups
// Mkfifo_extended
// Mkdir_extended
// Identitysvc
// Shared_region_check_np
// Shared_region_map_np
// __pthread_mutex_destroy
// __pthread_mutex_init
// __pthread_mutex_lock
// __pthread_mutex_trylock
// __pthread_mutex_unlock
// __pthread_cond_init
// __pthread_cond_destroy
// __pthread_cond_broadcast
// __pthread_cond_signal
// Setsid_with_pid
// __pthread_cond_timedwait
// Aio_fsync
// Aio_return
// Aio_suspend
// Aio_cancel
// Aio_error
// Aio_read
// Aio_write
// Lio_listio
// __pthread_cond_wait
// Iopolicysys
// __pthread_kill
// __pthread_sigmask
// __sigwait
// __disable_threadsignal
// __pthread_markcancel
// __pthread_canceled
// __semwait_signal
// Proc_info
// Stat64_extended
// Lstat64_extended
// Fstat64_extended
// __pthread_chdir
// __pthread_fchdir
// Audit
// Auditon
// Getauid
// Setauid
// Getaudit
// Setaudit
// Getaudit_addr
// Setaudit_addr
// Auditctl
// Bsdthread_create
// Bsdthread_terminate
// Stack_snapshot
// Bsdthread_register
// Workq_open
// Workq_ops
// __mac_execve
// __mac_syscall
// __mac_get_file
// __mac_set_file
// __mac_get_link
// __mac_set_link
// __mac_get_proc
// __mac_set_proc
// __mac_get_fd
// __mac_set_fd
// __mac_get_pid
// __mac_get_lcid
// __mac_get_lctx
// __mac_set_lctx
// Setlcid
// Read_nocancel
// Write_nocancel
// Open_nocancel
// Close_nocancel
// Wait4_nocancel
// Recvmsg_nocancel
// Sendmsg_nocancel
// Recvfrom_nocancel
// Accept_nocancel
// Fcntl_nocancel
// Select_nocancel
// Fsync_nocancel
// Connect_nocancel
// Sigsuspend_nocancel
// Readv_nocancel
// Writev_nocancel
// Sendto_nocancel
// Pread_nocancel
// Pwrite_nocancel
// Waitid_nocancel
// Msgsnd_nocancel
// Msgrcv_nocancel
// Sem_wait_nocancel
// Aio_suspend_nocancel
// __sigwait_nocancel
// __semwait_signal_nocancel
// __mac_mount
// __mac_get_mount
// __mac_getfsstat

View File

@ -449,197 +449,5 @@ func Dup3(oldfd, newfd, flags int) error {
//sys write(fd int, p []byte) (n int, err error) //sys write(fd int, p []byte) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
//sys munmap(addr uintptr, length uintptr) (err error) //sys munmap(addr uintptr, length uintptr) (err error)
//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
//sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error)
//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
/*
* Unimplemented
*/
// Profil
// Sigaction
// Sigprocmask
// Getlogin
// Sigpending
// Sigaltstack
// Ioctl
// Reboot
// Execve
// Vfork
// Sbrk
// Sstk
// Ovadvise
// Mincore
// Setitimer
// Swapon
// Select
// Sigsuspend
// Readv
// Writev
// Nfssvc
// Getfh
// Quotactl
// Mount
// Csops
// Waitid
// Add_profil
// Kdebug_trace
// Sigreturn
// Atsocket
// Kqueue_from_portset_np
// Kqueue_portset
// Getattrlist
// Setattrlist
// Getdents
// Getdirentriesattr
// Searchfs
// Delete
// Copyfile
// Watchevent
// Waitevent
// Modwatch
// Fsctl
// Initgroups
// Posix_spawn
// Nfsclnt
// Fhopen
// Minherit
// Semsys
// Msgsys
// Shmsys
// Semctl
// Semget
// Semop
// Msgctl
// Msgget
// Msgsnd
// Msgrcv
// Shmat
// Shmctl
// Shmdt
// Shmget
// Shm_open
// Shm_unlink
// Sem_open
// Sem_close
// Sem_unlink
// Sem_wait
// Sem_trywait
// Sem_post
// Sem_getvalue
// Sem_init
// Sem_destroy
// Open_extended
// Umask_extended
// Stat_extended
// Lstat_extended
// Fstat_extended
// Chmod_extended
// Fchmod_extended
// Access_extended
// Settid
// Gettid
// Setsgroups
// Getsgroups
// Setwgroups
// Getwgroups
// Mkfifo_extended
// Mkdir_extended
// Identitysvc
// Shared_region_check_np
// Shared_region_map_np
// __pthread_mutex_destroy
// __pthread_mutex_init
// __pthread_mutex_lock
// __pthread_mutex_trylock
// __pthread_mutex_unlock
// __pthread_cond_init
// __pthread_cond_destroy
// __pthread_cond_broadcast
// __pthread_cond_signal
// Setsid_with_pid
// __pthread_cond_timedwait
// Aio_fsync
// Aio_return
// Aio_suspend
// Aio_cancel
// Aio_error
// Aio_read
// Aio_write
// Lio_listio
// __pthread_cond_wait
// Iopolicysys
// __pthread_kill
// __pthread_sigmask
// __sigwait
// __disable_threadsignal
// __pthread_markcancel
// __pthread_canceled
// __semwait_signal
// Proc_info
// Stat64_extended
// Lstat64_extended
// Fstat64_extended
// __pthread_chdir
// __pthread_fchdir
// Audit
// Auditon
// Getauid
// Setauid
// Getaudit
// Setaudit
// Getaudit_addr
// Setaudit_addr
// Auditctl
// Bsdthread_create
// Bsdthread_terminate
// Stack_snapshot
// Bsdthread_register
// Workq_open
// Workq_ops
// __mac_execve
// __mac_syscall
// __mac_get_file
// __mac_set_file
// __mac_get_link
// __mac_set_link
// __mac_get_proc
// __mac_set_proc
// __mac_get_fd
// __mac_set_fd
// __mac_get_pid
// __mac_get_lcid
// __mac_get_lctx
// __mac_set_lctx
// Setlcid
// Read_nocancel
// Write_nocancel
// Open_nocancel
// Close_nocancel
// Wait4_nocancel
// Recvmsg_nocancel
// Sendmsg_nocancel
// Recvfrom_nocancel
// Accept_nocancel
// Fcntl_nocancel
// Select_nocancel
// Fsync_nocancel
// Connect_nocancel
// Sigsuspend_nocancel
// Readv_nocancel
// Writev_nocancel
// Sendto_nocancel
// Pread_nocancel
// Pwrite_nocancel
// Waitid_nocancel
// Poll_nocancel
// Msgsnd_nocancel
// Msgrcv_nocancel
// Sem_wait_nocancel
// Aio_suspend_nocancel
// __sigwait_nocancel
// __semwait_signal_nocancel
// __mac_mount
// __mac_get_mount
// __mac_getfsstat

View File

@ -693,10 +693,10 @@ type SockaddrALG struct {
func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) {
// Leave room for NUL byte terminator.
if len(sa.Type) > 13 {
if len(sa.Type) > len(sa.raw.Type)-1 {
return nil, 0, EINVAL
}
if len(sa.Name) > 63 {
if len(sa.Name) > len(sa.raw.Name)-1 {
return nil, 0, EINVAL
}
@ -704,17 +704,8 @@ func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) {
sa.raw.Feat = sa.Feature
sa.raw.Mask = sa.Mask
typ, err := ByteSliceFromString(sa.Type)
if err != nil {
return nil, 0, err
}
name, err := ByteSliceFromString(sa.Name)
if err != nil {
return nil, 0, err
}
copy(sa.raw.Type[:], typ)
copy(sa.raw.Name[:], name)
copy(sa.raw.Type[:], sa.Type)
copy(sa.raw.Name[:], sa.Name)
return unsafe.Pointer(&sa.raw), SizeofSockaddrALG, nil
}
@ -1885,7 +1876,7 @@ func Getpgrp() (pid int) {
//sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error)
//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)
//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error)
//sys read(fd int, p []byte) (n int, err error) //sys read(fd int, p []byte) (n int, err error)
//sys Removexattr(path string, attr string) (err error) //sys Removexattr(path string, attr string) (err error)
//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) //sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)
@ -1988,8 +1979,6 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) {
//sys Unshare(flags int) (err error) //sys Unshare(flags int) (err error)
//sys write(fd int, p []byte) (n int, err error) //sys write(fd int, p []byte) (n int, err error)
//sys exitThread(code int) (err error) = SYS_EXIT //sys exitThread(code int) (err error) = SYS_EXIT
//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ
//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE
//sys readv(fd int, iovs []Iovec) (n int, err error) = SYS_READV //sys readv(fd int, iovs []Iovec) (n int, err error) = SYS_READV
//sys writev(fd int, iovs []Iovec) (n int, err error) = SYS_WRITEV //sys writev(fd int, iovs []Iovec) (n int, err error) = SYS_WRITEV
//sys preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PREADV //sys preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PREADV
@ -2125,28 +2114,6 @@ func writevRacedetect(iovecs []Iovec, n int) {
// mmap varies by architecture; see syscall_linux_*.go. // mmap varies by architecture; see syscall_linux_*.go.
//sys munmap(addr uintptr, length uintptr) (err error) //sys munmap(addr uintptr, length uintptr) (err error)
//sys mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) //sys mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error)
var mapper = &mremapMmapper{
mmapper: mmapper{
active: make(map[*byte][]byte),
mmap: mmap,
munmap: munmap,
},
mremap: mremap,
}
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
return mapper.Mmap(fd, offset, length, prot, flags)
}
func Munmap(b []byte) (err error) {
return mapper.Munmap(b)
}
func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
return mapper.Mremap(oldData, newLength, flags)
}
//sys Madvise(b []byte, advice int) (err error)
//sys Mprotect(b []byte, prot int) (err error)
//sys Mlock(b []byte) (err error)
@ -2155,6 +2122,12 @@ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
//sys Munlock(b []byte) (err error)
//sys Munlockall() (err error)
const (
mremapFixed = MREMAP_FIXED
mremapDontunmap = MREMAP_DONTUNMAP
mremapMaymove = MREMAP_MAYMOVE
)
// Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd,
// using the specified flags.
func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) {
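The mremapFixed/mremapDontunmap/mremapMaymove constants added above back the portable Mremap wrapper that this diff moves into the shared mapper code. A minimal usage sketch, not taken from the diff, might look like this on Linux; os.Getpagesize, MAP_ANONYMOUS and MREMAP_MAYMOVE are standard names, and error handling is reduced to panics.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	page := os.Getpagesize()

	// Anonymous read/write mapping of one page.
	b, err := unix.Mmap(-1, 0, page, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_ANONYMOUS|unix.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	b[0] = 42

	// Grow the mapping; MREMAP_MAYMOVE corresponds to the unexported
	// mremapMaymove constant above and lets the kernel relocate the mapping.
	bigger, err := unix.Mremap(b, 4*page, unix.MREMAP_MAYMOVE)
	if err != nil {
		panic(err)
	}
	defer unix.Munmap(bigger)

	fmt.Println(len(bigger), bigger[0]) // 4*page 42
}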
@ -2454,98 +2427,58 @@ func Getresgid() (rgid, egid, sgid int) {
return int(r), int(e), int(s)
}

// Pselect is a wrapper around the Linux pselect6 system call.
// This version does not modify the timeout argument.
func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
// Per https://man7.org/linux/man-pages/man2/select.2.html#NOTES,
// The Linux pselect6() system call modifies its timeout argument.
// [Not modifying the argument] is the behavior required by POSIX.1-2001.
var mutableTimeout *Timespec
if timeout != nil {
mutableTimeout = new(Timespec)
*mutableTimeout = *timeout
}

// The final argument of the pselect6() system call is not a
// sigset_t * pointer, but is instead a structure
var kernelMask *sigset_argpack
if sigmask != nil {
wordBits := 32 << (^uintptr(0) >> 63) // see math.intSize

// A sigset stores one bit per signal,
// offset by 1 (because signal 0 does not exist).
// So the number of words needed is ⌈__C_NSIG - 1 / wordBits⌉.
sigsetWords := (_C__NSIG - 1 + wordBits - 1) / (wordBits)

sigsetBytes := uintptr(sigsetWords * (wordBits / 8))
kernelMask = &sigset_argpack{
ss: sigmask,
ssLen: sigsetBytes,
}
}

return pselect6(nfd, r, w, e, mutableTimeout, kernelMask)
}

//sys schedSetattr(pid int, attr *SchedAttr, flags uint) (err error)
//sys schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error)

// SchedSetAttr is a wrapper for sched_setattr(2) syscall.
// https://man7.org/linux/man-pages/man2/sched_setattr.2.html
func SchedSetAttr(pid int, attr *SchedAttr, flags uint) error {
if attr == nil {
return EINVAL
}
attr.Size = SizeofSchedAttr
return schedSetattr(pid, attr, flags)
}

// SchedGetAttr is a wrapper for sched_getattr(2) syscall.
// https://man7.org/linux/man-pages/man2/sched_getattr.2.html
func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) {
attr := &SchedAttr{}
if err := schedGetattr(pid, attr, SizeofSchedAttr, flags); err != nil {
return nil, err
}
return attr, nil
}

/*
* Unimplemented
*/
// AfsSyscall
// ArchPrctl
// Brk
// ClockNanosleep
// ClockSettime
// Clone
// EpollCtlOld
// EpollPwait
// EpollWaitOld
// Execve
// Fork
// Futex
// GetKernelSyms
// GetMempolicy
// GetRobustList
// GetThreadArea
// Getpmsg
// IoCancel
// IoDestroy
// IoGetevents
// IoSetup
// IoSubmit
// IoprioGet
// IoprioSet
// KexecLoad
// LookupDcookie
// Mbind
// MigratePages
// Mincore
// ModifyLdt
// Mount
// MovePages
// MqGetsetattr
// MqNotify
// MqOpen
// MqTimedreceive
// MqTimedsend
// MqUnlink
// Msgctl
// Msgget
// Msgrcv
// Msgsnd
// Nfsservctl
// Personality
// Pselect6
// Ptrace
// Putpmsg
// Quotactl
// Readahead
// Readv
// RemapFilePages
// RestartSyscall
// RtSigaction
// RtSigpending
// RtSigqueueinfo
// RtSigreturn
// RtSigsuspend
// RtSigtimedwait
// SchedGetPriorityMax
// SchedGetPriorityMin
// SchedGetparam
// SchedGetscheduler
// SchedRrGetInterval
// SchedSetparam
// SchedYield
// Security
// Semctl
// Semget
// Semop
// Semtimedop
// SetMempolicy
// SetRobustList
// SetThreadArea
// SetTidAddress
// Sigaltstack
// Swapoff
// Swapon
// Sysfs
// TimerCreate
// TimerDelete
// TimerGetoverrun
// TimerGettime
// TimerSettime
// Tkill (obsolete)
// Tuxcall
// Umount2
// Uselib
// Utimensat
// Vfork
// Vhangup
// Vserver
// _Sysctl
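The Pselect wrapper added earlier in this hunk copies the caller's timeout because the raw pselect6 syscall rewrites it, and packs the signal mask into a (pointer, size) pair; on linux/amd64, wordBits is 64 and _C__NSIG is 0x41 (65), so sigsetWords works out to 1 and ssLen to 8 bytes. Below is a small sketch of the resulting caller-visible behavior, not taken from the diff and assuming the FdSet.Set helper provided by this package.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var readfds unix.FdSet
	readfds.Set(0) // watch stdin

	timeout := unix.Timespec{Sec: 1, Nsec: 0}
	n, err := unix.Pselect(1, &readfds, nil, nil, &timeout, nil)
	if err != nil {
		panic(err)
	}

	// timeout still reads {Sec: 1, Nsec: 0}: the wrapper handed the kernel a
	// private copy, so the caller's value is preserved.
	fmt.Println(n, timeout.Sec, timeout.Nsec)
}

SchedSetAttr and SchedGetAttr in the same hunk follow a similar wrapping pattern, filling in SchedAttr.Size before invoking the raw sched_setattr/sched_getattr syscalls.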


@ -40,7 +40,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
if timeout != nil {
ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
}
return Pselect(nfd, r, w, e, ts, nil)
return pselect6(nfd, r, w, e, ts, nil)
}

//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
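The Select shim above now delegates to the raw pselect6 syscall instead of the exported Pselect wrapper, after converting the Timeval timeout into a Timespec. A quick illustrative check of that conversion, not taken from the diff:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	tv := unix.Timeval{Sec: 2, Usec: 500000} // 2.5 seconds
	ts := unix.Timespec{Sec: tv.Sec, Nsec: tv.Usec * 1000}
	fmt.Println(ts.Sec, ts.Nsec) // 2 500000000
}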


@ -33,7 +33,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
if timeout != nil {
ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
}
return Pselect(nfd, r, w, e, ts, nil)
return pselect6(nfd, r, w, e, ts, nil)
}

//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)


@ -28,7 +28,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
if timeout != nil {
ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
}
return Pselect(nfd, r, w, e, ts, nil)
return pselect6(nfd, r, w, e, ts, nil)
}

//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)


@ -31,7 +31,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
if timeout != nil {
ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
}
return Pselect(nfd, r, w, e, ts, nil)
return pselect6(nfd, r, w, e, ts, nil)
}

//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)


@ -32,7 +32,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
if timeout != nil {
ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
}
return Pselect(nfd, r, w, e, ts, nil)
return pselect6(nfd, r, w, e, ts, nil)
}

//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
@ -177,3 +177,14 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}
//sys riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error)
func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error) {
var setSize uintptr
if set != nil {
setSize = uintptr(unsafe.Sizeof(*set))
}
return riscvHWProbe(pairs, setSize, set, flags)
}
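RISCVHWProbe above wraps the riscv_hwprobe syscall, passing the CPUSet size to the kernel only when a set is supplied. A rough usage sketch, not shown in this diff, assuming the RISCV_HWPROBE_* constants and the Key/Value fields defined alongside this wrapper for linux/riscv64:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	pairs := []unix.RISCVHWProbePairs{
		{Key: unix.RISCV_HWPROBE_KEY_IMA_EXT_0}, // the kernel fills in Value
	}
	// A nil CPUSet passes a zero-sized set to the kernel (see the wrapper above).
	if err := unix.RISCVHWProbe(pairs, nil, 0); err != nil {
		panic(err)
	}
	hasC := pairs[0].Value&unix.RISCV_HWPROBE_IMA_C != 0
	fmt.Println("compressed instructions supported:", hasC)
}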


@ -356,266 +356,16 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
//sys write(fd int, p []byte) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
//sys munmap(addr uintptr, length uintptr) (err error)
//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)

const (
mremapFixed = MAP_FIXED
mremapDontunmap = 0
mremapMaymove = 0
)

//sys mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) = SYS_MREMAP

func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (uintptr, error) {
return mremapNetBSD(oldaddr, oldlength, newaddr, newlength, flags)
}

/*
* Unimplemented
*/
// ____semctl13
// __clone
// __fhopen40
// __fhstat40
// __fhstatvfs140
// __fstat30
// __getcwd
// __getfh30
// __getlogin
// __lstat30
// __mount50
// __msgctl13
// __msync13
// __ntp_gettime30
// __posix_chown
// __posix_fchown
// __posix_lchown
// __posix_rename
// __setlogin
// __shmctl13
// __sigaction_sigtramp
// __sigaltstack14
// __sigpending14
// __sigprocmask14
// __sigsuspend14
// __sigtimedwait
// __stat30
// __syscall
// __vfork14
// _ksem_close
// _ksem_destroy
// _ksem_getvalue
// _ksem_init
// _ksem_open
// _ksem_post
// _ksem_trywait
// _ksem_unlink
// _ksem_wait
// _lwp_continue
// _lwp_create
// _lwp_ctl
// _lwp_detach
// _lwp_exit
// _lwp_getname
// _lwp_getprivate
// _lwp_kill
// _lwp_park
// _lwp_self
// _lwp_setname
// _lwp_setprivate
// _lwp_suspend
// _lwp_unpark
// _lwp_unpark_all
// _lwp_wait
// _lwp_wakeup
// _pset_bind
// _sched_getaffinity
// _sched_getparam
// _sched_setaffinity
// _sched_setparam
// acct
// aio_cancel
// aio_error
// aio_fsync
// aio_read
// aio_return
// aio_suspend
// aio_write
// break
// clock_getres
// clock_gettime
// clock_settime
// compat_09_ogetdomainname
// compat_09_osetdomainname
// compat_09_ouname
// compat_10_omsgsys
// compat_10_osemsys
// compat_10_oshmsys
// compat_12_fstat12
// compat_12_getdirentries
// compat_12_lstat12
// compat_12_msync
// compat_12_oreboot
// compat_12_oswapon
// compat_12_stat12
// compat_13_sigaction13
// compat_13_sigaltstack13
// compat_13_sigpending13
// compat_13_sigprocmask13
// compat_13_sigreturn13
// compat_13_sigsuspend13
// compat_14___semctl
// compat_14_msgctl
// compat_14_shmctl
// compat_16___sigaction14
// compat_16___sigreturn14
// compat_20_fhstatfs
// compat_20_fstatfs
// compat_20_getfsstat
// compat_20_statfs
// compat_30___fhstat30
// compat_30___fstat13
// compat_30___lstat13
// compat_30___stat13
// compat_30_fhopen
// compat_30_fhstat
// compat_30_fhstatvfs1
// compat_30_getdents
// compat_30_getfh
// compat_30_ntp_gettime
// compat_30_socket
// compat_40_mount
// compat_43_fstat43
// compat_43_lstat43
// compat_43_oaccept
// compat_43_ocreat
// compat_43_oftruncate
// compat_43_ogetdirentries
// compat_43_ogetdtablesize
// compat_43_ogethostid
// compat_43_ogethostname
// compat_43_ogetkerninfo
// compat_43_ogetpagesize
// compat_43_ogetpeername
// compat_43_ogetrlimit
// compat_43_ogetsockname
// compat_43_okillpg
// compat_43_olseek
// compat_43_ommap
// compat_43_oquota
// compat_43_orecv
// compat_43_orecvfrom
// compat_43_orecvmsg
// compat_43_osend
// compat_43_osendmsg
// compat_43_osethostid
// compat_43_osethostname
// compat_43_osigblock
// compat_43_osigsetmask
// compat_43_osigstack
// compat_43_osigvec
// compat_43_otruncate
// compat_43_owait
// compat_43_stat43
// execve
// extattr_delete_fd
// extattr_delete_file
// extattr_delete_link
// extattr_get_fd
// extattr_get_file
// extattr_get_link
// extattr_list_fd
// extattr_list_file
// extattr_list_link
// extattr_set_fd
// extattr_set_file
// extattr_set_link
// extattrctl
// fchroot
// fdatasync
// fgetxattr
// fktrace
// flistxattr
// fork
// fremovexattr
// fsetxattr
// fstatvfs1
// fsync_range
// getcontext
// getitimer
// getvfsstat
// getxattr
// ktrace
// lchflags
// lchmod
// lfs_bmapv
// lfs_markv
// lfs_segclean
// lfs_segwait
// lgetxattr
// lio_listio
// listxattr
// llistxattr
// lremovexattr
// lseek
// lsetxattr
// lutimes
// madvise
// mincore
// minherit
// modctl
// mq_close
// mq_getattr
// mq_notify
// mq_open
// mq_receive
// mq_send
// mq_setattr
// mq_timedreceive
// mq_timedsend
// mq_unlink
// mremap
// msgget
// msgrcv
// msgsnd
// nfssvc
// ntp_adjtime
// pmc_control
// pmc_get_info
// pollts
// preadv
// profil
// pselect
// pset_assign
// pset_create
// pset_destroy
// ptrace
// pwritev
// quotactl
// rasctl
// readv
// reboot
// removexattr
// sa_enable
// sa_preempt
// sa_register
// sa_setconcurrency
// sa_stacks
// sa_yield
// sbrk
// sched_yield
// semconfig
// semget
// semop
// setcontext
// setitimer
// setxattr
// shmat
// shmdt
// shmget
// sstk
// statvfs1
// swapctl
// sysarch
// syscall
// timer_create
// timer_delete
// timer_getoverrun
// timer_gettime
// timer_settime
// undelete
// utrace
// uuidgen
// vadvise
// vfork
// writev


@ -326,78 +326,4 @@ func Uname(uname *Utsname) error {
//sys write(fd int, p []byte) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
//sys munmap(addr uintptr, length uintptr) (err error)
//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
/*
* Unimplemented
*/
// __getcwd
// __semctl
// __syscall
// __sysctl
// adjfreq
// break
// clock_getres
// clock_gettime
// clock_settime
// closefrom
// execve
// fhopen
// fhstat
// fhstatfs
// fork
// futimens
// getfh
// getgid
// getitimer
// getlogin
// getthrid
// ktrace
// lfs_bmapv
// lfs_markv
// lfs_segclean
// lfs_segwait
// mincore
// minherit
// mount
// mquery
// msgctl
// msgget
// msgrcv
// msgsnd
// nfssvc
// nnpfspioctl
// preadv
// profil
// pwritev
// quotactl
// readv
// reboot
// renameat
// rfork
// sched_yield
// semget
// semop
// setgroups
// setitimer
// setsockopt
// shmat
// shmctl
// shmdt
// shmget
// sigaction
// sigaltstack
// sigpending
// sigprocmask
// sigreturn
// sigsuspend
// sysarch
// syscall
// threxit
// thrsigdivert
// thrsleep
// thrwakeup
// vfork
// writev


@ -698,38 +698,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt
//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom
func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0)
n = int(r0)
if e1 != 0 {
err = e1
}
return
}
func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0)
n = int(r0)
if e1 != 0 {
err = e1
}
return
}
var mapper = &mmapper{
active: make(map[*byte][]byte),
mmap: mmap,
munmap: munmap,
}
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
return mapper.Mmap(fd, offset, length, prot, flags)
}
func Munmap(b []byte) (err error) {
return mapper.Munmap(b)
}
// Event Ports
type fileObjCookie struct {


@ -147,6 +147,14 @@ func (m *mmapper) Munmap(data []byte) (err error) {
return nil
}
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
return mapper.Mmap(fd, offset, length, prot, flags)
}
func Munmap(b []byte) (err error) {
return mapper.Munmap(b)
}
func Read(fd int, p []byte) (n int, err error) {
n, err = read(fd, p)
if raceenabled {
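With the package-level Mmap and Munmap wrappers consolidated here over the shared mapper, a typical read-only file mapping looks like the following sketch (not part of the diff; the file path is just an example):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hostname") // any small, non-empty, readable file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		panic(err)
	}

	data, err := unix.Mmap(int(f.Fd()), 0, int(st.Size()), unix.PROT_READ, unix.MAP_SHARED)
	if err != nil {
		panic(err)
	}
	defer unix.Munmap(data)

	fmt.Printf("%s", data)
}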
@ -541,6 +549,9 @@ func SetNonblock(fd int, nonblocking bool) (err error) {
if err != nil {
return err
}
if (flag&O_NONBLOCK != 0) == nonblocking {
return nil
}
if nonblocking {
flag |= O_NONBLOCK
} else {
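The check added above makes SetNonblock a no-op when the O_NONBLOCK bit already matches the request, so only the fcntl F_GETFL call is issued. A tiny illustrative sketch, not taken from the diff:

package main

import "golang.org/x/sys/unix"

func main() {
	fds := make([]int, 2)
	if err := unix.Pipe(fds); err != nil {
		panic(err)
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	if err := unix.SetNonblock(fds[0], true); err != nil {
		panic(err)
	}
	// Second call returns early: O_NONBLOCK is already set.
	if err := unix.SetNonblock(fds[0], true); err != nil {
		panic(err)
	}
}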


@ -192,7 +192,6 @@ func (cmsg *Cmsghdr) SetLen(length int) {
//sys fcntl(fd int, cmd int, arg int) (val int, err error)
//sys read(fd int, p []byte) (n int, err error)
//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
//sys write(fd int, p []byte) (n int, err error)
//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A
@ -285,25 +284,11 @@ func Close(fd int) (err error) {
return
}
var mapper = &mmapper{
active: make(map[*byte][]byte),
mmap: mmap,
munmap: munmap,
}
// Dummy function: there are no semantics for Madvise on z/OS
func Madvise(b []byte, advice int) (err error) {
return
}
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
return mapper.Mmap(fd, offset, length, prot, flags)
}
func Munmap(b []byte) (err error) {
return mapper.Munmap(b)
}
//sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A
//sysnb Getegid() (egid int)
//sysnb Geteuid() (uid int)

Some files were not shown because too many files have changed in this diff.