diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 6efbdc6ea..6cb75e18c 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -22,7 +22,7 @@ content/en/docs/collector @open-telemetry/docs-approvers @open-te
content/en/docs/demo @open-telemetry/docs-approvers @open-telemetry/demo-approvers
content/en/docs/instrumentation/cpp/ @open-telemetry/docs-approvers @open-telemetry/cpp-maintainers
content/en/docs/instrumentation/erlang/ @open-telemetry/docs-approvers @open-telemetry/erlang-approvers
-# go is a content module
+content/en/docs/instrumentation/go/ @open-telemetry/docs-approvers @open-telemetry/go-instrumentation-approvers
content/en/docs/instrumentation/java/ @open-telemetry/docs-approvers @open-telemetry/java-maintainers @open-telemetry/java-instrumentation-maintainers
content/en/docs/instrumentation/js/ @open-telemetry/docs-approvers @open-telemetry/javascript-approvers
content/en/docs/instrumentation/net/ @open-telemetry/docs-approvers @open-telemetry/dotnet-approvers @open-telemetry/dotnet-instrumentation-approvers
diff --git a/.gitmodules b/.gitmodules
index c634f7350..f496e9754 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -2,9 +2,6 @@
path = themes/docsy
url = https://github.com/cncf/docsy.git
branch = opentelemetry.io
-[submodule "content-modules/opentelemetry-go"]
- path = content-modules/opentelemetry-go
- url = https://github.com/open-telemetry/opentelemetry-go
[submodule "content-modules/opentelemetry-specification"]
path = content-modules/opentelemetry-specification
url = https://github.com/open-telemetry/opentelemetry-specification.git
diff --git a/.prettierignore b/.prettierignore
index 57163349c..54ed30887 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -18,12 +18,6 @@
!/content-modules/community/mission-vision-values.md
!/content-modules/community/roadmap.md
-!/content-modules/opentelemetry-go
-/content-modules/opentelemetry-go/*
-!/content-modules/opentelemetry-go/website_docs
-/content-modules/opentelemetry-go/website_docs/**/*
-!/content-modules/opentelemetry-go/website_docs/**/*.md
-
# Ignore generated resources
package-lock.json
diff --git a/content-modules/opentelemetry-go b/content-modules/opentelemetry-go
deleted file mode 160000
index 4f4815406..000000000
--- a/content-modules/opentelemetry-go
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 4f4815406aff7889cee8b93396ac373e9240fe55
diff --git a/content/en/docs/instrumentation/go/_index.md b/content/en/docs/instrumentation/go/_index.md
new file mode 100644
index 000000000..8dd97f21c
--- /dev/null
+++ b/content/en/docs/instrumentation/go/_index.md
@@ -0,0 +1,14 @@
+---
+title: Go
+description: >-
+  A language-specific implementation of OpenTelemetry in Go.
+aliases: [/golang, /golang/metrics, /golang/tracing]
+weight: 16
+---
+
+{{% docs/instrumentation/index-intro go /%}}
+
+## More
+
+- [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib)
diff --git a/content/en/docs/instrumentation/go/api.md b/content/en/docs/instrumentation/go/api.md
new file mode 100644
index 000000000..9bdbc6b53
--- /dev/null
+++ b/content/en/docs/instrumentation/go/api.md
@@ -0,0 +1,8 @@
+---
+title: API reference
+linkTitle: API
+redirect: https://pkg.go.dev/go.opentelemetry.io/otel
+manualLinkTarget: _blank
+_build: { render: link }
+weight: 210
+---
diff --git a/content/en/docs/instrumentation/go/examples.md b/content/en/docs/instrumentation/go/examples.md
new file mode 100644
index 000000000..05db2e283
--- /dev/null
+++ b/content/en/docs/instrumentation/go/examples.md
@@ -0,0 +1,7 @@
+---
+title: Examples
+redirect: https://github.com/open-telemetry/opentelemetry-go/tree/main/example
+manualLinkTarget: _blank
+_build: { render: link }
+weight: 220
+---
diff --git a/content/en/docs/instrumentation/go/exporters.md b/content/en/docs/instrumentation/go/exporters.md
new file mode 100644
index 000000000..64df5d034
--- /dev/null
+++ b/content/en/docs/instrumentation/go/exporters.md
@@ -0,0 +1,57 @@
+---
+title: Exporters
+aliases: [/docs/instrumentation/go/exporting_data]
+weight: 50
+---
+
+In order to visualize and analyze your [traces](/docs/concepts/signals/traces/)
+and metrics, you will need to export them to a backend.
+
+## OTLP endpoint
+
+To send trace data to an OTLP endpoint (like the [collector](/docs/collector) or
+Jaeger >= v1.35.0), you'll want to configure an OTLP exporter that sends to your
+endpoint.
+
+### Using HTTP
+
+```go
+import (
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+)
+
+func installExportPipeline(ctx context.Context) (func(context.Context) error, error) {
+ client := otlptracehttp.NewClient()
+ exporter, err := otlptrace.New(ctx, client)
+ if err != nil {
+ return nil, fmt.Errorf("creating OTLP trace exporter: %w", err)
+ }
+ /* … */
+}
+```
+
+To learn more on how to use the OTLP HTTP exporter, try out the
+[otel-collector](https://github.com/open-telemetry/opentelemetry-go/tree/main/example/otel-collector)
+
+### Jaeger
+
+To try out the OTLP exporter, you can run [Jaeger](https://www.jaegertracing.io/)
+(v1.35.0 or later) as an OTLP endpoint and for trace visualization in a Docker
+container:
+
+```shell
+docker run -d --name jaeger \
+ -e COLLECTOR_OTLP_ENABLED=true \
+ -p 16686:16686 \
+ -p 4318:4318 \
+ jaegertracing/all-in-one:latest
+```
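+
+With Jaeger listening on `localhost:4318`, the HTTP client shown earlier can be
+pointed at it explicitly. This is only a sketch: the exporter already defaults to
+`localhost:4318`, so the options below are shown for illustration.
+
+```go
+client := otlptracehttp.NewClient(
+	// Jaeger's OTLP HTTP port from the docker command above.
+	otlptracehttp.WithEndpoint("localhost:4318"),
+	// The local Jaeger endpoint serves plain HTTP, not TLS.
+	otlptracehttp.WithInsecure(),
+)
+```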
+
+## Prometheus
+
+Prometheus export is available in the
+`go.opentelemetry.io/otel/exporters/prometheus` package.
+
+You can find more documentation on
+[GitHub](https://github.com/open-telemetry/opentelemetry-go/tree/main/exporters/prometheus).
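+
+A minimal setup might look like the following sketch. It assumes the metrics SDK
+package `go.opentelemetry.io/otel/sdk/metric` is also installed; the exporter acts
+as a reader for the SDK, and the collected metrics are typically exposed over
+HTTP (for example with `promhttp`) for Prometheus to scrape.
+
+```go
+import (
+	"go.opentelemetry.io/otel/exporters/prometheus"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func newMeterProvider() (*sdkmetric.MeterProvider, error) {
+	// The Prometheus exporter gathers metrics from the SDK when scraped.
+	exporter, err := prometheus.New()
+	if err != nil {
+		return nil, err
+	}
+	return sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter)), nil
+}
+```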
diff --git a/content/en/docs/instrumentation/go/getting-started.md b/content/en/docs/instrumentation/go/getting-started.md
new file mode 100644
index 000000000..6cf5c28e4
--- /dev/null
+++ b/content/en/docs/instrumentation/go/getting-started.md
@@ -0,0 +1,693 @@
+---
+title: Getting Started
+weight: 10
+---
+
+Welcome to the OpenTelemetry for Go getting started guide! This guide will walk
+you through the basic steps in installing, instrumenting with, configuring, and
+exporting data from OpenTelemetry. Before you get started, be sure to have Go
+1.16 or newer installed.
+
+Understanding how a system is functioning when it is failing or having issues is
+critical to resolving those issues. One strategy to understand this is with
+tracing. This guide shows how the OpenTelemetry Go project can be used to trace
+an example application. You will start with an application that computes
+Fibonacci numbers for users, and from there you will add instrumentation to
+produce tracing telemetry with OpenTelemetry Go.
+
+For reference, a complete example of the code you will build can be found
+[here](https://github.com/open-telemetry/opentelemetry-go/tree/main/example/fib).
+
+To start building the application, make a new directory named `fib` to house your
+Fibonacci project. Next, add the following to a new file named `fib.go` in that
+directory.
+
+```go
+package main
+
+// Fibonacci returns the n-th fibonacci number.
+func Fibonacci(n uint) (uint64, error) {
+ if n <= 1 {
+ return uint64(n), nil
+ }
+
+ var n2, n1 uint64 = 0, 1
+ for i := uint(2); i < n; i++ {
+ n2, n1 = n1, n1+n2
+ }
+
+ return n2 + n1, nil
+}
+```
+
+With your core logic added, you can now build your application around it. Add a
+new `app.go` file with the following application logic.
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "log"
+)
+
+// App is a Fibonacci computation application.
+type App struct {
+ r io.Reader
+ l *log.Logger
+}
+
+// NewApp returns a new App.
+func NewApp(r io.Reader, l *log.Logger) *App {
+ return &App{r: r, l: l}
+}
+
+// Run starts polling users for Fibonacci number requests and writes results.
+func (a *App) Run(ctx context.Context) error {
+ for {
+ n, err := a.Poll(ctx)
+ if err != nil {
+ return err
+ }
+
+ a.Write(ctx, n)
+ }
+}
+
+// Poll asks a user for input and returns the request.
+func (a *App) Poll(ctx context.Context) (uint, error) {
+ a.l.Print("What Fibonacci number would you like to know: ")
+
+ var n uint
+ _, err := fmt.Fscanf(a.r, "%d\n", &n)
+ return n, err
+}
+
+// Write writes the n-th Fibonacci number back to the user.
+func (a *App) Write(ctx context.Context, n uint) {
+ f, err := Fibonacci(n)
+ if err != nil {
+ a.l.Printf("Fibonacci(%d): %v\n", n, err)
+ } else {
+ a.l.Printf("Fibonacci(%d) = %d\n", n, f)
+ }
+}
+```
+
+With your application fully composed, you need a `main()` function to actually
+run the application. In a new `main.go` file add the following run logic.
+
+```go
+package main
+
+import (
+ "context"
+ "log"
+ "os"
+ "os/signal"
+)
+
+func main() {
+ l := log.New(os.Stdout, "", 0)
+
+ sigCh := make(chan os.Signal, 1)
+ signal.Notify(sigCh, os.Interrupt)
+
+ errCh := make(chan error)
+ app := NewApp(os.Stdin, l)
+ go func() {
+ errCh <- app.Run(context.Background())
+ }()
+
+ select {
+ case <-sigCh:
+ l.Println("\ngoodbye")
+ return
+ case err := <-errCh:
+ if err != nil {
+ l.Fatal(err)
+ }
+ }
+}
+```
+
+With the code complete, it is almost time to run the application. Before you can
+do that you need to initialize this directory as a Go module. From your
+terminal, run the command `go mod init fib` in the `fib` directory. This will
+create a `go.mod` file, which is used by Go to manage imports. Now you should be
+able to run the application!
+
+```console
+$ go run .
+What Fibonacci number would you like to know:
+42
+Fibonacci(42) = 267914296
+What Fibonacci number would you like to know:
+^C
+goodbye
+```
+
+The application can be exited with Ctrl+C. You should see output similar to the
+above; if not, go back and fix any errors.
+
+## Trace Instrumentation
+
+OpenTelemetry is split into two parts: an API to instrument code with, and SDKs
+that implement the API. To start integrating OpenTelemetry into any project, the
+API is used to define how telemetry is generated. To generate tracing telemetry
+in your application you will use the OpenTelemetry Trace API from the
+[`go.opentelemetry.io/otel/trace`] package.
+
+First, you need to install the necessary packages for the Trace API. Run the
+following command in your working directory.
+
+```sh
+go get go.opentelemetry.io/otel \
+ go.opentelemetry.io/otel/trace
+```
+
+Now that the packages are installed, you can start updating your application with
+the imports you will use in the `app.go` file.
+
+```go
+import (
+ "context"
+ "fmt"
+ "io"
+ "log"
+ "strconv"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+```
+
+With the imports added, you can start instrumenting.
+
+The OpenTelemetry Tracing API provides a [`Tracer`] to create traces. These
+[`Tracer`]s are designed to be associated with one instrumentation library. That
+way telemetry they produce can be understood to come from that part of a code
+base. To uniquely identify your application to the [`Tracer`] you will create a
+constant with the package name in `app.go`.
+
+```go
+// name is the Tracer name used to identify this instrumentation library.
+const name = "fib"
+```
+
+Using the fully qualified package name, something that should be unique for Go
+packages, is the standard way to identify a [`Tracer`]. If your example package
+name differs, be sure to update the name you use here to match.
+
+Everything should be in place now to start tracing your application. But first,
+what is a trace? And, how exactly should you build them for your application?
+
+To back up a bit, a trace is a type of telemetry that represents work being done
+by a service. A trace is a record of the connection(s) between participants
+processing a transaction, often through client/server requests processing and
+other forms of communication.
+
+Each part of the work that a service performs is represented in the trace by a
+span. Those spans are not just an unordered collection. Like the call stack of
+our application, those spans are defined with relationships to one another. The
+"root" span is the only span without a parent; it represents how a service
+request is started. All other spans have a parent relationship to another span
+in the same trace.
+
+If this last part about span relationships doesn't make complete sense now,
+don't worry. The most important takeaway is that each part of your code, which
+does some work, should be represented as a span. You will have a better
+understanding of these span relationships after you instrument your code, so
+let's get started.
+
+Start by instrumenting the `Run` method.
+
+```go
+// Run starts polling users for Fibonacci number requests and writes results.
+func (a *App) Run(ctx context.Context) error {
+ for {
+ // Each execution of the run loop, we should get a new "root" span and context.
+ newCtx, span := otel.Tracer(name).Start(ctx, "Run")
+
+ n, err := a.Poll(newCtx)
+ if err != nil {
+ span.End()
+ return err
+ }
+
+ a.Write(newCtx, n)
+ span.End()
+ }
+}
+```
+
+The above code creates a span for every iteration of the for loop. The span is
+created using a [`Tracer`] from the
+[global `TracerProvider`](https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
+You will learn more about [`TracerProvider`]s and handle the other side of
+setting up a global [`TracerProvider`] when you install an SDK in a later
+section. For now, as an instrumentation author, all you need to worry about is
+that you are using an appropriately named [`Tracer`] from a [`TracerProvider`]
+when you write `otel.Tracer(name)`.
+
+Next, instrument the `Poll` method.
+
+```go
+// Poll asks a user for input and returns the request.
+func (a *App) Poll(ctx context.Context) (uint, error) {
+ _, span := otel.Tracer(name).Start(ctx, "Poll")
+ defer span.End()
+
+ a.l.Print("What Fibonacci number would you like to know: ")
+
+ var n uint
+ _, err := fmt.Fscanf(a.r, "%d\n", &n)
+
+ // Store n as a string to not overflow an int64.
+ nStr := strconv.FormatUint(uint64(n), 10)
+ span.SetAttributes(attribute.String("request.n", nStr))
+
+ return n, err
+}
+```
+
+Similar to the `Run` method instrumentation, this adds a span to the method to
+track the computation performed. However, it also adds an attribute to annotate
+the span. This annotation is something you can add when you think a user of your
+application will want to see the state or details about the run environment when
+looking at telemetry.
+
+Finally, instrument the `Write` method.
+
+```go
+// Write writes the n-th Fibonacci number back to the user.
+func (a *App) Write(ctx context.Context, n uint) {
+ var span trace.Span
+ ctx, span = otel.Tracer(name).Start(ctx, "Write")
+ defer span.End()
+
+ f, err := func(ctx context.Context) (uint64, error) {
+ _, span := otel.Tracer(name).Start(ctx, "Fibonacci")
+ defer span.End()
+ return Fibonacci(n)
+ }(ctx)
+ if err != nil {
+ a.l.Printf("Fibonacci(%d): %v\n", n, err)
+ } else {
+ a.l.Printf("Fibonacci(%d) = %d\n", n, f)
+ }
+}
+```
+
+This method is instrumented with two spans. One to track the `Write` method
+itself, and another to track the call to the core logic with the `Fibonacci`
+function. Do you see how context is passed through the spans? Do you see how
+this also defines the relationship between spans?
+
+In OpenTelemetry Go the span relationships are defined explicitly with a
+`context.Context`. When a span is created a context is returned alongside the
+span. That context will contain a reference to the created span. If that context
+is used when creating another span the two spans will be related. The original
+span will become the new span's parent, and as a corollary, the new span is said
+to be a child of the original. This hierarchy gives traces structure, structure
+that helps show a computation path through a system. Based on what you
+instrumented above and this understanding of span relationships you should
+expect a trace for each execution of the run loop to look like this.
+
+```
+Run
+├── Poll
+└── Write
+ └── Fibonacci
+```
+
+A `Run` span will be a parent to both a `Poll` and `Write` span, and the `Write`
+span will be a parent to a `Fibonacci` span.
+
+Now how do you actually see the produced spans? To do this you will need to
+configure and install an SDK.
+
+## SDK Installation
+
+OpenTelemetry is designed to be modular in its implementation of the
+OpenTelemetry API. The OpenTelemetry Go project offers an SDK package,
+[`go.opentelemetry.io/otel/sdk`], that implements this API and adheres to the
+OpenTelemetry specification. To start using this SDK you will first need to
+create an exporter, but before anything can happen you need to install some
+packages. Run the following in the `fib` directory to install the trace STDOUT
+exporter and the SDK.
+
+```sh
+go get go.opentelemetry.io/otel/sdk \
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace
+```
+
+Now add the needed imports to `main.go`.
+
+```go
+import (
+ "context"
+ "io"
+ "log"
+ "os"
+ "os/signal"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/sdk/trace"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+```
+
+### Creating a Console Exporter
+
+The SDK connects telemetry from the OpenTelemetry API to exporters. Exporters
+are packages that allow telemetry data to be emitted somewhere - either to the
+console (which is what we're doing here), or to a remote system or collector for
+further analysis and/or enrichment. OpenTelemetry supports a variety of
+exporters through its ecosystem including popular open source tools like
+[Jaeger](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger),
+[Zipkin](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/zipkin), and
+[Prometheus](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/prometheus).
+
+To initialize the console exporter, add the following function to the `main.go`
+file:
+
+```go
+// newExporter returns a console exporter.
+func newExporter(w io.Writer) (trace.SpanExporter, error) {
+ return stdouttrace.New(
+ stdouttrace.WithWriter(w),
+ // Use human-readable output.
+ stdouttrace.WithPrettyPrint(),
+ // Do not print timestamps for the demo.
+ stdouttrace.WithoutTimestamps(),
+ )
+}
+```
+
+This creates a new console exporter with basic options. You will use this
+function later when you configure the SDK to send telemetry data to it, but
+first you need to make sure that data is identifiable.
+
+### Creating a Resource
+
+Telemetry data can be crucial to solving issues with a service. The catch is,
+you need a way to identify what service, or even what service instance, that
+data is coming from. OpenTelemetry uses a [`Resource`] to represent the entity
+producing telemetry. Add the following function to the `main.go` file to create
+an appropriate [`Resource`] for the application.
+
+```go
+// newResource returns a resource describing this application.
+func newResource() *resource.Resource {
+ r, _ := resource.Merge(
+ resource.Default(),
+ resource.NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.ServiceName("fib"),
+ semconv.ServiceVersion("v0.1.0"),
+ attribute.String("environment", "demo"),
+ ),
+ )
+ return r
+}
+```
+
+Any information you would like to associate with all telemetry data the SDK
+handles can be added to the returned [`Resource`]. This is done by registering
+the [`Resource`] with the [`TracerProvider`]. Something you can now create!
+
+### Installing a Tracer Provider
+
+You have your application instrumented to produce telemetry data and you have an
+exporter to send that data to the console, but how are they connected? This is
+where the [`TracerProvider`] is used. It is a centralized point where
+instrumentation will get a [`Tracer`] from and funnels the telemetry data from
+these [`Tracer`]s to export pipelines.
+
+The pipelines that receive and ultimately transmit data to exporters are called
+[`SpanProcessor`]s. A [`TracerProvider`] can be configured to have multiple span
+processors, but for this example you will only need to configure one.
+Update your `main` function in `main.go` with the following.
+
+```go
+func main() {
+ l := log.New(os.Stdout, "", 0)
+
+ // Write telemetry data to a file.
+ f, err := os.Create("traces.txt")
+ if err != nil {
+ l.Fatal(err)
+ }
+ defer f.Close()
+
+ exp, err := newExporter(f)
+ if err != nil {
+ l.Fatal(err)
+ }
+
+ tp := trace.NewTracerProvider(
+ trace.WithBatcher(exp),
+ trace.WithResource(newResource()),
+ )
+ defer func() {
+ if err := tp.Shutdown(context.Background()); err != nil {
+ l.Fatal(err)
+ }
+ }()
+ otel.SetTracerProvider(tp)
+
+ /* … */
+}
+```
+
+There's a fair amount going on here. First you are creating a console exporter
+that will export to a file. You are then registering the exporter with a new
+[`TracerProvider`]. This is done with a [`BatchSpanProcessor`] when it is passed
+to the [`trace.WithBatcher`] option. Batching data is a good practice and helps
+avoid overloading downstream systems. Finally, with the [`TracerProvider`]
+created, you are deferring a function to flush and stop it, and registering it
+as the global OpenTelemetry [`TracerProvider`].
+
+Do you remember in the previous instrumentation section when we used the global
+[`TracerProvider`] to get a [`Tracer`]? This last step, registering the
+[`TracerProvider`] globally, is what will connect that instrumentation's
+[`Tracer`] with this [`TracerProvider`]. This pattern, using a global
+[`TracerProvider`], is convenient, but not always appropriate.
+[`TracerProvider`]s can be explicitly passed to instrumentation or inferred from
+a context that contains a span. For this simple example using a global provider
+makes sense, but for more complex or distributed codebases these other ways of
+passing [`TracerProvider`]s may make more sense.
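+
+As a sketch of the explicit approach, instrumentation can accept a
+`trace.TracerProvider` from its caller and fall back to the global one only when
+none is supplied. The helper name below is hypothetical, and it assumes the API
+package `go.opentelemetry.io/otel/trace` is imported as `trace`:
+
+```go
+// newTracer returns a Tracer from the supplied provider, falling back to the
+// globally registered TracerProvider when none is given.
+func newTracer(tp trace.TracerProvider) trace.Tracer {
+	if tp == nil {
+		tp = otel.GetTracerProvider()
+	}
+	return tp.Tracer(name)
+}
+```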
+
+## Putting It All Together
+
+You should now have a working application that produces trace telemetry data!
+Give it a try.
+
+```console
+$ go run .
+What Fibonacci number would you like to know:
+42
+Fibonacci(42) = 267914296
+What Fibonacci number would you like to know:
+^C
+goodbye
+```
+
+A new file named `traces.txt` should be created in your working directory. All
+the traces created from running your application should be in there!
+
+## (Bonus) Errors
+
+At this point you have a working application and it is producing tracing
+telemetry data. Unfortunately, it was discovered that there is an error in the
+core functionality of the `fib` module.
+
+```console
+$ go run .
+What Fibonacci number would you like to know:
+100
+Fibonacci(100) = 3736710778780434371
+# …
+```
+
+But the 100-th Fibonacci number is `354224848179261915075`, not
+`3736710778780434371`! This application is only meant as a demo, but it
+shouldn't return wrong values. Update the `Fibonacci` function to return an
+error instead of computing incorrect values.
+
+```go
+// Fibonacci returns the n-th fibonacci number. An error is returned if the
+// fibonacci number cannot be represented as a uint64.
+func Fibonacci(n uint) (uint64, error) {
+ if n <= 1 {
+ return uint64(n), nil
+ }
+
+ if n > 93 {
+ return 0, fmt.Errorf("unsupported fibonacci number %d: too large", n)
+ }
+
+ var n2, n1 uint64 = 0, 1
+ for i := uint(2); i < n; i++ {
+ n2, n1 = n1, n1+n2
+ }
+
+ return n2 + n1, nil
+}
+```
+
+Great, you have fixed the code, but it would be ideal to include errors returned
+to a user in the telemetry data. Luckily, spans can be configured to communicate
+this information. Update the `Write` method in `app.go` with the following code.
+
+```go
+// Write writes the n-th Fibonacci number back to the user.
+func (a *App) Write(ctx context.Context, n uint) {
+ var span trace.Span
+ ctx, span = otel.Tracer(name).Start(ctx, "Write")
+ defer span.End()
+
+ f, err := func(ctx context.Context) (uint64, error) {
+ _, span := otel.Tracer(name).Start(ctx, "Fibonacci")
+ defer span.End()
+ f, err := Fibonacci(n)
+ if err != nil {
+ span.RecordError(err)
+ span.SetStatus(codes.Error, err.Error())
+ }
+ return f, err
+ }(ctx)
+ /* … */
+}
+```
+
+With this change any error returned from the `Fibonacci` function will mark that
+span as an error and record an event describing the error.
+
+This is a great start, but it is not the only error returned from the
+application. If a user requests a value that is not an unsigned integer, the
+application will fail. Update the `Poll` method with a similar fix to capture
+this error in the telemetry data.
+
+```go
+// Poll asks a user for input and returns the request.
+func (a *App) Poll(ctx context.Context) (uint, error) {
+ _, span := otel.Tracer(name).Start(ctx, "Poll")
+ defer span.End()
+
+ a.l.Print("What Fibonacci number would you like to know: ")
+
+ var n uint
+ _, err := fmt.Fscanf(a.r, "%d\n", &n)
+ if err != nil {
+ span.RecordError(err)
+ span.SetStatus(codes.Error, err.Error())
+ return 0, err
+ }
+
+ // Store n as a string to not overflow an int64.
+ nStr := strconv.FormatUint(uint64(n), 10)
+ span.SetAttributes(attribute.String("request.n", nStr))
+
+ return n, nil
+}
+```
+
+All that is left is updating imports for the `app.go` file to include the
+[`go.opentelemetry.io/otel/codes`] package.
+
+```go
+import (
+ "context"
+ "fmt"
+ "io"
+ "log"
+ "strconv"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace"
+)
+```
+
+With these fixes in place and the instrumentation updated, re-trigger the bug.
+
+```console
+$ go run .
+What Fibonacci number would you like to know:
+100
+Fibonacci(100): unsupported fibonacci number 100: too large
+What Fibonacci number would you like to know:
+^C
+goodbye
+```
+
+Excellent! The application no longer returns wrong values, and looking at the
+telemetry data in the `traces.txt` file you should see the error captured as an
+event.
+
+```json
+"Events": [
+ {
+ "Name": "exception",
+ "Attributes": [
+ {
+ "Key": "exception.type",
+ "Value": {
+ "Type": "STRING",
+ "Value": "*errors.errorString"
+ }
+ },
+ {
+ "Key": "exception.message",
+ "Value": {
+ "Type": "STRING",
+ "Value": "unsupported fibonacci number 100: too large"
+ }
+ }
+ ],
+ ...
+ }
+]
+```
+
+## What's Next
+
+This guide has walked you through adding tracing instrumentation to an
+application and using a console exporter to send telemetry data to a file. There
+are many other topics to cover in OpenTelemetry, but you should be ready to
+start adding OpenTelemetry Go to your projects at this point. Go instrument your
+code!
+
+For more information about instrumenting your code and things you can do with
+spans, refer to the [manual instrumentation](/docs/instrumentation/go/manual/)
+documentation.
+
+You'll also want to configure an appropriate exporter to
+[export your telemetry data](/docs/instrumentation/go/exporters/) to one or more
+telemetry backends.
+
+[`go.opentelemetry.io/otel/trace`]:
+ https://pkg.go.dev/go.opentelemetry.io/otel/trace
+[`go.opentelemetry.io/otel/sdk`]:
+ https://pkg.go.dev/go.opentelemetry.io/otel/sdk
+[`go.opentelemetry.io/otel/codes`]:
+ https://pkg.go.dev/go.opentelemetry.io/otel/codes
+[`tracer`]: https://pkg.go.dev/go.opentelemetry.io/otel/trace#Tracer
+[`tracerprovider`]:
+ https://pkg.go.dev/go.opentelemetry.io/otel/trace#TracerProvider
+[`resource`]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Resource
+[`spanprocessor`]:
+ https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace#SpanProcessor
+[`batchspanprocessor`]:
+ https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace#NewBatchSpanProcessor
+[`trace.withbatcher`]:
+ https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace#WithBatcher
diff --git a/content/en/docs/instrumentation/go/libraries.md b/content/en/docs/instrumentation/go/libraries.md
new file mode 100644
index 000000000..8a3ff0129
--- /dev/null
+++ b/content/en/docs/instrumentation/go/libraries.md
@@ -0,0 +1,114 @@
+---
+title: Using instrumentation libraries
+linkTitle: Libraries
+aliases:
+ - /docs/instrumentation/go/using_instrumentation_libraries
+ - /docs/instrumentation/go/automatic_instrumentation
+weight: 40
+---
+
+Unlike some other languages, Go does not support truly automatic instrumentation
+today. Instead, you'll need to depend on
+[instrumentation libraries](/docs/specs/otel/glossary/#instrumentation-library)
+that generate telemetry data for a particular instrumented library. For example,
+the instrumentation library for `net/http` will automatically create spans that
+track inbound and outbound requests once you configure it in your code.
+
+## Setup
+
+Each instrumentation library is a package. In general, this means you need to
+`go get` the appropriate package:
+
+```sh
+go get go.opentelemetry.io/contrib/instrumentation/{import-path}/otel{package-name}
+```
+
+And then configure it in your code based on what the library requires to be
+activated.
+
+## Example with `net/http`
+
+As an example, here's how you can set up automatic instrumentation for inbound
+HTTP requests for `net/http`:
+
+First, get the `net/http` instrumentation library:
+
+```sh
+go get go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
+```
+
+Next, use the library to wrap an HTTP handler in your code:
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "net/http"
+ "time"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Package-level tracer.
+// This should be configured in your code setup instead of here.
+var tracer = otel.Tracer("github.com/full/path/to/mypkg")
+
+// sleepy mocks work that your application does.
+func sleepy(ctx context.Context) {
+ _, span := tracer.Start(ctx, "sleep")
+ defer span.End()
+
+ sleepTime := 1 * time.Second
+ time.Sleep(sleepTime)
+ span.SetAttributes(attribute.Int("sleep.duration", int(sleepTime)))
+}
+
+// httpHandler is an HTTP handler function that is going to be instrumented.
+func httpHandler(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintf(w, "Hello, World! I am instrumented automatically!")
+ ctx := r.Context()
+ sleepy(ctx)
+}
+
+func main() {
+ // Wrap your httpHandler function.
+ handler := http.HandlerFunc(httpHandler)
+ wrappedHandler := otelhttp.NewHandler(handler, "hello-instrumented")
+ http.Handle("/hello-instrumented", wrappedHandler)
+
+ // And start the HTTP server.
+ log.Fatal(http.ListenAndServe(":3030", nil))
+}
+```
+
+Assuming that you have a `Tracer` and [exporter](../exporters/) configured, this
+code will:
+
+- Start an HTTP server on port `3030`
+- Automatically generate a span for each inbound HTTP request to
+ `/hello-instrumented`
+- Create a child span of the automatically-generated one that tracks the work
+ done in `sleepy`
+
+Connecting manual instrumentation you write in your app with instrumentation
+generated from a library is essential to get good observability into your apps
+and services.
+
+## Available packages
+
+A full list of instrumentation libraries available can be found in the
+[OpenTelemetry registry](/ecosystem/registry/?language=go&component=instrumentation).
+
+## Next steps
+
+Instrumentation libraries can do things like generate telemetry data for inbound
+and outbound HTTP requests, but they don't instrument your actual application.
+
+To get richer telemetry data, use [manual instrumentation](../manual/) to enrich
+your telemetry data from instrumentation libraries with instrumentation from
+your running application.
diff --git a/content/en/docs/instrumentation/go/manual.md b/content/en/docs/instrumentation/go/manual.md
new file mode 100644
index 000000000..c88ad68bc
--- /dev/null
+++ b/content/en/docs/instrumentation/go/manual.md
@@ -0,0 +1,302 @@
+---
+title: Manual Instrumentation
+linkTitle: Manual
+aliases:
+ - /docs/instrumentation/go/instrumentation
+ - /docs/instrumentation/go/manual_instrumentation
+weight: 30
+---
+
+Instrumentation is the process of adding observability code to your application.
+There are two general types of instrumentation - automatic and manual - and you
+should be familiar with both in order to effectively instrument your software.
+
+## Getting a Tracer
+
+To create spans, you'll need to acquire or initialize a tracer first.
+
+### Initializing a new tracer
+
+Ensure you have the right packages installed:
+
+```sh
+go get go.opentelemetry.io/otel \
+ go.opentelemetry.io/otel/trace \
+ go.opentelemetry.io/otel/sdk
+```
+
+Then initialize an exporter, resources, tracer provider, and finally a tracer.
+
+```go
+package app
+
+import (
+ "context"
+ "fmt"
+ "log"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/sdk/resource"
+ sdktrace "go.opentelemetry.io/otel/sdk/trace"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+var tracer trace.Tracer
+
+func newExporter(ctx context.Context) /* (someExporter.Exporter, error) */ {
+ // Your preferred exporter: console, jaeger, zipkin, OTLP, etc.
+}
+
+func newTraceProvider(exp sdktrace.SpanExporter) *sdktrace.TracerProvider {
+ // Ensure default SDK resources and the required service name are set.
+ r, err := resource.Merge(
+ resource.Default(),
+ resource.NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.ServiceName("ExampleService"),
+ ),
+ )
+
+ if err != nil {
+ panic(err)
+ }
+
+ return sdktrace.NewTracerProvider(
+ sdktrace.WithBatcher(exp),
+ sdktrace.WithResource(r),
+ )
+}
+
+func main() {
+ ctx := context.Background()
+
+ exp, err := newExporter(ctx)
+ if err != nil {
+ log.Fatalf("failed to initialize exporter: %v", err)
+ }
+
+ // Create a new tracer provider with a batch span processor and the given exporter.
+ tp := newTraceProvider(exp)
+
+ // Handle shutdown properly so nothing leaks.
+ defer func() { _ = tp.Shutdown(ctx) }()
+
+ otel.SetTracerProvider(tp)
+
+ // Finally, set the tracer that can be used for this package.
+ tracer = tp.Tracer("ExampleService")
+}
+```
+
+You can now access `tracer` to manually instrument your code.
+
+## Creating Spans
+
+Spans are created by tracers. If you don't have one initialized, you'll need to
+do that.
+
+To create a span with a tracer, you'll also need a handle on a `context.Context`
+instance. These will typically come from things like a request object and may
+already contain a parent span from an [instrumentation library][].
+
+```go
+func httpHandler(w http.ResponseWriter, r *http.Request) {
+ ctx, span := tracer.Start(r.Context(), "hello-span")
+ defer span.End()
+
+ // do some work to track with hello-span
+}
+```
+
+In Go, the `context` package is used to store the active span. When you start a
+span, you'll get a handle on not only the span that's created, but also the
+modified context that contains it.
+
+Once a span has completed, it is immutable and can no longer be modified.
+
+### Get the current span
+
+To get the current span, you'll need to pull it out of a `context.Context` you
+have a handle on:
+
+```go
+// This context needs to contain the active span you plan to extract.
+ctx := context.TODO()
+span := trace.SpanFromContext(ctx)
+
+// Do something with the current span, optionally calling `span.End()` if you want it to end
+```
+
+This can be helpful if you'd like to add information to the current span at a
+point in time.
+
+### Create nested spans
+
+You can create a nested span to track work in a nested operation.
+
+If the current `context.Context` you have a handle on already contains a span
+inside of it, creating a new span makes it a nested span. For example:
+
+```go
+func parentFunction(ctx context.Context) {
+ ctx, parentSpan := tracer.Start(ctx, "parent")
+ defer parentSpan.End()
+
+ // call the child function and start a nested span in there
+ childFunction(ctx)
+
+ // do more work - when this function ends, parentSpan will complete.
+}
+
+func childFunction(ctx context.Context) {
+ // Create a span to track `childFunction()` - this is a nested span whose parent is `parentSpan`
+ ctx, childSpan := tracer.Start(ctx, "child")
+ defer childSpan.End()
+
+ // do work here, when this function returns, childSpan will complete.
+}
+```
+
+Once a span has completed, it is immutable and can no longer be modified.
+
+### Span Attributes
+
+Attributes are keys and values that are applied as metadata to your spans and
+are useful for aggregating, filtering, and grouping traces. Attributes can be
+added at span creation, or at any other time during the lifecycle of a span
+before it has completed.
+
+```go
+// setting attributes at creation...
+ctx, span = tracer.Start(ctx, "attributesAtCreation", trace.WithAttributes(attribute.String("hello", "world")))
+// ... and after creation
+span.SetAttributes(attribute.Bool("isTrue", true), attribute.String("stringAttr", "hi!"))
+```
+
+Attribute keys can be precomputed, as well:
+
+```go
+var myKey = attribute.Key("myCoolAttribute")
+span.SetAttributes(myKey.String("a value"))
+```
+
+#### Semantic Attributes
+
+Semantic Attributes are attributes that are defined by the [OpenTelemetry
+Specification][] in order to provide a shared set of attribute keys across
+multiple languages, frameworks, and runtimes for common concepts like HTTP
+methods, status codes, user agents, and more. These attributes are available in
+the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
+
+For details, see [Trace semantic conventions][].
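+
+For example, a span for an HTTP request might be annotated with the conventional
+keys from that package. This is a sketch; the attribute values are illustrative:
+
+```go
+import semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+
+// Annotate the current span with semantic convention attributes.
+span.SetAttributes(
+	semconv.HTTPMethodKey.String("GET"),
+	semconv.HTTPStatusCodeKey.Int(200),
+)
+```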
+
+### Events
+
+An event is a human-readable message on a span that represents "something
+happening" during it's lifetime. For example, imagine a function that requires
+exclusive access to a resource that is under a mutex. An event could be created
+at two points: once when we try to gain access to the resource, and again when
+we acquire the mutex.
+
+```go
+span.AddEvent("Acquiring lock")
+mutex.Lock()
+span.AddEvent("Got lock, doing work...")
+// do stuff
+span.AddEvent("Unlocking")
+mutex.Unlock()
+```
+
+A useful characteristic of events is that their timestamps are displayed as
+offsets from the beginning of the span, allowing you to easily see how much time
+elapsed between them.
+
+Events can also have attributes of their own:
+
+```go
+span.AddEvent("Cancelled wait due to external signal", trace.WithAttributes(attribute.Int("pid", 4328), attribute.String("signal", "SIGHUP")))
+```
+
+### Set span status
+
+A status can be set on a span, typically used to specify that there was an error
+in the operation a span is tracking: `Error`.
+
+```go
+import (
+ // ...
+ "go.opentelemetry.io/otel/codes"
+ // ...
+)
+
+// ...
+
+result, err := operationThatCouldFail()
+if err != nil {
+ span.SetStatus(codes.Error, "operationThatCouldFail failed")
+}
+```
+
+By default, the status for all spans is `Unset`. In rare cases, you may also
+wish to set the status to `Ok`. This should generally not be necessary, though.
+
+### Record errors
+
+If you have an operation that failed and you wish to capture the error it
+produced, you can record that error.
+
+```go
+import (
+ // ...
+ "go.opentelemetry.io/otel/codes"
+ // ...
+)
+
+// ...
+
+result, err := operationThatCouldFail()
+if err != nil {
+ span.SetStatus(codes.Error, "operationThatCouldFail failed")
+ span.RecordError(err)
+}
+```
+
+It is highly recommended that you also set a span's status to `Error` when using
+`RecordError`, unless you do not wish to consider the span tracking a failed
+operation as an error span. The `RecordError` function does **not**
+automatically set a span status when called.
+
+## Creating Metrics
+
+The metrics API is currently unstable, documentation TBA.
+
+## Propagators and Context
+
+Traces can extend beyond a single process. This requires _context propagation_,
+a mechanism where identifiers for a trace are sent to remote processes.
+
+In order to propagate trace context over the wire, a propagator must be
+registered with the OpenTelemetry API.
+
+```go
+import (
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/propagation"
+)
+...
+otel.SetTextMapPropagator(propagation.TraceContext{})
+```
+
+> OpenTelemetry also supports the B3 header format
+> (`go.opentelemetry.io/contrib/propagators/b3`), for compatibility with existing
+> tracing systems that do not support the W3C TraceContext standard.
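+
+If you need more than one propagator, for example W3C TraceContext plus baggage,
+you can register a composite propagator. A minimal sketch:
+
+```go
+otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
+	propagation.TraceContext{},
+	propagation.Baggage{},
+))
+```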
+
+After configuring context propagation, you'll most likely want to use automatic
+instrumentation to handle the behind-the-scenes work of serializing the context.
+
+[opentelemetry specification]: /docs/specs/otel/
+[trace semantic conventions]: /docs/specs/otel/trace/semantic_conventions/
+[instrumentation library]: ../libraries/
diff --git a/content/en/docs/instrumentation/go/resources.md b/content/en/docs/instrumentation/go/resources.md
new file mode 100644
index 000000000..5e830fdb3
--- /dev/null
+++ b/content/en/docs/instrumentation/go/resources.md
@@ -0,0 +1,50 @@
+---
+title: Resources
+weight: 70
+---
+
+Resources are a special type of attribute that apply to all spans generated by a
+process. These should be used to represent underlying metadata about a process
+that's non-ephemeral — for example, the hostname of a process, or its
+instance ID.
+
+Resources should be assigned to a tracer provider at its initialization, and are
+created much like attributes:
+
+```go
+resources := resource.NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.ServiceNameKey.String("myService"),
+ semconv.ServiceVersionKey.String("1.0.0"),
+ semconv.ServiceInstanceIDKey.String("abcdef12345"),
+)
+
+provider := sdktrace.NewTracerProvider(
+ ...
+ sdktrace.WithResource(resources),
+)
+```
+
+Note the use of the `semconv` package to provide
+[conventional names](/docs/concepts/semantic-conventions/) for resource
+attributes. This helps ensure that consumers of telemetry produced with these
+semantic conventions can easily discover relevant attributes and understand
+their meaning.
+
+Resources can also be detected automatically through `resource.Detector`
+implementations. These `Detector`s may discover information about the currently
+running process, the operating system it is running on, the cloud provider
+hosting that operating system instance, or any number of other resource
+attributes.
+
+```go
+resources, _ := resource.New(context.Background(),
+ resource.WithFromEnv(), // pull attributes from OTEL_RESOURCE_ATTRIBUTES and OTEL_SERVICE_NAME environment variables
+ resource.WithProcess(), // This option configures a set of Detectors that discover process information
+ resource.WithOS(), // This option configures a set of Detectors that discover OS information
+ resource.WithContainer(), // This option configures a set of Detectors that discover container information
+ resource.WithHost(), // This option configures a set of Detectors that discover host information
+ resource.WithDetectors(thirdparty.Detector{}), // Bring your own external Detector implementation
+ resource.WithAttributes(attribute.String("foo", "bar")), // Or specify resource attributes directly
+)
+```
diff --git a/content/en/docs/instrumentation/go/sampling.md b/content/en/docs/instrumentation/go/sampling.md
new file mode 100644
index 000000000..296ea25f8
--- /dev/null
+++ b/content/en/docs/instrumentation/go/sampling.md
@@ -0,0 +1,35 @@
+---
+title: Sampling
+weight: 80
+---
+
+Sampling is a process that restricts the number of traces that are generated by
+a system. The exact sampler you should use depends on your specific needs, but
+in general you should make a decision at the start of a trace, and allow the
+sampling decision to propagate to other services.
+
+A sampler needs to be set on the tracer provider when it's configured, as
+follows:
+
+```go
+provider := sdktrace.NewTracerProvider(
+ sdktrace.WithSampler(sdktrace.AlwaysSample()),
+)
+```
+
+`AlwaysSample` and `NeverSample` are fairly self-explanatory. Always means that
+every trace will be sampled; conversely, Never means that no trace will be
+sampled. When you're getting started, or in a development environment, you'll
+almost always want to use `AlwaysSample`.
+
+Other samplers include:
+
+- `TraceIDRatioBased`, which will sample a fraction of traces, based on the
+ fraction given to the sampler. Thus, if you set this to .5, half of traces
+ will be sampled.
+- `ParentBased`, which behaves differently based on the incoming sampling
+ decision. In general, this will sample spans that have parents that were
+ sampled, and will not sample spans whose parents were _not_ sampled.
+
+When you're in production, you should consider using the `TraceIDRatioBased`
+sampler with the `ParentBased` sampler.
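+
+A minimal sketch of that combination follows; the `0.5` ratio is an arbitrary
+value chosen for illustration:
+
+```go
+provider := sdktrace.NewTracerProvider(
+	// Respect the parent's sampling decision when there is one, and sample
+	// half of the traces that have no parent.
+	sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.5))),
+)
+```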
diff --git a/hugo.yaml b/hugo.yaml
index d712ed05f..5bd7724af 100644
--- a/hugo.yaml
+++ b/hugo.yaml
@@ -179,8 +179,6 @@ module:
mounts:
- source: content/en
target: content
- - source: content-modules/opentelemetry-go/website_docs
- target: content/docs/instrumentation/go
- source: tmp/otel/specification
target: content/docs/specs/otel
- source: tmp/otlp/docs/specification.md
diff --git a/layouts/shortcodes/lang_instrumentation_index_head.md b/layouts/shortcodes/lang_instrumentation_index_head.md
deleted file mode 100644
index 26f771a27..000000000
--- a/layouts/shortcodes/lang_instrumentation_index_head.md
+++ /dev/null
@@ -1,40 +0,0 @@
-{{/*
- TODO: keep this duplicate of `layouts/shortcodes/docs/instrumentation/index-intro.md`
- only until the Go docs are updated.
-*/ -}}
-
-{{ $data := index $.Site.Data.instrumentation.languages (.Get 0) }}
-{{ $name := $data.name }}
-{{ $relUrl := printf "https://github.com/open-telemetry/opentelemetry-%s/releases" (.Get 0) -}}
-{{ $tracesStatus := $data.status.traces | humanize }}
-{{ $metricsStatus := $data.status.metrics | humanize }}
-{{ $logsStatus := $data.status.logs | humanize }}
-{{ if in "Stable Experimental" $tracesStatus }}
- {{ $tracesStatus = printf "[%s](/docs/specs/otel/versioning-and-stability/#%s)" $tracesStatus $data.status.traces }}
-{{ end }}
-{{ if in "Stable Experimental" $metricsStatus }}
- {{ $metricsStatus = printf "[%s](/docs/specs/otel/versioning-and-stability/#%s)" $metricsStatus $data.status.metrics }}
-{{ end }}
-{{ if in "Stable Experimental" $logsStatus }}
- {{ $logsStatus = printf "[%s](/docs/specs/otel/versioning-and-stability/#%s)" $logsStatus $data.status.logs }}
-{{ end }}
-This is the OpenTelemetry {{ $name }} documentation. OpenTelemetry is an
-observability framework -- an API, SDK, and tools that are designed to aid in
-the generation and collection of application telemetry data such as metrics,
-logs, and traces. This documentation is designed to help you understand how to
-get started using OpenTelemetry {{ $name }}.
-
-## Status and Releases
-
-The current status of the major functional components for OpenTelemetry {{ $name }} is
-as follows:
-
-| Traces | Metrics | Logs |
-| -------- | ------- | ------- |
-| {{ $tracesStatus }} | {{ $metricsStatus }} | {{ $logsStatus }} |
-
-For releases, including the [latest release][], see [Releases][].
-{{- .Inner }}
-
-[latest release]: {{ $relUrl }}/latest
-[Releases]: {{ $relUrl }}