[chore] Refcache refresh, oldest link is now 2024-08-06 (#6167)
Co-authored-by: Tiffany Hrabusa <30397949+tiffany76@users.noreply.github.com>
parent 748555c22f
commit 43e2cb3b4d
@@ -104,7 +104,9 @@ jobs:
       - name: Fail when refcache contains entries with HTTP status 4XX
         run: |
           if grep -B 1 -e '"StatusCode": 4' static/refcache.json; then
-            echo "Run 'npx gulp prune' to remove 4xx entries from the refcache"
+            echo "Run 'npm run _refcache:prune' to remove 404 entries from refcache.json,"
+            echo "or run './scripts/double-check-refcache-400s.mjs' locally to address"
+            echo "other 400-status entries."
             exit 1
           fi
       - name: Does the refcache need updating?
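For context, here is a minimal sketch of the kind of refcache entry this step flags. The `StatusCode` and `LastSeen` field names come from the retry script later in this commit; the example URL, the key order, and the timestamp format are assumptions:

```js
// Hypothetical fragment of static/refcache.json, written as a JS object.
// Serialized, the inner line reads `"StatusCode": 404`, which the grep
// matches; `-B 1` then also prints the preceding line (here, the URL key,
// assuming StatusCode is the first field).
const refcacheFragment = {
  'https://example.com/moved-or-deleted-page': {
    StatusCode: 404,
    LastSeen: '2024-08-06T00:00:00Z',
  },
};
console.log(JSON.stringify(refcacheFragment, null, 2));
```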
@@ -5,7 +5,7 @@ cSpell:ignore: bleh targetallocator
 
 If you’ve enabled
 [Target Allocator](/docs/kubernetes/operator/target-allocator/) service
-discovery on the [OpenTelemetry Operator](/docs/kubernetes/operator), and the
+discovery on the [OpenTelemetry Operator](/docs/kubernetes/operator/), and the
 Target Allocator is failing to discover scrape targets, there are a few
 troubleshooting steps that you can take to help you understand what’s going on
 and restore normal operation.
@@ -21,9 +21,8 @@ Kubernetes cluster.
 
 After you’ve deployed all of your resources to Kubernetes, make sure that the
 Target Allocator is discovering scrape targets from your
-[`ServiceMonitor`](https://prometheus-operator.dev/docs/operator/design/#servicemonitor)(s)
-or
-[`PodMonitor`](https://prometheus-operator.dev/docs/user-guides/getting-started/#using-podmonitors)(s).
+[`ServiceMonitor`](https://prometheus-operator.dev/docs/getting-started/design/#servicemonitor)(s)
+or [PodMonitor]s.
 
 Suppose that you have this `ServiceMonitor` definition:
 
@@ -386,9 +385,7 @@ Allocator will fail to discover scrape targets from that `ServiceMonitor`.
 
 {{% alert title="Tip" %}}
 
-The same applies if you’re using a
-[PodMonitor](https://prometheus-operator.dev/docs/user-guides/getting-started/#using-podmonitors).
-In that case, you would use a
+The same applies if you’re using a [PodMonitor]. In that case, you would use a
 [`podMonitorSelector`](https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspectargetallocatorprometheuscr)
 instead of a `serviceMonitorSelector`.
 
@@ -513,3 +510,6 @@ If you’re using `PodMonitor`, the same applies, except that it picks up
 Kubernetes pods that match on labels, namespaces, and named ports.
 
 {{% /alert %}}
+
+[PodMonitor]:
+  https://prometheus-operator.dev/docs/developer/getting-started/#using-podmonitors
@@ -55,7 +55,7 @@
 - name: Causely
   distribution: false
   nativeOTLP: true
-  url: https://github.com/Causely/documentation
+  url: https://www.causely.ai/blog/using-opentelemetry-and-the-otel-collector-for-logs-metrics-and-traces
   contact: support@causely.io
   oss: false
   commercial: true
@@ -68,7 +68,7 @@
 - name: Chronosphere
   distribution: false
   nativeOTLP: true
-  url: https://docs.chronosphere.io/ingest/otel/otel-ingest
+  url: https://docs.chronosphere.io/ingest/
   contact: support@chronosphere.io
   oss: false
   commercial: true
@@ -314,7 +314,7 @@
   commercial: true
 - name: Red Hat
   nativeOTLP: true
-  url: https://docs.openshift.com/container-platform/4.14/otel/otel-release-notes.html
+  url: https://docs.redhat.com/en/documentation/openshift_container_platform/4.17/html/red_hat_build_of_opentelemetry/
   contact: ploffay@redhat.com
   oss: true
   commercial: true
@@ -20,13 +20,18 @@ async function writeRefcache(cache) {
   console.log(`Updated ${CACHE_FILE} with fixed links.`);
 }
 
-async function retry404sAndUpdateCache() {
+// Retry HTTP status check for refcache URLs with non-200s and not 404
+async function retry400sAndUpdateCache() {
   const cache = await readRefcache();
   let updated = false;
 
   for (const [url, details] of Object.entries(cache)) {
     const { StatusCode, LastSeen } = details;
     if (isHttp2XX(StatusCode)) continue;
+    if (StatusCode === 404) {
+      console.log(`Skipping 404: ${url} (last seen ${LastSeen}).`);
+      continue;
+    }
 
     process.stdout.write(`Checking: ${url} (was ${StatusCode})... `);
     const status = await getUrlStatus(url);
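A condensed sketch of the triage rule this loop now implements: 2xx entries are left alone, 404s are left for the prune script, and everything else is re-checked. `isHttp2XX` is reimplemented inline here purely for illustration; the real helper lives in the project's link-check utilities and its exact semantics are an assumption:

```js
// Standalone sketch of the skip/recheck decision (not the project code).
const isHttp2XX = (status) => status >= 200 && status <= 299; // assumed semantics

function triage(statusCode) {
  if (isHttp2XX(statusCode)) return 'keep';  // healthy entry, nothing to do
  if (statusCode === 404) return 'prune';    // handled by `npm run _refcache:prune`
  return 'recheck';                          // re-fetch via getUrlStatus()
}

console.log(triage(200), triage(404), triage(503)); // keep prune recheck
```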
@@ -49,4 +54,4 @@ async function retry404sAndUpdateCache() {
   }
 }
 
-await retry404sAndUpdateCache();
+await retry400sAndUpdateCache();
@@ -67,7 +67,7 @@ export function isHttp2XX(status) {
 }
 
 export async function getUrlStatus(url) {
-  let status = 0; // await getUrlHeadless(url);
+  let status = await getUrlHeadless(url);
   if (!isHttp2XX(status)) {
     status = await getUrlInBrowser(url);
   }
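To illustrate the re-enabled two-step check (headless fetch first, full browser only when that does not return a 2xx), here is a hypothetical standalone usage; the import path and the URL are placeholders, not part of this commit:

```js
// Check a single URL with the two-step strategy shown above.
// './get-url-status.mjs' is an assumed module path for these helpers.
import { getUrlStatus, isHttp2XX } from './get-url-status.mjs';

const url = 'https://opentelemetry.io/docs/';
const status = await getUrlStatus(url);
console.log(`${url} -> ${status}${isHttp2XX(status) ? '' : ' (needs attention)'}`);
```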
static/refcache.json: 1898 changed lines. File diff suppressed because it is too large.