Merge main into dev-1.31 to keep in sync

This commit is contained in:
hacktivist123 2024-06-10 10:28:02 +01:00
commit aecc20f0ad
144 changed files with 22900 additions and 745 deletions

View File

@@ -122,6 +122,7 @@ aliases:
- Fale
- mattiaperi
sig-docs-ja-owners: # Admins for Japanese content
- bells17
- inductor
- nasa9084
sig-docs-ja-reviews: # PR reviews for Japanese content

View File

@@ -0,0 +1,676 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
version="1.1"
viewBox="0 0 768 768"
width="768"
height="768"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<defs
id="defs4">
<!-- Reusable paint definitions: one presentation class per fill colour, plus two linear gradients. -->
<style
id="style1">
/* Dark navy fill. */
.cls-1 {
fill: #042b56;
}
/* Every class draws fills only: stroke width is zero across the board. */
.cls-1, .cls-2, .cls-3, .cls-4, .cls-5, .cls-6, .cls-7, .cls-8, .cls-9, .cls-10, .cls-11, .cls-12, .cls-13, .cls-14, .cls-15, .cls-16, .cls-17 {
stroke-width: 0px;
}
/* Yellow. */
.cls-2 {
fill: #fdc939;
}
/* Pale mint. */
.cls-3 {
fill: #c0eddf;
}
/* Teal. */
.cls-4 {
fill: #089ca0;
}
/* No fill: combined with the zero stroke width above, cls-5 shapes render nothing. */
.cls-5 {
fill: none;
}
/* Kubernetes blue. */
.cls-6 {
fill: #326ce5;
}
.cls-7 {
fill: #eaeaea;
}
/* Light-blue gradient defined below. */
.cls-8 {
fill: url(#linear-gradient);
}
.cls-9 {
fill: #dae9f9;
}
.cls-10 {
fill: #00979b;
}
.cls-11 {
fill: #0cc1d1;
}
.cls-12 {
fill: #f4f4f4;
}
.cls-13 {
fill: #0444b5;
}
.cls-14 {
fill: #97d1ca;
}
.cls-15 {
fill: #e2e2e2;
}
/* Navy gradient defined below. */
.cls-16 {
fill: url(#linear-gradient-2);
}
/* White. */
.cls-17 {
fill: #fff;
}
</style>
<!-- Light-blue gradient (#cce0f2 at offset 0 to #dae9f9 at offset .9), referenced by .cls-8.
The gradientTransform flips Y and translates the user-space coordinates into the viewBox. -->
<linearGradient
id="linear-gradient"
x1="796"
y1="2213.2"
x2="930.20001"
y2="2290.7"
gradientTransform="matrix(1,0,0,-1,-256,2552)"
gradientUnits="userSpaceOnUse">
<stop
offset="0"
stop-color="#cce0f2"
id="stop1" />
<stop
offset=".9"
stop-color="#dae9f9"
id="stop2" />
</linearGradient>
<!-- Dark navy gradient (#001f39 to #042b56), referenced by .cls-16; same flipped-Y transform idea with a different offset. -->
<linearGradient
id="linear-gradient-2"
x1="796"
y1="1189.2"
x2="930.20001"
y2="1266.7"
gradientTransform="matrix(1,0,0,-1,0,2680)"
gradientUnits="userSpaceOnUse">
<stop
offset="0"
stop-color="#001f39"
id="stop3" />
<stop
offset=".9"
stop-color="#042b56"
id="stop4" />
</linearGradient>
</defs>
<g
id="g221">
<path
class="cls-8"
d="m 505.5,303.9 c 0.6,0.1 1.2,0.4 1.6,1 l 0.4,0.2 v 0 c 0.5,0.8 0.4,1.8 0,2.6 v 7.1 0 27.7 l 32,18.5 c 0,0 2,0.7 2.9,2.6 0.3,0.6 0.5,1.3 0.5,2 v 35.2 0 3.9 0 17.3 c 0,1.8 -0.6,3.4 -1.7,4.7 L 696.9,336.8 C 687.4,284.4 665.2,236.4 633.5,196.2 l -157.1,90.7 29.1,16.8 z"
id="path4"
style="fill:url(#linear-gradient)" />
<g
id="g112"
transform="translate(-256,-128)">
<g
id="g220">
<g
id="g219">
<g
id="g14">
<path
class="cls-1"
d="m 489.1,707.8 h -11.5 l -12.6,-20.2 -4.3,3.1 v 17.1 h -10.2 v -48 h 10.2 v 21.9 l 4,-5.6 13,-16.3 H 489 l -16.7,21.2 16.9,26.7 v 0 z"
id="path5" />
<path
class="cls-1"
d="m 520.3,707.8 -1.3,-4.7 h -0.5 c -1.1,1.7 -2.6,3 -4.6,4 -2,1 -4.2,1.4 -6.7,1.4 -4.3,0 -7.6,-1.2 -9.7,-3.5 -2.2,-2.3 -3.3,-5.6 -3.3,-10 v -23.9 h 10 v 21.4 c 0,2.6 0.5,4.6 1.4,6 0.9,1.3 2.4,2 4.5,2 2.1,0 4.8,-0.9 6.1,-2.8 1.3,-1.9 1.9,-5 1.9,-9.3 v -17.3 h 10 v 36.7 z"
id="path6" />
<path
class="cls-1"
d="m 558.9,670.5 c 4.3,0 7.7,1.7 10.2,5.1 2.4,3.4 3.7,8 3.7,13.9 0,5.9 -1.3,10.8 -3.8,14.1 -2.5,3.3 -6,5 -10.3,5 -4.3,0 -7.7,-1.6 -10.1,-4.7 h -0.7 l -1.7,4 h -7.6 v -51.1 h 10 v 11.9 c 0,1.5 0,3.9 -0.4,7.3 h 0.4 c 2.3,-3.6 5.8,-5.4 10.4,-5.4 h -0.1 z m -3.2,8 c -2.5,0 -4.3,0.8 -5.4,2.3 -1.1,1.5 -1.7,4 -1.8,7.5 v 1.1 c 0,3.9 0.6,6.8 1.8,8.5 1.2,1.7 3,2.6 5.6,2.6 2.6,0 3.7,-0.9 4.9,-2.8 1.2,-1.9 1.8,-4.6 1.8,-8.3 0,-3.7 -0.6,-6.3 -1.8,-8.1 -1.2,-1.8 -2.9,-2.7 -5,-2.7 v 0 z"
id="path7" />
<path
class="cls-1"
d="m 597.6,708.5 c -5.9,0 -10.5,-1.6 -13.8,-4.9 -3.3,-3.3 -5,-7.9 -5,-13.8 0,-5.9 1.5,-10.9 4.6,-14.3 3.1,-3.4 7.3,-5 12.7,-5 5.4,0 9.2,1.5 12.1,4.4 2.9,2.9 4.3,7 4.3,12.2 v 4.9 h -23.7 c 0,2.8 1,5.1 2.5,6.7 1.6,1.6 3.8,2.4 6.6,2.4 2.8,0 4.3,-0.2 6.3,-0.7 2,-0.5 4,-1.2 6.2,-2.2 v 7.7 c -1.8,0.9 -3.6,1.5 -5.6,2 -2,0.4 -4.4,0.6 -7.3,0.6 z m -1.4,-30.9 c -2.1,0 -3.8,0.7 -5,2 -1.2,1.3 -1.9,3.3 -2.1,5.7 h 14 c 0,-2.5 -0.7,-4.4 -1.9,-5.7 -1.2,-1.3 -2.9,-2 -5.1,-2 v 0 z"
id="path8" />
<path
class="cls-1"
d="m 641.2,670.5 c 1.4,0 2.5,0 3.4,0.3 l -0.8,9.4 c -0.8,-0.2 -1.8,-0.3 -3,-0.3 -3.2,0 -5.7,0.8 -7.5,2.5 -1.8,1.6 -2.7,3.9 -2.7,6.9 V 708 h -10 v -36.7 h 7.6 l 1.5,6.2 h 0.5 c 1.1,-2.1 2.7,-3.7 4.6,-5 1.9,-1.3 4,-1.9 6.3,-1.9 h 0.1 z"
id="path9" />
<path
class="cls-1"
d="m 685.1,707.8 h -10 v -21.4 c 0,-2.6 -0.5,-4.6 -1.4,-6 -0.9,-1.3 -2.4,-2 -4.5,-2 -2.1,0 -4.8,0.9 -6.1,2.8 -1.3,1.9 -1.9,5 -1.9,9.3 v 17.3 h -10 v -36.7 h 7.6 l 1.3,4.7 h 0.6 c 1.1,-1.8 2.7,-3.1 4.6,-4 2,-0.9 4.2,-1.4 6.7,-1.4 4.3,0 7.5,1.2 9.7,3.5 2.2,2.3 3.3,5.6 3.3,10 v 23.9 z"
id="path10" />
<path
class="cls-1"
d="m 712,708.5 c -5.9,0 -10.5,-1.6 -13.8,-4.9 -3.3,-3.3 -5,-7.9 -5,-13.8 0,-5.9 1.5,-10.9 4.6,-14.3 3.1,-3.4 7.3,-5 12.7,-5 5.4,0 9.2,1.5 12.1,4.4 2.9,2.9 4.3,7 4.3,12.2 v 4.9 h -23.7 c 0,2.8 1,5.1 2.5,6.7 1.6,1.6 3.8,2.4 6.6,2.4 2.8,0 4.3,-0.2 6.3,-0.7 2,-0.5 4,-1.2 6.2,-2.2 v 7.7 c -1.8,0.9 -3.6,1.5 -5.6,2 -2,0.4 -4.4,0.6 -7.3,0.6 z m -1.4,-30.9 c -2.1,0 -3.8,0.7 -5,2 -1.2,1.3 -1.9,3.3 -2.1,5.7 h 14 c 0,-2.5 -0.7,-4.4 -1.9,-5.7 -1.2,-1.3 -2.9,-2 -5.1,-2 v 0 z"
id="path11" />
<path
class="cls-1"
d="m 750.5,700.5 c 1.7,0 3.8,-0.4 6.3,-1.1 v 7.4 c -2.5,1.1 -5.6,1.7 -9.2,1.7 -3.6,0 -6.9,-1 -8.7,-3 -1.8,-2 -2.7,-5.1 -2.7,-9.1 v -17.7 h -4.8 v -4.2 l 5.5,-3.3 2.9,-7.7 h 6.4 v 7.8 h 10.3 v 7.5 h -10.3 v 17.7 c 0,1.4 0.4,2.5 1.2,3.1 0.8,0.7 1.9,1 3.2,1 h -0.1 z"
id="path12" />
<path
class="cls-1"
d="m 780.8,708.5 c -5.9,0 -10.5,-1.6 -13.8,-4.9 -3.3,-3.3 -5,-7.9 -5,-13.8 0,-5.9 1.5,-10.9 4.6,-14.3 3.1,-3.4 7.3,-5 12.7,-5 5.4,0 9.2,1.5 12.1,4.4 2.9,2.9 4.3,7 4.3,12.2 V 692 H 772 c 0,2.8 1,5.1 2.5,6.7 1.6,1.6 3.8,2.4 6.6,2.4 2.8,0 4.3,-0.2 6.3,-0.7 2,-0.5 4,-1.2 6.2,-2.2 v 7.7 c -1.8,0.9 -3.6,1.5 -5.6,2 -2,0.4 -4.4,0.6 -7.3,0.6 z m -1.4,-30.9 c -2.1,0 -3.8,0.7 -5,2 -1.2,1.3 -1.9,3.3 -2.1,5.7 h 14 c 0,-2.5 -0.7,-4.4 -1.9,-5.7 -1.2,-1.3 -2.9,-2 -5.1,-2 v 0 z"
id="path13" />
<path
class="cls-1"
d="m 829.5,696.9 c 0,3.8 -1.3,6.6 -3.9,8.6 -2.6,2 -6.5,3 -11.7,3 -5.2,0 -4.9,-0.2 -6.8,-0.5 -1.9,-0.4 -3.6,-0.9 -5.3,-1.6 v -8.3 c 1.9,0.9 4,1.6 6.3,2.2 2.3,0.6 4.4,0.9 6.2,0.9 3.6,0 5.4,-1 5.4,-3.1 0,-2.1 -0.2,-1.4 -0.7,-1.9 -0.5,-0.5 -1.3,-1 -2.5,-1.7 -1.2,-0.7 -2.8,-1.3 -4.7,-2.2 -2.8,-1.2 -4.9,-2.3 -6.2,-3.3 -1.3,-1 -2.3,-2.2 -2.9,-3.5 -0.6,-1.3 -0.9,-2.9 -0.9,-4.8 0,-3.3 1.3,-5.8 3.8,-7.6 2.5,-1.8 6.1,-2.7 10.7,-2.7 4.6,0 8.7,1 12.9,2.9 l -3,7.2 c -1.8,-0.8 -3.6,-1.4 -5.2,-1.9 -1.6,-0.5 -3.2,-0.8 -4.9,-0.8 -3,0 -4.4,0.8 -4.4,2.4 0,1.6 0.5,1.7 1.4,2.3 0.9,0.6 3,1.6 6.3,2.9 2.9,1.2 5,2.2 6.3,3.2 1.3,1 2.3,2.2 3,3.5 0.6,1.3 1,2.9 1,4.7 h -0.2 v 0 z"
id="path14" />
</g>
<g
id="g21">
<path
class="cls-1"
d="m 549.4,750.9 h -1.6 v -17.5 c 0,-1.6 0,-3.2 0.2,-4.9 l -0.5,0.5 c -0.2,0.2 -1.9,1.5 -5.1,4 l -0.9,-1.2 6.5,-4.9 h 1.4 z"
id="path15" />
<path
class="cls-1"
d="m 584.8,738.8 c 0,4.2 -0.6,7.3 -1.9,9.4 -1.3,2.1 -3.2,3 -5.8,3 -2.6,0 -4.4,-1 -5.7,-3.1 -1.3,-2.1 -2,-5.2 -2,-9.3 0,-4.1 0.6,-7.3 1.9,-9.3 1.3,-2 3.2,-3 5.8,-3 2.6,0 4.5,1 5.8,3.1 1.3,2.1 2,5.1 2,9.2 v 0 z m -13.7,0 c 0,3.7 0.5,6.5 1.5,8.2 1,1.7 2.5,2.7 4.4,2.7 1.9,0 3.6,-0.9 4.5,-2.8 0.9,-1.9 1.4,-4.6 1.4,-8.2 0,-3.6 -0.5,-6.2 -1.4,-8.1 -0.9,-1.9 -2.5,-2.8 -4.5,-2.8 -2,0 -3.6,0.9 -4.5,2.8 -0.9,1.9 -1.4,4.5 -1.4,8.1 v 0 z"
id="path16" />
<path
class="cls-1"
d="m 624.4,740 7,-13.1 h 1.8 l -8,14.7 v 9.3 h -1.7 v -9.1 l -8,-14.8 h 1.9 l 7,13.1 v 0 z"
id="path17" />
<path
class="cls-1"
d="m 659.8,750.9 h -13.1 v -24 h 13.1 v 1.5 h -11.4 v 9.1 h 10.8 v 1.5 h -10.8 v 10.3 h 11.4 v 1.5 0 z"
id="path18" />
<path
class="cls-1"
d="m 686.9,742.4 h -9.7 l -3.3,8.5 H 672 l 9.6,-24.1 h 1.1 l 9.4,24.1 h -1.9 z m -9.1,-1.5 h 8.6 l -3.3,-8.6 c -0.3,-0.7 -0.6,-1.6 -1,-2.8 -0.3,1 -0.6,2 -1,2.9 l -3.4,8.6 h 0.1 z"
id="path19" />
<path
class="cls-1"
d="m 707.3,740.4 v 10.4 h -1.7 v -24 h 5.7 c 3,0 5.2,0.5 6.6,1.6 1.4,1.1 2.1,2.8 2.1,5 0,2.2 -0.4,3 -1.3,4.1 -0.9,1.1 -2.1,1.9 -3.9,2.4 l 6.5,10.9 h -2 l -6.2,-10.4 h -6 z m 0,-1.4 h 4.6 c 2,0 3.6,-0.5 4.7,-1.4 1.1,-0.9 1.7,-2.2 1.7,-4 0,-1.8 -0.5,-3.2 -1.6,-4 -1.1,-0.8 -2.9,-1.2 -5.3,-1.2 h -4 V 739 Z"
id="path20" />
<path
class="cls-1"
d="m 748.9,744.7 c 0,2 -0.7,3.6 -2.2,4.8 -1.5,1.2 -3.4,1.8 -5.9,1.8 -2.5,0 -5.2,-0.3 -6.7,-1 v -1.7 c 1.7,0.7 3.9,1.1 6.6,1.1 2.7,0 3.5,-0.5 4.7,-1.4 1.2,-0.9 1.7,-2.1 1.7,-3.6 0,-1.5 -0.2,-1.7 -0.6,-2.3 -0.4,-0.6 -1,-1.1 -1.9,-1.6 -0.9,-0.5 -2.1,-1 -3.8,-1.6 -2.5,-0.8 -4.1,-1.8 -5.1,-2.7 -1,-0.9 -1.4,-2.3 -1.4,-3.9 0,-1.6 0.7,-3.3 2.1,-4.4 1.4,-1.1 3.2,-1.7 5.4,-1.7 2.2,0 4.4,0.4 6.3,1.3 l -0.6,1.4 c -2,-0.8 -3.9,-1.2 -5.7,-1.2 -1.8,0 -3.2,0.4 -4.2,1.2 -1,0.8 -1.6,1.9 -1.6,3.3 0,1.4 0.2,1.6 0.5,2.2 0.3,0.6 0.9,1.1 1.6,1.5 0.7,0.4 2,1 3.8,1.6 1.9,0.6 3.3,1.3 4.2,1.9 0.9,0.6 1.6,1.3 2.1,2.1 0.5,0.8 0.6,1.7 0.6,2.8 h 0.1 z"
id="path21" />
</g>
<g
id="g111">
<path
class="cls-3"
d="m 638.7,576.5 c -3,0 -6,-0.9 -8.7,-2.4 v 0 l -148.3,-85.5 -1.3,0.8 h -0.1 c -0.1,0.2 -0.3,0.3 -0.4,0.5 -0.8,0.7 -1.3,1.7 -1.3,2.8 v 56.9 c 0,1.8 0.7,3.6 1.8,4.9 l 145.2,84.6 c 4.1,2.4 8.7,3.6 13.3,3.6 v -66.1 h -0.2 z"
id="path22" />
<!-- NOTE(review): class cls-5 has fill:none and stroke-width 0 (see the style block),
so every cls-5 shape below renders nothing. The two-point "polygons" and the
out-and-back paths look like vector-editor export leftovers; confirm before removing. -->
<polygon
class="cls-5"
points="662.8,616.9 649.9,624.4 "
id="polygon22" />
<path
class="cls-5"
d="m 641.1,576.4 c 1.6,-0.2 3.2,-0.6 4.8,-1.2 -1.5,0.6 -3.2,1 -4.8,1.2 z"
id="path23" />
<path
class="cls-5"
d="m 630.6,574.5 c 1.2,0.6 2.3,1.1 3.6,1.4 -1.2,-0.3 -2.4,-0.8 -3.6,-1.4 z"
id="path24" />
<path
class="cls-5"
d="m 647.1,574.7 c 0.4,-0.2 0.8,-0.3 1.1,-0.6 v 0 c -0.4,0.2 -0.8,0.4 -1.1,0.6 z"
id="path25" />
<path
class="cls-5"
d="m 642.5,627 v 0 c 0.6,0 1.3,-0.2 1.9,-0.4 -0.6,0.2 -1.3,0.3 -1.9,0.4 z"
id="path26" />
<path
class="cls-5"
d="m 645.8,620.7 c 1.1,-0.4 2.1,-0.9 3.1,-1.4 l 13.9,-8 -13.9,8 c -1,0.6 -2.1,1.1 -3.1,1.4 z"
id="path27" />
<!-- Three nearly collinear points; invisible for the same reason as above. -->
<polygon
class="cls-5"
points="795.6,489.2 793.3,490.5 795.4,489.3 "
id="polygon27" />
<polygon
class="cls-5"
points="666.5,614.8 777.7,550.8 777.7,545.1 777.7,541.2 666.5,605.2 666.5,609.2 "
id="polygon28" />
<path
class="cls-5"
d="m 639.2,621.9 c 1.1,0 2.2,0 3.3,-0.3 0.6,0 1.1,-0.2 1.7,-0.4 -0.6,0 -1.1,0.3 -1.7,0.4 -1.1,0.2 -2.2,0.3 -3.3,0.3 z"
id="path28" />
<path
class="cls-5"
d="m 638.7,576.5 h 2.5 z"
id="path29" />
<path
class="cls-5"
d="M 637.5,627.2 Z"
id="path30" />
<path
class="cls-5"
d="m 641.1,627.1 h -0.4 z"
id="path31" />
<path
class="cls-14"
d="m 777.7,541.2 v -3.7 c 0,-0.9 -0.3,-1.1 -0.3,-1.1 0,0 -0.2,-0.2 -0.5,0 l -107.6,61.8 c -1.4,0.8 -2.7,3.3 -2.7,5.3 v 1.9 0 l 111.2,-64.1 h -0.1 z"
id="path32" />
<path
class="cls-14"
d="m 793.3,490.5 -145.1,83.7 v 0 c -0.4,0.2 -0.8,0.4 -1.1,0.6 -1.1,0.6 -2.3,1 -3.5,1.3 -0.8,0.2 -1.6,0.3 -2.4,0.4 h -2.5 v 29 h 0.4 -0.4 v 12.9 h 0.5 c 1.1,0 2.2,0 3.3,-0.4 1.6,-0.3 3.2,-0.9 4.7,-1.8 l 15.6,-9 v -4 c 0,-3.3 1.9,-6.9 4.5,-8.4 L 774.9,533 c 1.4,-0.8 2.9,-0.9 4.2,-0.2 1.4,0.8 2.1,2.4 2.1,4.5 v 1.6 l 17.6,-10.2 v -35.2 c 0,-0.7 -0.2,-1.4 -0.5,-2 -0.9,-1.9 -2.9,-2.6 -2.9,-2.6 h -0.2 c 0,0.1 -2,1.3 -2,1.3 v 0.2 h 0.1 z"
id="path33" />
<path
class="cls-14"
d="m 666.5,617.1 c 0,0.9 0.3,1.3 0.3,1.3 0,0 0.2,0 0.5,-0.2 l 107.6,-61.8 c 1.4,-0.8 2.7,-3.3 2.7,-5.3 v -0.5 l -111.2,64.1 v 2.3 h 0.1 z"
id="path34" />
<path
class="cls-14"
d="m 776.8,559.6 -107.7,61.8 c -0.8,0.4 -1.6,0.7 -2.3,0.7 -0.7,0 -1.3,-0.2 -1.8,-0.5 -1.4,-0.8 -2.1,-2.4 -2.1,-4.5 v -0.2 l -13,7.5 c -0.6,0.3 -1.2,0.6 -1.8,0.9 -1.2,0.5 -2.5,1 -3.7,1.3 -0.6,0.2 -1.3,0.3 -1.9,0.4 v 0 c -0.4,0 -0.9,0 -1.3,0.2 h -2.5 v 15.4 c 4.6,0 9.2,-1.2 13.3,-3.6 l 145.1,-84 c 1.2,-1.4 1.9,-3.1 1.9,-5 v -11.6 l -17.6,10.2 v 2.5 c 0,3.3 -1.9,6.9 -4.5,8.4 h -0.1 z"
id="path35" />
<path
class="cls-5"
d="m 638.7,576.5 v 0 c -1.5,0 -3,-0.2 -4.5,-0.6 -1.2,-0.3 -2.4,-0.8 -3.6,-1.4 -0.2,0 -0.5,-0.2 -0.7,-0.3 v 0 c 2.7,1.6 5.7,2.4 8.7,2.4 v 0 z"
id="path36" />
<!-- NOTE(review): both polygons below carry an empty points list, so they render
nothing; they appear to be export artifacts. Confirm before removing. -->
<polygon
class="cls-5"
points=""
id="polygon36" />
<polygon
class="cls-14"
points=""
id="polygon37" />
<!-- This path goes out and back along nearly the same curve, so it encloses at most a sliver of area. -->
<path
class="cls-14"
d="m 629.9,574.1 v 0 c 2.7,1.6 5.7,2.4 8.7,2.4 v 0 c -3,0 -6,-0.9 -8.7,-2.4 z"
id="path37" />
<path
class="cls-5"
d="m 638.7,576.5 h 2.5 c 0.8,0 1.6,-0.2 2.4,-0.4 1.2,-0.3 2.4,-0.8 3.5,-1.3 0.4,-0.2 0.8,-0.3 1.1,-0.6 v 0 c -0.7,0.4 -1.5,0.8 -2.3,1.1 -1.5,0.6 -3.2,1 -4.8,1.2 h -2.5 v 0 z"
id="path38" />
<path
class="cls-14"
d="m 647.1,574.7 c -1.1,0.6 -2.3,1 -3.5,1.3 1.2,-0.3 2.4,-0.8 3.5,-1.3 z"
id="path39" />
<path
class="cls-7"
d="m 515,470.5 36.6,21.1 C 533.8,481.4 519,472.8 514.9,470.5 Z"
id="path40" />
<path
class="cls-7"
d="m 763.6,479.1 c 0,1.5 -0.6,2.9 -1.6,4 l -113.6,65.7 c -3,1.7 -6.3,2.6 -9.7,2.6 v 25.1 h 2.5 c 1.6,-0.2 3.2,-0.6 4.8,-1.2 0.8,-0.3 1.5,-0.7 2.3,-1.1 v 0 l 141.8,-81.8 3.3,-1.9 2.3,-1.3 -32,-18.5 v 8.4 0 z"
id="path41" />
<path
class="cls-7"
d="m 548.9,462.3 c 9.2,5.3 5,12.7 14.3,18 9.2,5.3 13.5,-2 22.7,3.3 9.2,5.3 5,12.7 14.3,18 9.2,5.3 13.5,-2 22.7,3.3 9.1,5.2 5.9,18 15.8,18.5 V 401.2 c -3.3,0 -6.7,0.7 -9.7,2.4 l -98.9,57.1 c 6.7,1.7 10.9,-2.8 18.7,1.7 h 0.1 z"
id="path42" />
<path
class="cls-7"
d="m 639,523.3 v 0 c 10.6,0 7.1,-13.2 16.3,-18.5 9.2,-5.3 13.4,2 22.6,-3.2 9.2,-5.3 5,-12.6 14.2,-17.9 9.2,-5.3 13.4,2 22.6,-3.2 9.2,-5.3 5,-12.6 14.2,-17.9 7.5,-4.3 11.6,-0.3 17.9,-1.4 l -99.7,-57.5 c -2.7,-1.5 -5.7,-2.3 -8.6,-2.4 v 0 122.2 h 0.4 v -0.2 0 z"
id="path43" />
<path
class="cls-7"
d="m 630.6,574.5 c 1.2,0.6 2.3,1.1 3.6,1.4 1.5,0.4 3,0.6 4.5,0.6 v -25.1 c -3.3,0 -6.7,-0.9 -9.7,-2.6 L 515.1,482.4 c -0.8,-1.1 -1.4,-2.3 -1.4,-3.7 v -8.6 l -32.2,18.6 148.3,85.5 c 0.2,0 0.5,0.2 0.7,0.3 z"
id="path44" />
<polygon
class="cls-7"
points="629.9,574.1 481.6,488.6 "
id="polygon44" />
<polygon
class="cls-7"
points="790,492.4 648.2,574.1 793.3,490.5 "
id="polygon45" />
<polygon
class="cls-7"
points="629.9,574.1 481.6,488.6 "
id="polygon46" />
<path
class="cls-7"
d="m 629.9,574.1 v 0 c 0.2,0 0.5,0.2 0.7,0.3 -0.2,0 -0.5,-0.2 -0.7,-0.3 z"
id="path46" />
<polygon
class="cls-7"
points="629.9,574.1 481.6,488.6 "
id="polygon47" />
<path
class="cls-10"
d="m 639.1,627.2 h -0.5 2.1 -1.6 c -0.6,0 0,0 0,0 z"
id="path47" />
<path
class="cls-10"
d="m 632,626 c 1.2,0.4 2.5,0.7 3.8,0.9 0.5,0 1.1,0 1.6,0.2 -0.5,0 -1.1,0 -1.6,-0.2 -1.3,-0.2 -2.6,-0.5 -3.8,-0.9 z"
id="path48" />
<path
class="cls-10"
d="m 642.5,627 c -0.4,0 -0.9,0 -1.3,0.2 0.5,0 1,0 1.3,-0.2 z"
id="path49" />
<path
class="cls-10"
d="m 648.1,625.3 c -1.2,0.5 -2.5,1 -3.7,1.3 1.3,-0.3 2.5,-0.7 3.7,-1.3 z"
id="path50" />
<path
class="cls-10"
d="m 637.5,627.2 h 1.2 z"
id="path51" />
<polygon
class="cls-13"
points="777.7,545.1 781.3,543 798.9,532.8 781.3,543 "
id="polygon51" />
<polygon
class="cls-13"
points="662.8,607.3 647.2,616.3 "
id="polygon52" />
<path
class="cls-14"
d="m 639.1,605.6 h -0.4 z"
id="path52" />
<path
class="cls-14"
d="m 637.2,605.5 h 1.5 z"
id="path53" />
<polygon
class="cls-10"
points="763.6,465.7 759.2,468.2 763.6,470.7 "
id="polygon53" />
<path
class="cls-10"
d="m 646.6,533 c -2.3,1.3 -5,2 -7.6,2 h -0.4 v 4.4 h 0.4 c 2.8,0 5.6,-0.7 8.2,-2.2 l 116.4,-66.5 v -0.2 l -4.3,-2.5 -112.6,64.9 h -0.1 z"
id="path54" />
<path
class="cls-10"
d="m 630.9,537.3 -79.3,-45.7 c 34.2,19.7 79.3,45.7 79.3,45.7 z"
id="path55" />
<path
class="cls-6"
d="m 520.9,466 -6.4,-3.7 c -0.2,-0.1 -0.5,-0.2 -0.7,-0.2 v 3.6 l 3.8,2.2 z"
id="path56" />
<path
class="cls-13"
d="m 763.3,462 -7.4,4.2 -111,63.9 c -1.8,1.1 -3.9,1.6 -5.9,1.6 -2,0 -0.2,0 -0.4,0 v 3.3 h 0.4 c 0.2,0 0,0 0,0 2.6,0 5.2,-0.7 7.6,-2 l 112.6,-64.9 4.3,-2.5 v -3.8 h -0.3 v 0.2 z"
id="path57" />
<polygon
class="cls-6"
points="517.6,467.9 513.8,465.7 514.5,466.1 "
id="polygon57" />
<polygon
class="cls-6"
points="631.5,533 517.6,467.9 "
id="polygon58" />
<polygon
class="cls-12"
points="648.2,574.1 795.6,489.2 "
id="polygon59" />
<path
class="cls-13"
d="m 766.1,487.2 c 0,3.7 3,6.7 6.7,6.7 3.7,0 6.7,-3 6.7,-6.7 0,-3.7 -3,-6.7 -6.7,-6.7 -3.7,0 -6.7,3 -6.7,6.7 z"
id="path59" />
<path
class="cls-13"
d="m 726.6,511.1 c 0,3.7 3,6.7 6.7,6.7 3.7,0 6.7,-3 6.7,-6.7 0,-3.7 -3,-6.7 -6.7,-6.7 -3.7,0 -6.7,3 -6.7,6.7 z"
id="path60" />
<path
class="cls-13"
d="m 682.5,535.6 c 0,3.7 3,6.7 6.7,6.7 3.7,0 6.7,-3 6.7,-6.7 0,-3.7 -3,-6.7 -6.7,-6.7 -3.7,0 -6.7,3 -6.7,6.7 z"
id="path61" />
<circle
class="cls-6"
cx="638.70001"
cy="561.40002"
r="6.6999998"
id="circle61" />
<path
class="cls-6"
d="m 587.1,537.9 c 0,3.7 3,6.7 6.7,6.7 3.7,0 6.7,-3 6.7,-6.7 0,-3.7 -3,-6.7 -6.7,-6.7 -3.7,0 -6.7,3 -6.7,6.7 z"
id="path62" />
<path
class="cls-6"
d="m 540.2,511.6 c 0,3.7 3,6.7 6.7,6.7 3.7,0 6.7,-3 6.7,-6.7 0,-3.7 -3,-6.7 -6.7,-6.7 -3.7,0 -6.7,3 -6.7,6.7 z"
id="path63" />
<path
class="cls-6"
d="m 496.5,486.2 c 0,3.7 3,6.7 6.7,6.7 3.7,0 6.7,-3 6.7,-6.7 0,-3.7 -3,-6.7 -6.7,-6.7 -3.7,0 -6.7,3 -6.7,6.7 z"
id="path64" />
<path
class="cls-14"
d="m 665.5,620.7 c -1,-0.6 -1.6,-1.9 -1.6,-3.6 v -13.8 c 0,-2.9 1.7,-6.2 4,-7.5 L 775.5,534 c 1.1,-0.6 2.2,-0.7 3.1,-0.2 1,0.6 1.6,1.9 1.6,3.6 v 13.8 c 0,2.9 -1.7,6.2 -4,7.5 l -107.7,61.8 c -0.6,0.4 -1.2,0.5 -1.8,0.5 -0.6,0 -0.9,0 -1.3,-0.3 z"
id="path65" />
<g
id="g69">
<path
class="cls-11"
d="m 769.7,545.8 c 0,1.7 1.2,2.4 2.6,1.5 1.4,-0.9 2.6,-2.9 2.6,-4.6 0,-1.7 -1.2,-2.4 -2.6,-1.5 -1.5,0.8 -2.6,2.9 -2.6,4.6 z"
id="path66" />
<path
class="cls-11"
d="m 769.7,554.4 c 0,1.7 1.2,2.4 2.6,1.5 1.4,-0.9 2.6,-2.9 2.6,-4.6 0,-1.7 -1.2,-2.4 -2.6,-1.5 -1.5,0.8 -2.6,2.9 -2.6,4.6 z"
id="path67" />
<path
class="cls-4"
d="m 771,554.8 c 1.5,-0.8 2.6,-2.9 2.6,-4.6 0,-1.7 0,-0.6 0,-0.8 0.9,0 1.4,0.7 1.4,1.9 0,1.7 -1.2,3.7 -2.6,4.6 -1.2,0.7 -2.2,0.3 -2.5,-0.7 0.4,0 0.8,0 1.2,-0.3 H 771 Z"
id="path68" />
<path
class="cls-4"
d="m 771,546.3 c 1.5,-0.8 2.6,-2.9 2.6,-4.6 0,-1.7 0,-0.6 0,-0.8 0.9,0 1.4,0.7 1.4,1.9 0,1.7 -1.2,3.7 -2.6,4.6 -1.2,0.7 -2.2,0.3 -2.5,-0.7 0.4,0 0.8,0 1.2,-0.3 H 771 Z"
id="path69" />
</g>
<path
class="cls-10"
d="m 672.8,612.4 2.1,-1.2 c 0.6,-0.4 1.2,-1.4 1.2,-2.2 v -9.9 c 0,-0.9 -0.5,-1.2 -1.2,-0.9 l -2.1,1.2 c -0.6,0.4 -1.2,1.4 -1.2,2.2 v 9.9 c 0,0.9 0.5,1.2 1.2,0.9 z"
id="path70" />
<path
class="cls-5"
d="m 778.7,551.2 c 0,2.4 -1.5,5.2 -3.2,6.2 l -107.6,61.8 c -0.4,0.3 -1.1,0.5 -1.6,0.2 -0.5,-0.3 -0.9,-1.2 -0.9,-2.2 v -7.4 l 113.3,-65.3 v 6.8 0 z"
id="path71" />
<path
class="cls-5"
d="m 668.6,597.2 107.7,-61.8 c 0.4,-0.3 1.1,-0.5 1.6,-0.2 0.5,0.3 0.9,1.2 0.9,2.2 v 7 l -113.3,65.3 v -6.4 c 0,-2.4 1.5,-5.2 3.2,-6.2 h -0.1 z"
id="path72" />
<path
class="cls-5"
d="m 724.6,540.9 c 1.3,-2 3,-3.9 5.4,-5.6 -2.4,1.7 -4.1,3.6 -5.4,5.6 z"
id="path73" />
<path
class="cls-3"
d="m 630.9,537.3 c 0,0 -116.7,-67.3 -116.9,-67.4 h -0.2 c 0,-0.1 0,8.7 0,8.7 0,1.4 0.5,2.7 1.4,3.7 l 113.9,66.4 c 3,1.7 6.3,2.6 9.7,2.6 v -11.9 c -2.7,0 -5.4,-0.8 -7.8,-2.1 z"
id="path74" />
<path
class="cls-14"
d="M 648.4,548.8 762,483.1 c 1,-1.1 1.6,-2.5 1.6,-4 v -8.2 l -116.4,66.5 c -2.5,1.5 -5.3,2.2 -8.2,2.2 h -0.4 v 11.9 c 3.3,0 6.7,-0.9 9.7,-2.6 h 0.1 z"
id="path75" />
<path
class="cls-14"
d="m 751.7,459.3 c -1.7,1 -3.3,1.5 -4.8,1.8 -6.2,1.1 -10.4,-2.9 -17.9,1.4 -9.2,5.3 -5,12.6 -14.2,17.9 -9.2,5.3 -13.4,-2 -22.6,3.2 -9.2,5.3 -5,12.6 -14.2,17.9 -9.2,5.3 -13.4,-2 -22.6,3.2 -9.2,5.3 -5.7,18.5 -16.3,18.5 h -0.5 v 8.4 h 0.4 c 2,0 4.1,-0.5 5.9,-1.6 l 111,-63.9 7.4,-4.2 h 0.3 V 443 c -6.1,5.1 -3.6,11.5 -11.8,16.3 z"
id="path76" />
<path
class="cls-3"
d="m 622.8,504.8 c -9.2,-5.3 -13.5,2 -22.7,-3.3 -9.2,-5.3 -5,-12.7 -14.3,-18 -9.2,-5.3 -13.5,2 -22.7,-3.3 -9.2,-5.3 -5,-12.7 -14.3,-18 -7.8,-4.5 -12,0 -18.7,-1.7 -1.2,-0.3 -2.5,-0.8 -4,-1.6 -8.5,-4.9 -5.6,-11.5 -12.4,-16.7 V 462 c 0.2,0 0.5,0 0.7,0.2 l 6.4,3.7 112.2,64.2 c 1.7,1 3.6,1.5 5.6,1.6 v -8.4 c -10,-0.5 -6.8,-13.3 -15.8,-18.5 z"
id="path77" />
<path
class="cls-11"
d="m 631.5,533 -113.9,-65.2 -3.1,-1.8 -0.7,-0.4 v 4.1 h 0.2 c 0.2,0.2 0.5,0.4 0.9,0.6 4.1,2.4 18.9,10.9 36.7,21.1 l 79.3,45.7 c 2.4,1.4 5.1,2.1 7.8,2.1 v -4.4 c -2.5,0 -5,-0.7 -7.2,-2 z"
id="path78" />
<path
class="cls-6"
d="m 638.7,535 v -3.3 c -1.9,0 -3.8,-0.6 -5.6,-1.6 l -112.2,-64.2 -3.3,1.9 113.9,65.2 c 2.2,1.3 4.7,1.9 7.2,2 z"
id="path79" />
<path
class="cls-12"
d="m 516.1,434 115.8,66.9 c 4.4,2.6 9.9,2.6 14.3,0 l 117.4,-67.8 v 0 L 645.4,364.9 c -4.4,-2.6 -9.9,-2.6 -14.3,0 l -115,66.4 c -1,0.6 -1,2.1 0,2.7 z"
id="path80" />
<path
class="cls-12"
d="m 763.5,433.2 c -0.8,-1.3 -2.5,-1.8 -3.8,-1 l -115.3,66.4 c -3.3,1.9 -7.4,1.9 -10.7,0 L 517.8,431.8 c -1.3,-0.8 -3.1,-0.3 -3.8,1 -0.4,0.7 -0.4,1.6 -0.2,2.3 v 7 c 6.7,5.2 3.8,11.8 12.4,16.7 9.3,5.3 13.5,-2 22.7,3.3 9.2,5.3 5,12.7 14.3,18 9.2,5.3 13.5,-2 22.7,3.3 9.2,5.3 5,12.7 14.3,18 9.2,5.3 13.5,-2 22.7,3.3 9.2,5.3 5.8,18.4 16.2,18.5 v 0 c 10.6,0 7.1,-13.2 16.3,-18.5 9.2,-5.3 13.4,2 22.6,-3.2 9.2,-5.3 5,-12.6 14.2,-17.9 9.2,-5.3 13.4,2 22.6,-3.2 9.2,-5.3 5,-12.6 14.2,-17.9 9.2,-5.3 13.4,2.1 22.6,-3.2 8.3,-4.8 5.7,-11.2 11.8,-16.3 v -7.1 c 0.4,-0.8 0.5,-1.8 0,-2.6 h 0.1 z"
id="path81" />
<polygon
class="cls-15"
points="669.3,445.8 727.9,412 673.8,380.8 625.3,408.8 646.5,445.5 655,450.4 "
id="polygon81" />
<path
class="cls-10"
d="m 781.3,543 -3.6,2.1 v 6.1 c 0,2 -1.3,4.5 -2.7,5.3 l -107.6,61.8 c -0.3,0.2 -0.5,0.2 -0.5,0.2 0,0 -0.3,-0.4 -0.3,-1.3 v -8 l -3.6,2.1 -13.9,8 c -1,0.6 -2.1,1.1 -3.1,1.4 -0.5,0.2 -1.1,0.4 -1.6,0.5 -0.6,0 -1.1,0.3 -1.7,0.4 -1.1,0.2 -2.2,0.3 -3.3,0.3 -1.1,0 0,0 0,0 h -0.5 v 5.4 h 2.5 c 0.5,0 0.9,0 1.3,-0.2 v 0 c 0.6,0 1.3,-0.2 1.9,-0.4 1.3,-0.3 2.5,-0.7 3.7,-1.3 0.6,-0.3 1.2,-0.6 1.8,-0.9 l 13,-7.5 v 0.2 c 0,2.1 0.8,3.7 2.1,4.5 0.6,0.3 1.2,0.5 1.8,0.5 0.6,0 1.6,-0.2 2.3,-0.7 L 777,559.7 c 2.6,-1.5 4.5,-5.1 4.5,-8.4 v -2.5 l 17.6,-10.2 v -5.7 l -17.6,10.2 h -0.2 z"
id="path82" />
<path
class="cls-13"
d="m 781.3,539.1 v 0 -1.6 c 0,-2.1 -0.8,-3.7 -2.1,-4.5 -1.2,-0.7 -2.7,-0.6 -4.2,0.2 L 667.4,595 c -2.6,1.5 -4.5,5.1 -4.5,8.4 v 4 0 l -15.6,9 c -1.5,0.9 -3.1,1.4 -4.7,1.8 -1.1,0.2 -2.2,0.4 -3.3,0.4 h -0.5 v 3.4 h 0.5 c 1.1,0 2.2,0 3.3,-0.3 0.6,0 1.1,-0.2 1.7,-0.4 0.6,0 1.1,-0.3 1.6,-0.5 1.1,-0.4 2.1,-0.9 3.1,-1.4 l 13.9,-8 3.6,-2.1 v -3.9 0 -1.9 c 0,-2 1.3,-4.5 2.7,-5.3 l 107.6,-61.8 c 0.3,-0.2 0.5,0 0.5,0 0,0 0.3,0.2 0.3,1.1 v 7.7 l 3.6,-2.1 17.6,-10.2 v 0 -3.9 l -17.7,10.2 h 0.2 z"
id="path83" />
<path
class="cls-13"
d="m 808,479 -44.4,-25.7 v 4.9 l 42.3,24.5 c 3.4,2 5.4,5.5 5.4,9.4 0,3.9 -2,7.4 -5.4,9.4 l -161.7,93.7 c -3.4,1.9 -7.5,1.9 -10.9,0 l -19.8,-11.4 c -0.3,-0.2 -0.7,-0.3 -1.1,-0.3 -1.2,0 -2.1,0.9 -2.1,2.1 0,1.2 0.5,1.5 1.1,1.8 v 0 l 19.7,11.5 c 2.3,1.3 4.9,2 7.5,2 2.6,0 5.2,-0.7 7.5,-2 l 161.7,-93.7 c 4.7,-2.7 7.5,-7.6 7.5,-13 0,-5.4 -2.8,-10.3 -7.5,-13 z"
id="path84" />
<g
id="g93">
<g
id="g89">
<path
class="cls-14"
d="m 514.3,567.6 c -0.5,-0.3 -0.9,-0.3 -1.2,0 l -18.5,10.7 c 0.3,-0.2 0.7,0 1.2,0 l 10.5,6.1 18.5,-10.7 z"
id="path85" />
<path
class="cls-14"
d="m 531.8,600.9 v 9.4 c 0,0.5 -0.2,0.8 -0.5,1 l 18.5,-10.7 c 0.3,-0.2 0.5,-0.5 0.5,-1 v -9.4 z"
id="path86" />
<path
class="cls-14"
d="m 548.8,587.5 -10.5,-6.1 -18.5,10.7 10.5,6.1 c 0.9,0.5 1.6,1.6 1.6,2.6 l 18.5,-10.7 c 0,-1 -0.7,-2.1 -1.6,-2.6 z"
id="path87" />
<polygon
class="cls-14"
points="528,521.1 509.5,531.9 495.1,531.3 513.6,520.6 "
id="polygon87" />
<polygon
class="cls-14"
points="538.2,581.4 519.8,592.2 519.9,539.3 538.4,528.5 "
id="polygon88" />
<path
class="cls-14"
d="m 518.4,536.7 c 0.9,0.5 1.5,1.6 1.5,2.6 v 52.9 l 10.4,6.1 c 0.9,0.5 1.6,1.6 1.6,2.6 v 9.4 c 0,1 -0.7,1.4 -1.6,0.9 l -34.5,-19.9 c -0.9,-0.5 -1.6,-1.7 -1.6,-2.7 v -9.4 c 0,-1 0.7,-1.3 1.6,-0.8 l 10.5,6.1 v -39.1 c 0.1,0 -10.2,-0.8 -10.2,-0.8 -0.4,0 -0.7,0 -0.9,-0.3 -0.6,-0.3 -0.9,-1 -0.9,-1.7 v -10.4 c 0,-0.6 0.3,-0.9 0.8,-0.8 l 14.3,0.5 c 0.6,0 1,0.2 1.8,0.7 l 7.1,4.1 z"
id="path88" />
<path
class="cls-14"
d="m 536.9,525.9 -7.1,-4.1 c -0.8,-0.5 -1.3,-0.6 -1.8,-0.7 l -18.5,10.7 c 0.6,0 1,0.2 1.8,0.7 l 7.1,4.1 c 0.9,0.5 1.5,1.6 1.5,2.6 l 18.5,-10.7 c 0,-1 -0.6,-2.1 -1.5,-2.6 z"
id="path89" />
</g>
<g
id="g92">
<path
class="cls-14"
d="m 582,605 c -4.6,-2.7 -8.1,-8.2 -8,-23.2 v -8.2 c 0,-9.2 1.4,-13.4 3.5,-14.6 l -19.3,11.2 c -2.1,1.2 -3.4,5.4 -3.4,14.6 v 8.2 c 0,15 3.4,20.5 8,23.2 1.8,1 3.4,1.5 4.8,0.7 l 19.3,-11.2 c -1.3,0.8 -3,0.4 -4.8,-0.7 v 0 z"
id="path90" />
<path
class="cls-14"
d="m 582.2,546.3 c -6.5,-3.7 -11.7,-4.8 -15.4,-2.6 l -19.3,11.2 c 3.8,-2.2 9,-1.1 15.4,2.6 14,8.1 22.3,22.2 22.2,44.9 v 8.2 c 0,12.2 -2.5,19.2 -6.8,21.7 l 19.3,-11.2 c 4.4,-2.6 6.8,-9.5 6.8,-21.7 v -8.2 c 0,-22.8 -8.1,-36.8 -22.2,-44.9 z"
id="path91" />
<path
class="cls-14"
d="m 562.9,557.6 c 14,8.1 22.3,22.2 22.2,44.9 v 8.2 c 0,22.8 -8.4,27.2 -22.4,19.1 -14,-8.1 -22.2,-22 -22.1,-44.8 v -8.2 c 0,-22.8 8.3,-27.3 22.3,-19.2 z m 8,44.9 v -8.2 c 0,-15 -3.5,-20.6 -8.1,-23.3 -4.6,-2.7 -8.1,-1.1 -8.1,13.9 v 8.2 c 0,15 3.4,20.5 8,23.2 4.6,2.7 8.2,1.2 8.2,-13.8"
id="path92" />
</g>
</g>
<path
class="cls-11"
d="m 635.8,621.6 c -2.2,-0.4 -4.4,-1.1 -6.4,-2.3 L 614,610.4 v 0 c -0.4,-0.2 -0.8,-0.3 -1.2,-0.3 -1.4,0 -2.5,1.1 -2.5,2.5 0,1.4 0.4,1.6 1.1,2 l 17,9.9 c 1.2,0.7 2.4,1.2 3.6,1.6 1.2,0.4 2.5,0.7 3.8,0.9 0.5,0 1.1,0.2 1.6,0.2 h 1.2 v -5.4 c -0.9,0 -1.9,0 -2.8,-0.3 v 0 z"
id="path93" />
<path
class="cls-6"
d="m 635.8,618.1 c -1.6,-0.3 -3.2,-0.9 -4.7,-1.8 l -18.3,-10.5 c -0.2,0 -0.5,-0.2 -0.8,-0.2 -0.9,0 -1.7,0.8 -1.7,1.7 0,0.9 0.3,1.2 0.9,1.5 l 18.3,10.5 c 2,1.2 4.2,1.9 6.4,2.3 0.9,0.2 1.9,0.3 2.8,0.3 v -3.4 c -1,0 -1.9,-0.2 -2.8,-0.3 h -0.1 z"
id="path94" />
<path
class="cls-6"
d="m 471.5,482.7 42.3,-24.5 v -4.9 L 469.4,479 c -4.7,2.7 -7.5,7.6 -7.5,13 0,5.4 2.8,10.3 7.5,13 l 13.4,7.7 c 0.3,0 0.5,0.2 0.9,0.2 1.2,0 2.1,-0.9 2.1,-2.1 0,-1.2 -0.3,-1.4 -0.9,-1.7 l -13.3,-7.7 c -3.4,-2 -5.4,-5.5 -5.4,-9.4 0,-3.9 2,-7.4 5.4,-9.4 h -0.1 z"
id="path95" />
<path
class="cls-6"
d="m 484.9,532.1 -6.4,-3.7 h -0.1 v 3.9 0 l 4.6,2.7 c 0,0 0.5,0.3 1,0.3 0.9,0 1.7,-0.8 1.7,-1.7 0,-0.9 -0.8,-1.4 -0.8,-1.4 v 0 z"
id="path96" />
<path
class="cls-11"
d="m 484.6,535.8 -6.2,-3.6 v 5.7 l 3.7,2.2 c 0,0 0.6,0.3 1.1,0.3 1.4,0 2.5,-1.1 2.5,-2.5 0,-1.4 -1.2,-2.1 -1.2,-2.1 z"
id="path97" />
<g
id="g100">
<path
class="cls-13"
d="m 702.7,405.8 c 0,0 -6.3,-61.3 -7.1,-67.4 -0.8,-6.1 -4.3,-11.9 -4.3,-11.9 0,0 -37.2,-48 -39.9,-51 -2.7,-3 -6.5,-2.8 -6.5,-2.8 0,0 -37,3.8 -38.5,3.8 -1.5,0 -5.3,1.2 -5.3,1.2 l -6.9,2.4 c 5.7,-0.4 37.2,-4 37.2,-4 0,0 5.5,-1.8 8.7,2.4 3.2,4.2 40.1,52 40.1,52 0,0 3.4,4 4.2,11.1 0.8,7.1 7.1,67.4 7.1,67.4 0,0 0.8,4.5 -3,8.7 -3.8,4.2 -29.2,31.4 -29.2,31.4 0,0 -1,1.3 -2.6,1.8 2.1,-0.6 10.4,-4 11.9,-4.7 1.8,-0.8 2.8,-2.2 2.8,-2.2 0,0 25.3,-26.6 28.9,-30.4 3.6,-3.8 2.8,-7.9 2.8,-7.9 h -0.4 z"
id="path98" />
<path
class="cls-6"
d="m 691.2,408.9 c 0,0 -6.3,-60.3 -7.1,-67.4 -0.8,-7.1 -4.2,-11.1 -4.2,-11.1 0,0 -37,-47.8 -40.1,-52 -3.2,-4.2 -8.7,-2.4 -8.7,-2.4 0,0 -31.4,3.6 -37.2,4 -5.7,0.4 -6.7,6.5 -6.7,6.5 0,0 -10.9,47.8 -12.1,52.2 -1.2,4.3 0,10.7 0,10.7 0,0 25.9,66.4 27.3,69.8 1.4,3.4 4.7,4.7 4.7,4.7 l 45.3,26.7 c 1.3,0.6 2.4,0.6 3.4,0.4 v 0 c 0,0 0.2,0 0.5,-0.1 1.7,-0.6 2.6,-1.8 2.6,-1.8 0,0 25.5,-27.3 29.2,-31.4 3.8,-4.2 3,-8.7 3,-8.7 v 0 z"
id="path99" />
<path
class="cls-17"
d="m 671.9,391.9 c -0.7,-0.4 -4.5,-3.2 -5,-3.9 -0.5,-0.7 -1.2,-1.5 -1.9,-2.9 -0.7,-1.4 0.2,-3.1 0,-13.4 -0.2,-10.3 -3.4,-19.3 -3.6,-19.7 -0.2,-0.4 0,-1.2 0,-1.8 0,-0.6 0.6,-1 0.9,-1.6 0.3,-0.6 1.9,-1 3.4,-1.4 1.5,-0.4 3.6,-1.5 3.6,-1.5 2,-2.1 0,-7 -2.3,-7 -2.3,0 -5.4,3.2 -5.8,3.5 -0.4,0.3 -1.2,0.8 -2.1,0.6 -0.9,-0.2 -1.5,-1.1 -1.5,-1.1 -9.1,-19.1 -19.1,-23.7 -19.2,-24.7 -0.1,-1 0,0 -0.5,-1.8 -0.5,-1.8 0.3,-9.2 0.6,-11.3 0.3,-2.1 -0.9,-6.9 -3.3,-6.1 -2.4,0.8 -1.5,5.1 -1.5,8.2 0,3.1 -0.5,6 -0.5,6 -0.5,0.4 -0.6,2 -0.6,2 -13.1,-5.4 -20.7,2.2 -20.7,2.2 0,0 -0.4,-0.7 -1.4,-1.7 -1,-1 -1.9,-2.9 -2.9,-4.7 -1,-1.8 -2.4,-6.4 -4.3,-7.9 -1.9,-1.5 -3.2,0.2 -3.1,2.9 0.1,2.7 4.1,7.6 4.9,8.8 0.8,1.2 1.9,3.4 2.4,5.2 0.5,1.8 -0.4,2.7 -0.4,2.7 -6.9,10.7 -5.4,26.8 -5.4,26.8 l -1.4,0.5 c -1.4,0.5 -6.3,-1.5 -8.3,-2.1 -2,-0.6 -2.9,3.7 -1,5.9 1.9,2.2 7,1.7 7.6,1.7 0.6,0 1.6,0.7 2.1,1.3 0.5,0.6 0.8,0.7 1.4,1.1 0.6,0.4 0.7,1.7 0.7,1.7 3.5,18.4 9.5,26.1 10.3,27.3 0.8,1.1 1.6,1.8 1.9,5.6 0.3,3.8 -4.7,6.8 -4.7,9.9 0,3.1 1.1,4.9 3.1,5.2 2,0.3 3.1,-5.2 3.5,-6.8 0.4,-1.6 1.3,-2.1 2.1,-2.9 0.8,-0.8 1.7,-0.1 1.7,-0.1 12,12.5 21.7,12.3 21.7,12.3 0,0.7 1.1,2.3 1.7,3.2 0.6,0.9 0.9,4.7 1.3,7.8 0.4,3.1 1.7,6.1 2.4,6.6 0.7,0.5 3,1.7 3,-2 0,-3.7 -2.5,-8.7 -3,-11.4 -0.5,-2.7 0.2,-5.2 0.2,-5.2 11.1,-3.3 14.3,-15.3 14.6,-15.9 0.3,-0.6 2.5,0.3 3.1,1 0.6,0.7 5.4,5.5 5.8,6.3 0.4,0.8 3.9,2.6 4.1,-0.9 0.2,-3.5 -3.2,-5.9 -3.9,-6.3 v -0.2 z M 632.5,354 c 0.6,0.3 3.1,2.1 3.1,2.1 0,0 0.7,1.5 1,2.4 0.3,0.8 0.7,2.3 0.8,3.2 0,0.9 -0.2,1.8 -0.3,2.4 -0.2,0.5 -0.4,1.5 -0.9,1.8 -0.5,0.3 -2.5,0.2 -2.5,0.2 0,0 -0.4,-0.3 -1.3,-1.1 -0.9,-0.8 -2.4,-2.7 -2.5,-3.3 -0.2,-0.6 -0.5,-3.9 -0.5,-3.9 l 0.5,-2 1.3,-1.5 c 0,0 0.8,-0.5 1.4,-0.3 v 0 z m -0.6,-31.1 -1.5,17.5 c 0,0 -0.3,1.5 -0.9,1.8 -0.7,0.3 -1.3,0.2 -2,-0.9 -0.7,-1.1 -10.2,-16.5 -10.2,-16.5 0,0 4.7,-4.6 14.6,-1.8 v 0 z m -19.2,6.9 8.6,16.7 c 0,0 0.7,1.8 0.6,2.9 0,1.2 -0.7,1.9 -2.2,1.8 -1.5,-0.1 -11.5,-1.9 -11.5,-1.9 0,0 -0.3,-13.7 4.5,-19.5 z m 10.5,39.9 -5,11.9 c 0,0 -6.1,-8 
-8.8,-22.5 l 11,3.6 c 0,0 1.8,0.3 2.9,1.9 1,1.6 0,5 0,5 h -0.1 z m 0.9,19.3 6.3,-11.2 c 0,0 0.7,-1.4 1.7,-1.3 1,0.2 1.9,2.6 2.2,3.4 0.3,0.8 5,18.1 5,18.1 0,0 -7.5,0 -15.1,-9.1 h -0.1 z m 15.6,-40.3 c -0.9,-0.8 -1.4,-1.7 -1.5,-3.2 -0.1,-1.5 0,-18.7 0,-18.7 0,0 8.6,6.6 13.7,17.7 l -10.1,4.4 c 0,0 -1.3,0.6 -2.2,-0.3 v 0 z m 5.8,49.1 -3.8,-18.5 c 0,0 -0.3,-2.3 0.3,-3.3 0.6,-1 2.8,0.3 3,0.6 0.3,0.3 11.2,9.6 11.2,9.6 0,0 -2.4,8.8 -10.7,11.6 z m 12.7,-20 -11.6,-11.6 c 0,0 -1.6,-1.3 -1.6,-3.1 0,-1.8 1,-2.7 1.6,-3 0.6,-0.3 8.9,-5.9 8.9,-5.9 0,0 3.6,8.3 2.7,23.6 z"
id="path100" />
</g>
<g
id="g110">
<g
id="g109">
<g
id="g105">
<path
class="cls-13"
d="m 510,570.1 c -0.5,-0.3 -0.9,-0.3 -1.2,0 l -18.5,10.7 c 0.3,-0.2 0.7,0 1.2,0 l 10.5,6.1 18.5,-10.7 z"
id="path101" />
<path
class="cls-13"
d="m 527.5,603.4 v 9.4 c 0,0.5 -0.2,0.8 -0.5,1 l 18.5,-10.7 c 0.3,-0.2 0.5,-0.5 0.5,-1 v -9.4 z"
id="path102" />
<path
class="cls-13"
d="m 544.4,590 -10.5,-6.1 -18.5,10.7 10.5,6.1 c 0.9,0.5 1.6,1.6 1.6,2.6 L 546,592.6 c 0,-1 -0.7,-2.1 -1.6,-2.6 z"
id="path103" />
<polygon
class="cls-13"
points="523.6,523.7 505.1,534.4 490.8,533.9 509.3,523.1 "
id="polygon103" />
<polygon
class="cls-13"
points="533.9,584 515.4,594.7 515.6,541.8 534.1,531.1 "
id="polygon104" />
<path
class="cls-6"
d="m 514.1,539.2 c 0.9,0.5 1.5,1.6 1.5,2.6 v 52.9 l 10.4,6.1 c 0.9,0.5 1.6,1.6 1.6,2.6 v 9.4 c 0,1 -0.7,1.4 -1.6,0.9 l -34.5,-19.9 c -0.9,-0.5 -1.6,-1.7 -1.6,-2.7 v -9.4 c 0,-1 0.7,-1.3 1.6,-0.8 L 502,587 v -39.1 c 0.1,0 -10.2,-0.8 -10.2,-0.8 -0.4,0 -0.7,0 -0.9,-0.3 -0.6,-0.3 -0.9,-1 -0.9,-1.7 v -10.4 c 0,-0.6 0.3,-0.9 0.8,-0.8 l 14.3,0.5 c 0.6,0 1,0.2 1.8,0.7 l 7.1,4.1 z"
id="path104" />
<path
class="cls-13"
d="m 532.6,528.5 -7.1,-4.1 c -0.8,-0.5 -1.3,-0.6 -1.8,-0.7 l -18.5,10.7 c 0.6,0 1,0.2 1.8,0.7 l 7.1,4.1 c 0.9,0.5 1.5,1.6 1.5,2.6 l 18.5,-10.7 c 0,-1 -0.6,-2.1 -1.5,-2.6 z"
id="path105" />
</g>
<g
id="g108">
<path
class="cls-13"
d="m 577.7,607.6 c -4.6,-2.7 -8.1,-8.2 -8,-23.2 v -8.2 c 0,-9.2 1.4,-13.4 3.5,-14.6 l -19.3,11.2 c -2.1,1.2 -3.4,5.4 -3.4,14.6 v 8.2 c 0,15 3.4,20.5 8,23.2 1.8,1 3.4,1.5 4.8,0.7 l 19.3,-11.2 c -1.3,0.8 -3,0.4 -4.8,-0.7 v 0 z"
id="path106" />
<path
class="cls-13"
d="m 577.9,548.9 c -6.5,-3.7 -11.7,-4.8 -15.4,-2.6 l -19.3,11.2 c 3.8,-2.2 9,-1.1 15.4,2.6 14,8.1 22.3,22.2 22.2,44.9 v 8.2 c 0,12.2 -2.5,19.2 -6.8,21.7 l 19.3,-11.2 c 4.4,-2.6 6.8,-9.5 6.8,-21.7 v -8.2 C 600.1,571 592,557 577.9,548.9 Z"
id="path107" />
<path
class="cls-6"
d="m 558.6,560.1 c 14,8.1 22.3,22.2 22.2,44.9 v 8.2 c 0,22.8 -8.4,27.2 -22.4,19.1 -14,-8.1 -22.2,-22 -22.1,-44.8 v -8.2 c 0,-22.8 8.3,-27.3 22.3,-19.2 z m 8,44.9 v -8.2 c 0,-15 -3.5,-20.6 -8.1,-23.3 -4.6,-2.7 -8.1,-1.1 -8.1,13.9 v 8.2 c 0,15 3.4,20.5 8,23.2 4.6,2.7 8.2,1.2 8.2,-13.8"
id="path108" />
</g>
</g>
<path
class="cls-2"
d="m 523.6,526.9 c 0,3.1 -2.5,5.6 -5.6,5.6 -3.1,0 -5.6,-2.5 -5.6,-5.6 0,-3.1 2.5,-24.4 5.6,-24.4 3.1,0 5.6,21.3 5.6,24.4 z"
id="path109" />
<path
class="cls-2"
d="m 574.6,550.8 c 0,3.1 -2.5,5.6 -5.6,5.6 -3.1,0 -5.6,-2.5 -5.6,-5.6 0,-3.1 2.5,-24.4 5.6,-24.4 3.1,0 5.6,21.3 5.6,24.4 z"
id="path110" />
</g>
</g>
</g>
</g>
</g>
</g>
</svg>

After

Width: 768  |  Height: 768  |  Size: 39 KiB

View File

@@ -477,7 +477,8 @@ section#cncf {
}
// OCEAN NODES
#oceanNodes {
#oceanNodes, .td-home .k8s-overview {
padding-top: $ocean-nodes-padding-Y;
padding-bottom: $ocean-nodes-padding-Y;

View File

@@ -1413,4 +1413,101 @@ div.alert > em.javascript-required {
cursor: pointer;
text-align: right;
padding: 0.2rem;
}
// Special style for Kubernetes' 10th birthday
// Temporary homepage takeover: a banner section with an opt-out checkbox.
// Visibility logic (via :has()):
//   - checkbox UNCHECKED  -> birthday section shows, the section that
//     immediately follows it is hidden (see the sibling rule at the end);
//   - checkbox CHECKED    -> the whole birthday section hides itself.
section.k8s-birthday-override {
background: #dae9f9; // Kubernetes light blue
// Flex row holding the banner image and the greeting text.
> .k8s-birthday-wrapper {
display: flex;
flex-direction: row;
justify-content: center;
align-items: center;
align-content: flex-end;
gap: 2rem;
img.birthday-banner {
max-height: 20rem;
max-width: 100vw;
margin-left: auto;
margin-right: auto;
aspect-ratio: 1;
flex-grow: 1;
}
p {
flex-grow: 4;
text-align: center;
color: $blue;
font-size: 2.5em;
padding-bottom: 2.5rem;
margin: 1em;
}
}
// "Revert to previous homepage" opt-out control (label + hidden checkbox).
div.k8s-birthday-override.revert-to-previous {
form {
display: block;
max-width: clamp(20rem, 30%, 100vw);
margin-left: auto;
padding-bottom: 0.5rem;
}
text-align: right;
label {
color: $blue;
padding-left: 1.2em;
padding-right: 1.2em;
text-decoration: underline;
font-size: 1.5rem;
}
input {
accent-color: $blue;
border-color: $blue;
// The label acts as the visible control; the checkbox itself is hidden.
visibility: hidden;
}
}
// Keep the opt-out control visible while its checkbox is unchecked.
div.k8s-birthday-override.revert-to-previous:has(input:not(:checked)) {
display: initial;
}
// User opted out: hide the entire birthday section.
&:has(input:checked) {
display: none;
}
}
// Narrow screens: let banner and text wrap onto separate rows.
@media screen and (max-width: 900px) {
section.k8s-birthday-override > .k8s-birthday-wrapper {
flex-wrap: wrap;
}
}
// NOTE(review): duplicate of the max-width: 900px query above — the two
// could be merged into one block; confirm this wasn't a merge artifact.
@media screen and (max-width: 900px) {
section.k8s-birthday-override > .k8s-birthday-wrapper {
gap: 0.2rem;
align-content: center;
p {
font-size: initial;
min-width: 80vw;
}
img.birthday-banner {
min-height: initial;
}
label {
font-size: 1.2rem;
}
}
}
// NOTE(review): a bare declaration inside a top-level @media has no
// selector and is invalid CSS — this block presumably belongs nested
// inside .k8s-birthday-wrapper; confirm the intended nesting.
@media screen and (max-aspect-ratio: 9/15) {
gap: 0.4rem;
}
// NOTE(review): this rule is not scoped to the birthday section, so it
// targets any img.birthday-banner on wide screens — verify that's intended.
@media screen and (min-width: 1281px) {
img.birthday-banner {
min-height: clamp(480px, 25rem, 100vh);
}
}
// While the birthday takeover is active (opt-out unchecked), suppress the
// section that immediately follows it (the regular homepage hero).
section.k8s-birthday-override:has(div.k8s-birthday-override.revert-to-previous input:not(:checked)) + section {
display: none;
visibility: hidden;
}

View File

@ -23,7 +23,7 @@ $video-section-height: 550px;
}
}
#oceanNodes {
#oceanNodes, .td-home .k8s-overview {
.main-section {
position: relative;

View File

@ -66,7 +66,7 @@ $feature-box-div-width: 45%;
}
}
#oceanNodes {
#oceanNodes, .td-home .k8s-overview {
h3 {
text-align: left;
margin-bottom: 18px;

View File

@ -30,7 +30,7 @@ cgroup v2 bietet einige Verbesserungen gegenüber cgroup v1, zum Beispiel folgen
- Einheitliche Erfassung für verschiedene Arten der Speicherzuteilung (Netzwerkspeicher, Kernelspeicher, usw.)
- Erfassung nicht-unmittelbarer Ressourcenänderungen wie "page cache write backs"
Manche Kubernetes Funktionen verwenden ausschließlich cgroup v2 für erweitertes Ressourcenmanagement und Isolierung. Die [MemoryQoS](/blog/2021/11/26/qos-memory-resources/) Funktion, zum Beispiel, verbessert Speicher QoS und setzt dabei auf cgroup v2 Primitives.
Manche Kubernetes Funktionen verwenden ausschließlich cgroup v2 für erweitertes Ressourcenmanagement und Isolierung. Die [MemoryQoS](/docs/concepts/workloads/pods/pod-qos/#memory-qos-with-cgroup-v2) Funktion, zum Beispiel, verbessert Speicher QoS und setzt dabei auf cgroup v2 Primitives.
## cgroup v2 verwenden {#cgroupv2-verwenden}

View File

@ -8,8 +8,9 @@ sitemap:
{{< site-searchbar >}}
{{< blocks/section id="oceanNodes" >}}
{{% blocks/feature image="flower" %}}
<!-- automatically replaced if Kubernetes birthday feature active -->
{{< blocks/section class="k8s-overview" >}}
{{% blocks/feature image="flower" id="feature-primary" %}}
[Kubernetes]({{< relref "/docs/concepts/overview/" >}}), also known as K8s, is an open source system for automating deployment, scaling, and management of containerized applications.
It groups containers that make up an application into logical units for easy management and discovery. Kubernetes builds upon [15 years of experience of running production workloads at Google](http://queue.acm.org/detail.cfm?id=2898444), combined with best-of-breed ideas and practices from the community.

View File

@ -48,37 +48,37 @@ Release Notes:
To download, please visit https://github.com/GoogleCloudPlatform/kubernetes/releases/tag/v0.15.0
[1]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6098 "Enabling v1beta3 api version by default in master"
[2]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6182 "Implement multi-port Services"
[3]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6505 "Docker multi-node"
[4]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5442 "Getting started guide for Mesos on Google Cloud Platform"
[5]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6237 "example ansible setup repo"
[6]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5270 "Controller framework"
[7]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5473 "Add DeltaFIFO (a controller framework piece)"
[8]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6380 "Configure the kubelet to use HTTPS (take 2)"
[9]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6338 "Return a typed error for config validation, and make errors simple"
[10]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6190 "Add client cert authentication"
[11]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6207 "Add a limit to the number of in-flight requests that a server processes."
[12]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6355 "Added rate limiting to pod deleting"
[13]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6150 "Implement Balanced Resource Allocation (BRA) algorithm as a PriorityFunction in scheduler package."
[14]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6396 "Enable log collection from master."
[15]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6497 "Pod log subresource"
[16]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6368 "Add basic latency metrics to scheduler."
[17]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6409 "Add latency metrics to REST client"
[18]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6221 "Run etcd 2.0.5 in a pod"
[19]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6334 "Add an nginx docker image for use on the master."
[20]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6326 "Create Docker images for master components "
[21]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6270 "Updates for gcloud 0.9.54"
[22]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6011 "Fix AWS region vs zone"
[23]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6091 "Record event when image GC fails."
[24]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6203 "Add a QPS limiter to the kubernetes client."
[25]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6196 "Parallelize architectures in both the building and packaging phases of `make release`"
[26]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5506 "add iscsi volume plugin"
[27]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6174 "implement glusterfs volume plugin"
[28]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5138 "AWS EBS volume support"
[29]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6331 "Update heapster version to v0.10.0"
[30]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6544 "Build etcd image (version 2.0.9), and upgrade kubernetes cluster to the new version"
[31]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6426 "Update Kibana to v1.2 which paramaterizes location of Elasticsearch"
[32]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6123 "Fix bug in kube-proxy of not updating iptables rules if a service's public IPs change"
[33]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6200 "Retry kube-addons creation if kube-addons creation fails."
[34]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6727 "pkg/proxy: panic if run out of fd"
[1]: https://github.com/kubernetes/kubernetes/pull/6098 "Enabling v1beta3 api version by default in master"
[2]: https://github.com/kubernetes/kubernetes/pull/6182 "Implement multi-port Services"
[3]: https://github.com/kubernetes/kubernetes/pull/6505 "Docker multi-node"
[4]: https://github.com/kubernetes/kubernetes/pull/5442 "Getting started guide for Mesos on Google Cloud Platform"
[5]: https://github.com/kubernetes/kubernetes/pull/6237 "example ansible setup repo"
[6]: https://github.com/kubernetes/kubernetes/pull/5270 "Controller framework"
[7]: https://github.com/kubernetes/kubernetes/pull/5473 "Add DeltaFIFO (a controller framework piece)"
[8]: https://github.com/kubernetes/kubernetes/pull/6380 "Configure the kubelet to use HTTPS (take 2)"
[9]: https://github.com/kubernetes/kubernetes/pull/6338 "Return a typed error for config validation, and make errors simple"
[10]: https://github.com/kubernetes/kubernetes/pull/6190 "Add client cert authentication"
[11]: https://github.com/kubernetes/kubernetes/pull/6207 "Add a limit to the number of in-flight requests that a server processes."
[12]: https://github.com/kubernetes/kubernetes/pull/6355 "Added rate limiting to pod deleting"
[13]: https://github.com/kubernetes/kubernetes/pull/6150 "Implement Balanced Resource Allocation (BRA) algorithm as a PriorityFunction in scheduler package."
[14]: https://github.com/kubernetes/kubernetes/pull/6396 "Enable log collection from master."
[15]: https://github.com/kubernetes/kubernetes/pull/6497 "Pod log subresource"
[16]: https://github.com/kubernetes/kubernetes/pull/6368 "Add basic latency metrics to scheduler."
[17]: https://github.com/kubernetes/kubernetes/pull/6409 "Add latency metrics to REST client"
[18]: https://github.com/kubernetes/kubernetes/pull/6221 "Run etcd 2.0.5 in a pod"
[19]: https://github.com/kubernetes/kubernetes/pull/6334 "Add an nginx docker image for use on the master."
[20]: https://github.com/kubernetes/kubernetes/pull/6326 "Create Docker images for master components "
[21]: https://github.com/kubernetes/kubernetes/pull/6270 "Updates for gcloud 0.9.54"
[22]: https://github.com/kubernetes/kubernetes/pull/6011 "Fix AWS region vs zone"
[23]: https://github.com/kubernetes/kubernetes/pull/6091 "Record event when image GC fails."
[24]: https://github.com/kubernetes/kubernetes/pull/6203 "Add a QPS limiter to the kubernetes client."
[25]: https://github.com/kubernetes/kubernetes/pull/6196 "Parallelize architectures in both the building and packaging phases of `make release`"
[26]: https://github.com/kubernetes/kubernetes/pull/5506 "add iscsi volume plugin"
[27]: https://github.com/kubernetes/kubernetes/pull/6174 "implement glusterfs volume plugin"
[28]: https://github.com/kubernetes/kubernetes/pull/5138 "AWS EBS volume support"
[29]: https://github.com/kubernetes/kubernetes/pull/6331 "Update heapster version to v0.10.0"
[30]: https://github.com/kubernetes/kubernetes/pull/6544 "Build etcd image (version 2.0.9), and upgrade kubernetes cluster to the new version"
[31]: https://github.com/kubernetes/kubernetes/pull/6426 "Update Kibana to v1.2 which paramaterizes location of Elasticsearch"
[32]: https://github.com/kubernetes/kubernetes/pull/6123 "Fix bug in kube-proxy of not updating iptables rules if a service's public IPs change"
[33]: https://github.com/kubernetes/kubernetes/pull/6200 "Retry kube-addons creation if kube-addons creation fails."
[34]: https://github.com/kubernetes/kubernetes/pull/6727 "pkg/proxy: panic if run out of fd"

View File

@ -311,310 +311,306 @@ Release Notes:
To download, please visit https://github.com/GoogleCloudPlatform/kubernetes/releases/tag/v0.17.0
Simple theme. Powered by [Blogger][385].
[ ![][327] ][386]
<!-- [ ![][327] ][386] -->
[1]: http://kubernetes.io/images/nav_logo.svg
[2]: http://kubernetes.io/docs/
[3]: https://kubernetes.io/blog/
[4]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8065 "Remove old salt configs"
[5]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8069 "Kubelet: minor cleanups"
[6]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7940 "update example/walkthrough to v1beta3"
[7]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7946 "update example/rethinkdb to v1beta3"
[8]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7917 "verify the v1beta3 yaml files all work; merge the yaml files"
[9]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7258 "update examples/cassandra to api v1beta3"
[10]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7899 "update service.json in persistent-volume example to v1beta3"
[11]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7864 "update mysql-wordpress example to use v1beta3 API"
[12]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7848 "Update examples/meteor to use API v1beta3"
[13]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7872 "update node-selector example to API v1beta3"
[14]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7824 "update logging-demo to use API v1beta3; modify the way to access Elasticsearch and Kibana services"
[15]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7619 "Convert the skydns rc to use v1beta3 and add a health check to it"
[16]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7728 "update the hazelcast example to API version v1beta3"
[17]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7515 "Fix YAML parsing for v1beta3 objects in the kubelet for file/http"
[18]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7502 "Updated kubectl cluster-info to show v1beta3 addresses"
[19]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7980 "kubelet: Fix racy kubelet tests."
[20]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8079 "kubelet/container: Move prober.ContainerCommandRunner to container."
[21]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6127 "Kubelet: set host field in the pending pod status"
[22]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6442 "Fix the kubelet node watch"
[23]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6607 "Kubelet: recreate mirror pod if the static pod changes"
[24]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7749 "Kubelet: record the timestamp correctly in the runtime cache"
[25]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7729 "Kubelet: wait until container runtime is up"
[26]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7674 "Kubelet: replace DockerManager with the Runtime interface"
[27]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7301 "Kubelet: filter out terminated pods in SyncPods"
[28]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7048 "Kubelet: parallelize cleaning up containers in unwanted pods"
[29]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7952 "kubelet: Add container runtime option for rkt."
[30]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7916 "kubelet/rkt: Remove build label."
[31]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7327 "kubelet/metrics: Move instrumented_docker.go to dockertools."
[32]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7599 "kubelet/rkt: Add GetPods() for rkt."
[33]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7605 "kubelet/rkt: Add KillPod() and GetPodStatus() for rkt."
[34]: https://github.com/GoogleCloudPlatform/kubernetes/pull/4755 "pkg/kubelet: Fix logging."
[35]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6491 "kubelet: Refactor RunInContainer/ExecInContainer/PortForward."
[36]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6609 "kubelet/DockerManager: Fix returning empty error from GetPodStatus()."
[37]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6634 "kubelet: Move pod infra container image setting to dockertools."
[38]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6653 "kubelet/fake_docker_client: Use self's PID instead of 42 in testing."
[39]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6778 "kubelet/dockertool: Move Getpods() to DockerManager."
[40]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6776 "kubelet/dockertools: Add puller interfaces in the containerManager."
[41]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6608 "kubelet: Introduce PodInfraContainerChanged()."
[42]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6795 "kubelet/container: Replace DockerCache with RuntimeCache."
[43]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6844 "kubelet: Clean up computePodContainerChanges."
[44]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7009 "kubelet: Refactor prober."
[45]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7466 "kubelet/container: Update the runtime interface."
[46]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7477 "kubelet: Refactor isPodRunning() in runonce.go"
[47]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7465 "kubelet/rkt: Add basic rkt runtime routines."
[48]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7555 "kubelet/rkt: Add podInfo."
[49]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7488 "kubelet/container: Add GetContainerLogs to runtime interface."
[50]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7543 "kubelet/rkt: Add routines for converting kubelet pod to rkt pod."
[51]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7589 "kubelet/rkt: Add RunPod() for rkt."
[52]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7553 "kubelet/rkt: Add RunInContainer()/ExecInContainer()/PortForward()."
[53]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7613 "kubelet/container: Move ShouldContainerBeRestarted() to runtime."
[54]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7611 "kubelet/rkt: Add SyncPod() to rkt."
[55]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6794 "Kubelet: persist restart count of a container"
[56]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7951 "kubelet/container: Move pty*.go to container runtime package."
[57]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7900 "kubelet: Add container runtime option for rkt."
[58]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7803 "kubelet/rkt: Add docker prefix to image string."
[59]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7849 "kubelet/rkt: Inject dependencies to rkt."
[60]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7859 "kubelet/rkt: Remove dependencies on rkt.store"
[61]: https://github.com/GoogleCloudPlatform/kubernetes/pull/2387 "Kubelet talks securely to apiserver"
[62]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7592 "Rename EnvVarSource.FieldPath -> FieldRef and add example"
[63]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7741 "Add containerized option to kubelet binary"
[64]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7948 "Ease building kubelet image"
[65]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7854 "Remove unnecessary bind-mount from dockerized kubelet run"
[66]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7798 "Add ability to dockerize kubelet in local cluster"
[67]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7797 "Create docker image for kubelet"
[68]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7343 "Security context - types, kubelet, admission"
[69]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7743 "Kubelet: Add rkt as a runtime option"
[70]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7746 "Fix kubelet's docker RunInContainer implementation "
[71]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8018 "AWS: Don't try to copy gce_keys in jenkins e2e job"
[72]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7992 "AWS: Copy some new properties from config-default => config.test"
[73]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7928 "AWS: make it possible to disable minion public ip assignment"
[74]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7667 "update AWS CloudFormation template and cloud-configs"
[75]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7736 "AWS: Fix variable naming that meant not all tokens were written"
[76]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7678 "AWS: Change apiserver to listen on 443 directly, not through nginx"
[77]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6606 "AWS: Improving getting existing VPC and subnet"
[78]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5138 "AWS EBS volume support"
[79]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8089 "Introduce an 'svc' segment for DNS search"
[80]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5707 "Adds ability to define a prefix for etcd paths"
[81]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7973 "Add kubectl log --previous support to view last terminated container log"
[82]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8083 "Add a flag to disable legacy APIs"
[83]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7971 "make the dockerkeyring handle mutiple matching credentials"
[84]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8078 "Convert Fluentd to Cloud Logging pod specs to YAML"
[85]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7704 "Use etcd to allocate PortalIPs instead of in-mem"
[86]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8064 "eliminate auth-path"
[87]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7981 "Record failure reasons for image pulling"
[88]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7869 "Rate limit replica creation"
[89]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7995 "Upgrade to Kibana 4 for cluster logging"
[90]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8049 "Added name to kube-dns service"
[91]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7919 "Fix validation by moving it into the resource builder."
[92]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8050 "Add cache with multiple shards to decrease lock contention"
[93]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8039 "Delete status from displayable resources"
[94]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8044 "Refactor volume interfaces to receive pod instead of ObjectReference"
[95]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7565 "fix kube-down for provider gke"
[96]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7786 "Service port names are required for multi-port"
[97]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8051 "Increase disk size for kubernetes master."
[98]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7774 "expose: Load input object for increased safety"
[99]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7896 "Improments to conversion methods generator"
[100]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7557 "Added displaying external IPs to kubectl cluster-info"
[101]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8037 "Add missing Errorf formatting directives"
[102]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7567 "WIP: Add startup code to apiserver to migrate etcd keys"
[103]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8021 "Use error type from docker go-client instead of string"
[104]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8024 "Accurately get hardware cpu count in Vagrantfile."
[105]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7921 "Stop setting a GKE specific version of the kubeconfig file"
[106]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7950 "Make the API server deal with HEAD requests via the service proxy"
[107]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7983 "GlusterFS Critical Bug Resolved - Removing warning in README"
[108]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7967 "Don't use the first token `uname -n` as the hostname"
[109]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7982 "Call kube-down in test-teardown for vagrant."
[110]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6235 "defaults_tests: verify defaults when converting to an API object"
[111]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7910 "Use the full hostname for mirror pod name."
[112]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7657 "Removes RunPod in the Runtime interface"
[113]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7533 "Clean up dockertools/manager.go and add more unit tests"
[114]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7525 "Adapt pod killing and cleanup for generic container runtime"
[115]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7198 "Fix pod filtering in replication controller"
[116]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7116 "Print container statuses in `kubectl get pods`"
[117]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6992 "Prioritize deleting the non-running pods when reducing replicas"
[118]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6872 "Fix locking issue in pod manager"
[119]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6655 "Limit the number of concurrent tests in integration.go"
[120]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7931 "Fix typos in different config comments"
[121]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7929 "Update cAdvisor dependency."
[122]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5498 "Ubuntu-distro: deprecate & merge ubuntu single node work to ubuntu cluster node stuff"
[123]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7935 "Add control variables to Jenkins E2E script"
[124]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7932 "Check node status as part of validate-cluster.sh."
[125]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7821 "Add old endpoint cleanup function"
[126]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7620 "Support recovery from in the middle of a rename."
[127]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7715 "Update Exec and Portforward client to use pod subresource"
[128]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7564 "Added NFS to PV structs"
[129]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7904 "Fix environment variable error in Vagrant docs"
[130]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7616 "Adds a simple release-note builder that scrapes the GitHub API for recent PRs"
[131]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7668 "Scheduler ignores nodes that are in a bad state"
[132]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7863 "Set GOMAXPROCS for etcd"
[133]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7556 "Auto-generated conversion methods calling one another"
[134]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7445 "Bring up a kuberenetes cluster using coreos image as worker nodes"
[135]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7410 "Godep: Add godep for rkt."
[136]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7870 "Add volumeGetter to rkt."
[137]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7897 "Update cAdvisor dependency."
[138]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7822 "DNS: expose 53/TCP"
[139]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7763 "Set NodeReady=False when docker is dead"
[140]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7857 "Ignore latency metrics for events"
[141]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7792 "SecurityContext admission clean up"
[142]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7832 "Support manually-created and generated conversion functions"
[143]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7833 "Add latency metrics for etcd operations"
[144]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7885 "Update errors_test.go"
[145]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7861 "Change signature of container runtime PullImage to allow pull w/ secret"
[146]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7873 "Fix bug in Service documentation: incorrect location of "selector" in JSON"
[147]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7876 "Fix controller-manager manifest for providers that don't specify CLUSTER_IP_RANGE"
[148]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7867 "Fix controller unittests"
[149]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7751 "Enable GCM and GCL instead of InfluxDB on GCE"
[150]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7862 "Remove restriction that cluster-cidr be a class-b"
[151]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7591 "Fix OpenShift example"
[152]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7718 "API Server - pass path name in context of create request for subresource"
[153]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7575 "Rolling Updates: Add support for --rollback."
[154]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7820 "Update to container-vm-v20150505 (Also updates GCE to Docker 1.6)"
[155]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7830 "Fix metric label"
[156]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7838 "Fix v1beta1 typos in v1beta2 conversions"
[157]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7764 "skydns: use the etcd-2.x native syntax, enable IANA attributed ports."
[158]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7794 "Added port 6443 to kube-proxy default IP address for api-server"
[159]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7834 "Added client header info for authentication doc."
[160]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7827 "Clean up safe_format_and_mount spam in the startup logs"
[161]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7829 "Set allocate_node_cidrs to be blank by default."
[162]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5246 "Make nodecontroller configure nodes' pod IP ranges"
[163]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7799 "Fix sync problems in #5246"
[164]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7823 "Fix event doc link"
[165]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7776 "Cobra update and bash completions fix"
[166]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7675 "Fix kube2sky flakes. Fix tools.GetEtcdVersion to work with etcd > 2.0.7"
[167]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7154 "Change kube2sky to use token-system-dns secret, point at https endpoint ..."
[168]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7468 "replica: serialize created-by reference"
[169]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7702 "Inject mounter into volume plugins"
[170]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6973 "bringing CoreOS cloud-configs up-to-date (against 0.15.x and latest OS' alpha) "
[171]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7787 "Update kubeconfig-file doc."
[172]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7780 "Throw an API error when deleting namespace in termination"
[173]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7773 "Fix command field PodExecOptions"
[174]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7785 "Start ImageManager housekeeping in Run()."
[175]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7769 "fix DeepCopy to properly support runtime.EmbeddedObject"
[176]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7273 "fix master service endpoint system for multiple masters"
[177]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7757 "Add genbashcomp to KUBE_TEST_TARGETS"
[178]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7669 "Change the cloud provider TCPLoadBalancerExists function to GetTCPLoadBalancer..."
[179]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7772 "Add containerized option to kubelet binary"
[180]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7779 "Fix swagger spec"
[181]: https://github.com/GoogleCloudPlatform/kubernetes/issues/7750 "Hyperkube image requires root certificates to work with cloud-providers (at least AWS)"
[182]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7755 "FIX: Issue #7750 - Hyperkube docker image needs certificates to connect to cloud-providers"
[183]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7752 "Add build labels to rkt"
[184]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7672 "Check license boilerplate for python files"
[185]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7705 "Reliable updates in rollingupdate"
[186]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7650 "Don't exit abruptly if there aren't yet any minions right after the cluster is created."
[187]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7742 "Make changes suggested in #7675"
[188]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7357 "A guide to set up kubernetes multiple nodes cluster with flannel on fedora"
[189]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7760 "Setup generators in factory"
[190]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7737 "Reduce usage of time.After"
[191]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7735 'Remove node status from "componentstatuses" call.'
[192]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7614 "React to failure by growing the remaining clusters"
[193]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7725 "Fix typo in runtime_cache.go"
[194]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7740 "Update non-GCE Salt distros to 1.6.0, fallback to ContainerVM Docker version on GCE"
[195]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7744 "Skip SaltStack install if it's already installed"
[196]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7712 "Expose pod name as a label on containers."
[197]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7732 "Log which SSH key is used in e2e SSH test"
[198]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7649 "Add a central simple getting started guide with kubernetes guide."
[199]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7443 "Explicitly state the lack of support for 'Requests' for the purposes of scheduling"
[200]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7721 "Select IPv4-only from host interfaces"
[201]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7723 "Metrics tests can't run on Mac"
[202]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7727 "Add step to API changes doc for swagger regen"
[203]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7703 "Add NsenterMounter mount implementation"
[204]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7509 "add StringSet.HasAny"
[205]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6941 "Add an integration test that checks for the metrics we expect to be exported from the master"
[206]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7722 "Minor bash update found by shellcheck.net"
[207]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7536 "Add --hostport to run-container."
[208]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7659 "Have rkt implement the container Runtime interface"
[209]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7629 "Change the order the different versions of API are registered "
[210]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7699 "expose: Create objects in a generic way"
[211]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7643 "Requeue rc if a single get/put retry on status.Replicas fails"
[212]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7316 "logs for master components"
[213]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7522 "cloudproviders: add ovirt getting started guide"
[214]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7671 "Make rkt-install a oneshot."
[215]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7665 "Provide container_runtime flag to Kubelet in CoreOS."
[216]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7654 "Boilerplate speedup"
[217]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7700 "Log host for failed pod in Density test"
[218]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7655 "Removes spurious quotation mark"
[219]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7694 "Add kubectl_label to custom functions in bash completion"
[220]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7696 "Enable profiling in kube-controller"
[221]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7690 "Set vagrant test cluster default NUM_MINIONS=2"
[222]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7695 "Add metrics to measure cache hit ratio"
[223]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7662 "Change IP to IP(S) in service columns for kubectl get"
[224]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7076 "annotate required flags for bash_completions"
[225]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7685 "(minor) Add pgrep debugging to etcd error"
[226]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7676 "Fixed nil pointer issue in describe when volume is unbound"
[227]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7691 "Removed unnecessary closing bracket"
[228]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7432 "Added TerminationGracePeriod field to PodSpec and grace-period flag to kubectl stop"
[229]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7689 "Fix boilerplate in test/e2e/scale.go"
[230]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7628 "Update expiration timeout based on observed latencies"
[231]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7644 "Output generated conversion functions/names"
[232]: https://github.com/GoogleCloudPlatform/kubernetes/issues/7645 "Move the scale tests into a separate file"
[233]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7646 "Moved the Scale tests into a scale file. #7645"
[234]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7609 "Truncate GCE load balancer names to 63 chars"
[235]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7603 "Add SyncPod() and remove Kill/Run InContainer()."
[236]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7663 "Merge release 0.16 to master"
[237]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7637 "Update license boilerplate for examples/rethinkdb"
[238]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7268 "First part of improved rolling update, allow dynamic next replication controller generation."
[239]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7638 "Add license boilerplate to examples/phabricator"
[240]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7597 "Use generic copyright holder name in license boilerplate"
[241]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7633 "Retry incrementing quota if there is a conflict"
[242]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7568 "Remove GetContainers from Runtime interface"
[243]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7578 "Add image-related methods to DockerManager"
[244]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7586 "Remove more docker references in kubelet"
[245]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7601 "Add KillContainerInPod in DockerManager"
[246]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7652 "Kubelet: Add container runtime option."
[247]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7626 "bump heapster to v0.11.0 and grafana to v0.7.0"
[248]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7593 "Build github.com/onsi/ginkgo/ginkgo as a part of the release"
[249]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7490 "Do not automatically decode runtime.RawExtension"
[250]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7500 "Update changelog."
[251]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7610 "Add SyncPod() to DockerManager and use it in Kubelet"
[252]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7602 "Build: Push .md5 and .sha1 files for every file we push to GCS"
[253]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7540 "Fix rolling update --image "
[254]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7636 "Update license boilerplate for docs/man/md2man-all.sh"
[255]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7632 "Include shell license boilerplate in examples/k8petstore"
[256]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7277 "Add --cgroup_parent flag to Kubelet to set the parent cgroup for pods"
[257]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7209 "change the current dir to the config dir"
[258]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7158 "Set Weave To 0.9.0 And Update Etcd Configuration For Azure"
[259]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7467 "Augment describe to search for matching things if it doesn't match the original resource."
[260]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7559 "Add a simple cache for objects stored in etcd."
[261]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7549 "Rkt gc"
[262]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7550 "Rkt pull"
[263]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6400 "Implement Mount interface using mount(8) and umount(8)"
[264]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7588 "Trim Fluentd tag for Cloud Logging"
[265]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7569 "GCE CoreOS cluster - set master name based on variable"
[266]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7535 "Capitalization of KubeProxyVersion wrong in JSON"
[267]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7530 "Make nodes report their external IP rather than the master's."
[268]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7539 "Trim cluster log tags to pod name and container name"
[269]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7541 'Handle conversion of boolean query parameters with a value of "false"'
[270]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7532 "Add image-related methods to Runtime interface."
[271]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7560 "Test whether auto-generated conversions weren't manually edited"
[272]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7484 "Mention :latest behavior for image version tag"
[273]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7487 "readinessProbe calls livenessProbe.Exec.Command which cause "invalid memory address or nil pointer dereference"."
[274]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7520 "Add RuntimeHooks to abstract Kubelet logic"
[275]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7546 "Expose URL() on Request to allow building URLs"
[276]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7288 "Add a simple cache for objects stored in etcd"
[277]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7431 "Prepare for chaining autogenerated conversion methods "
[278]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7353 "Increase maxIdleConnection limit when creating etcd client in apiserver."
[279]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7354 "Improvements to generator of conversion methods."
[280]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7107 "Code to automatically generate conversion methods"
[281]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7407 "Support recovery for anonymous roll outs"
[282]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7527 "Bump kube2sky to 1.2. Point it at https endpoint (3rd try)."
[283]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7526 "cluster/gce/coreos: Add metadata-service in node.yaml"
[284]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7480 "Move ComputePodChanges to the Docker runtime"
[285]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7510 "Cobra rebase"
[286]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6718 "Adding system oom events from kubelet"
[287]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7479 "Move Prober to its own subpackage"
[288]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7513 "Fix parallel-e2e.sh to work on my macbook (bash v3.2)"
[289]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7449 "Move network plugin TearDown to DockerManager"
[290]: https://github.com/GoogleCloudPlatform/kubernetes/issues/7498 "CoreOS Getting Started Guide not working"
[291]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7499 "Fixes #7498 - CoreOS Getting Started Guide had invalid cloud config"
[292]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7504 (Fix invalid character '"' after object key:value pair)
[293]: https://github.com/GoogleCloudPlatform/kubernetes/issues/7317 "GlusterFS Volume Plugin deletes the contents of the mounted volume upon Pod deletion"
[294]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7503 "Fixed kubelet deleting data from volumes on stop (#7317)."
[295]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7482 "Fixing hooks/description to catch API fields without description tags"
[296]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7457 "cadvisor is obsoleted so kubelet service does not require it."
[297]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7408 'Set the default namespace for events to be "default"'
[298]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7446 "Fix typo in namespace conversion"
[299]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7419 "Convert Secret registry to use update/create strategy, allow filtering by Type"
[300]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7102 "Use pod namespace when looking for its GlusterFS endpoints."
[301]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7427 "Fixed name of kube-proxy path in deployment scripts."
<!-- TODO(review): truncated link reference definition (presumably the [3] entry) — body lost in merge; restore from the upstream kubernetes CHANGELOG -->
[4]: https://github.com/kubernetes/kubernetes/pull/8065 "Remove old salt configs"
[5]: https://github.com/kubernetes/kubernetes/pull/8069 "Kubelet: minor cleanups"
[6]: https://github.com/kubernetes/kubernetes/pull/7940 "update example/walkthrough to v1beta3"
[7]: https://github.com/kubernetes/kubernetes/pull/7946 "update example/rethinkdb to v1beta3"
[8]: https://github.com/kubernetes/kubernetes/pull/7917 "verify the v1beta3 yaml files all work; merge the yaml files"
[9]: https://github.com/kubernetes/kubernetes/pull/7258 "update examples/cassandra to api v1beta3"
[10]: https://github.com/kubernetes/kubernetes/pull/7899 "update service.json in persistent-volume example to v1beta3"
[11]: https://github.com/kubernetes/kubernetes/pull/7864 "update mysql-wordpress example to use v1beta3 API"
[12]: https://github.com/kubernetes/kubernetes/pull/7848 "Update examples/meteor to use API v1beta3"
[13]: https://github.com/kubernetes/kubernetes/pull/7872 "update node-selector example to API v1beta3"
[14]: https://github.com/kubernetes/kubernetes/pull/7824 "update logging-demo to use API v1beta3; modify the way to access Elasticsearch and Kibana services"
[15]: https://github.com/kubernetes/kubernetes/pull/7619 "Convert the skydns rc to use v1beta3 and add a health check to it"
[16]: https://github.com/kubernetes/kubernetes/pull/7728 "update the hazelcast example to API version v1beta3"
[17]: https://github.com/kubernetes/kubernetes/pull/7515 "Fix YAML parsing for v1beta3 objects in the kubelet for file/http"
[18]: https://github.com/kubernetes/kubernetes/pull/7502 "Updated kubectl cluster-info to show v1beta3 addresses"
[19]: https://github.com/kubernetes/kubernetes/pull/7980 "kubelet: Fix racy kubelet tests."
[20]: https://github.com/kubernetes/kubernetes/pull/8079 "kubelet/container: Move prober.ContainerCommandRunner to container."
[21]: https://github.com/kubernetes/kubernetes/pull/6127 "Kubelet: set host field in the pending pod status"
[22]: https://github.com/kubernetes/kubernetes/pull/6442 "Fix the kubelet node watch"
[23]: https://github.com/kubernetes/kubernetes/pull/6607 "Kubelet: recreate mirror pod if the static pod changes"
[24]: https://github.com/kubernetes/kubernetes/pull/7749 "Kubelet: record the timestamp correctly in the runtime cache"
[25]: https://github.com/kubernetes/kubernetes/pull/7729 "Kubelet: wait until container runtime is up"
[26]: https://github.com/kubernetes/kubernetes/pull/7674 "Kubelet: replace DockerManager with the Runtime interface"
[27]: https://github.com/kubernetes/kubernetes/pull/7301 "Kubelet: filter out terminated pods in SyncPods"
[28]: https://github.com/kubernetes/kubernetes/pull/7048 "Kubelet: parallelize cleaning up containers in unwanted pods"
[29]: https://github.com/kubernetes/kubernetes/pull/7952 "kubelet: Add container runtime option for rkt."
[30]: https://github.com/kubernetes/kubernetes/pull/7916 "kubelet/rkt: Remove build label."
[31]: https://github.com/kubernetes/kubernetes/pull/7327 "kubelet/metrics: Move instrumented_docker.go to dockertools."
[32]: https://github.com/kubernetes/kubernetes/pull/7599 "kubelet/rkt: Add GetPods() for rkt."
[33]: https://github.com/kubernetes/kubernetes/pull/7605 "kubelet/rkt: Add KillPod() and GetPodStatus() for rkt."
[34]: https://github.com/kubernetes/kubernetes/pull/4755 "pkg/kubelet: Fix logging."
[35]: https://github.com/kubernetes/kubernetes/pull/6491 "kubelet: Refactor RunInContainer/ExecInContainer/PortForward."
[36]: https://github.com/kubernetes/kubernetes/pull/6609 "kubelet/DockerManager: Fix returning empty error from GetPodStatus()."
[37]: https://github.com/kubernetes/kubernetes/pull/6634 "kubelet: Move pod infra container image setting to dockertools."
[38]: https://github.com/kubernetes/kubernetes/pull/6653 "kubelet/fake_docker_client: Use self's PID instead of 42 in testing."
[39]: https://github.com/kubernetes/kubernetes/pull/6778 "kubelet/dockertool: Move Getpods() to DockerManager."
[40]: https://github.com/kubernetes/kubernetes/pull/6776 "kubelet/dockertools: Add puller interfaces in the containerManager."
[41]: https://github.com/kubernetes/kubernetes/pull/6608 "kubelet: Introduce PodInfraContainerChanged()."
[42]: https://github.com/kubernetes/kubernetes/pull/6795 "kubelet/container: Replace DockerCache with RuntimeCache."
[43]: https://github.com/kubernetes/kubernetes/pull/6844 "kubelet: Clean up computePodContainerChanges."
[44]: https://github.com/kubernetes/kubernetes/pull/7009 "kubelet: Refactor prober."
[45]: https://github.com/kubernetes/kubernetes/pull/7466 "kubelet/container: Update the runtime interface."
[46]: https://github.com/kubernetes/kubernetes/pull/7477 "kubelet: Refactor isPodRunning() in runonce.go"
[47]: https://github.com/kubernetes/kubernetes/pull/7465 "kubelet/rkt: Add basic rkt runtime routines."
[48]: https://github.com/kubernetes/kubernetes/pull/7555 "kubelet/rkt: Add podInfo."
[49]: https://github.com/kubernetes/kubernetes/pull/7488 "kubelet/container: Add GetContainerLogs to runtime interface."
[50]: https://github.com/kubernetes/kubernetes/pull/7543 "kubelet/rkt: Add routines for converting kubelet pod to rkt pod."
[51]: https://github.com/kubernetes/kubernetes/pull/7589 "kubelet/rkt: Add RunPod() for rkt."
[52]: https://github.com/kubernetes/kubernetes/pull/7553 "kubelet/rkt: Add RunInContainer()/ExecInContainer()/PortForward()."
[53]: https://github.com/kubernetes/kubernetes/pull/7613 "kubelet/container: Move ShouldContainerBeRestarted() to runtime."
[54]: https://github.com/kubernetes/kubernetes/pull/7611 "kubelet/rkt: Add SyncPod() to rkt."
[55]: https://github.com/kubernetes/kubernetes/pull/6794 "Kubelet: persist restart count of a container"
[56]: https://github.com/kubernetes/kubernetes/pull/7951 "kubelet/container: Move pty*.go to container runtime package."
[57]: https://github.com/kubernetes/kubernetes/pull/7900 "kubelet: Add container runtime option for rkt."
[58]: https://github.com/kubernetes/kubernetes/pull/7803 "kubelet/rkt: Add docker prefix to image string."
[59]: https://github.com/kubernetes/kubernetes/pull/7849 "kubelet/rkt: Inject dependencies to rkt."
[60]: https://github.com/kubernetes/kubernetes/pull/7859 "kubelet/rkt: Remove dependencies on rkt.store"
[61]: https://github.com/kubernetes/kubernetes/pull/2387 "Kubelet talks securely to apiserver"
[62]: https://github.com/kubernetes/kubernetes/pull/7592 "Rename EnvVarSource.FieldPath -> FieldRef and add example"
[63]: https://github.com/kubernetes/kubernetes/pull/7741 "Add containerized option to kubelet binary"
[64]: https://github.com/kubernetes/kubernetes/pull/7948 "Ease building kubelet image"
[65]: https://github.com/kubernetes/kubernetes/pull/7854 "Remove unnecessary bind-mount from dockerized kubelet run"
[66]: https://github.com/kubernetes/kubernetes/pull/7798 "Add ability to dockerize kubelet in local cluster"
[67]: https://github.com/kubernetes/kubernetes/pull/7797 "Create docker image for kubelet"
[68]: https://github.com/kubernetes/kubernetes/pull/7343 "Security context - types, kubelet, admission"
[69]: https://github.com/kubernetes/kubernetes/pull/7743 "Kubelet: Add rkt as a runtime option"
[70]: https://github.com/kubernetes/kubernetes/pull/7746 "Fix kubelet's docker RunInContainer implementation "
[71]: https://github.com/kubernetes/kubernetes/pull/8018 "AWS: Don't try to copy gce_keys in jenkins e2e job"
[72]: https://github.com/kubernetes/kubernetes/pull/7992 "AWS: Copy some new properties from config-default => config.test"
[73]: https://github.com/kubernetes/kubernetes/pull/7928 "AWS: make it possible to disable minion public ip assignment"
[74]: https://github.com/kubernetes/kubernetes/pull/7667 "update AWS CloudFormation template and cloud-configs"
[75]: https://github.com/kubernetes/kubernetes/pull/7736 "AWS: Fix variable naming that meant not all tokens were written"
[76]: https://github.com/kubernetes/kubernetes/pull/7678 "AWS: Change apiserver to listen on 443 directly, not through nginx"
[77]: https://github.com/kubernetes/kubernetes/pull/6606 "AWS: Improving getting existing VPC and subnet"
[78]: https://github.com/kubernetes/kubernetes/pull/5138 "AWS EBS volume support"
[79]: https://github.com/kubernetes/kubernetes/pull/8089 "Introduce an 'svc' segment for DNS search"
[80]: https://github.com/kubernetes/kubernetes/pull/5707 "Adds ability to define a prefix for etcd paths"
[81]: https://github.com/kubernetes/kubernetes/pull/7973 "Add kubectl log --previous support to view last terminated container log"
[82]: https://github.com/kubernetes/kubernetes/pull/8083 "Add a flag to disable legacy APIs"
[83]: https://github.com/kubernetes/kubernetes/pull/7971 "make the dockerkeyring handle multiple matching credentials"
[84]: https://github.com/kubernetes/kubernetes/pull/8078 "Convert Fluentd to Cloud Logging pod specs to YAML"
[85]: https://github.com/kubernetes/kubernetes/pull/7704 "Use etcd to allocate PortalIPs instead of in-mem"
[86]: https://github.com/kubernetes/kubernetes/pull/8064 "eliminate auth-path"
[87]: https://github.com/kubernetes/kubernetes/pull/7981 "Record failure reasons for image pulling"
[88]: https://github.com/kubernetes/kubernetes/pull/7869 "Rate limit replica creation"
[89]: https://github.com/kubernetes/kubernetes/pull/7995 "Upgrade to Kibana 4 for cluster logging"
[90]: https://github.com/kubernetes/kubernetes/pull/8049 "Added name to kube-dns service"
[91]: https://github.com/kubernetes/kubernetes/pull/7919 "Fix validation by moving it into the resource builder."
[92]: https://github.com/kubernetes/kubernetes/pull/8050 "Add cache with multiple shards to decrease lock contention"
[93]: https://github.com/kubernetes/kubernetes/pull/8039 "Delete status from displayable resources"
[94]: https://github.com/kubernetes/kubernetes/pull/8044 "Refactor volume interfaces to receive pod instead of ObjectReference"
[95]: https://github.com/kubernetes/kubernetes/pull/7565 "fix kube-down for provider gke"
[96]: https://github.com/kubernetes/kubernetes/pull/7786 "Service port names are required for multi-port"
[97]: https://github.com/kubernetes/kubernetes/pull/8051 "Increase disk size for kubernetes master."
[98]: https://github.com/kubernetes/kubernetes/pull/7774 "expose: Load input object for increased safety"
[99]: https://github.com/kubernetes/kubernetes/pull/7896 "Improvements to conversion methods generator"
[100]: https://github.com/kubernetes/kubernetes/pull/7557 "Added displaying external IPs to kubectl cluster-info"
[101]: https://github.com/kubernetes/kubernetes/pull/8037 "Add missing Errorf formatting directives"
[102]: https://github.com/kubernetes/kubernetes/pull/7567 "WIP: Add startup code to apiserver to migrate etcd keys"
[103]: https://github.com/kubernetes/kubernetes/pull/8021 "Use error type from docker go-client instead of string"
[104]: https://github.com/kubernetes/kubernetes/pull/8024 "Accurately get hardware cpu count in Vagrantfile."
[105]: https://github.com/kubernetes/kubernetes/pull/7921 "Stop setting a GKE specific version of the kubeconfig file"
[106]: https://github.com/kubernetes/kubernetes/pull/7950 "Make the API server deal with HEAD requests via the service proxy"
[107]: https://github.com/kubernetes/kubernetes/pull/7983 "GlusterFS Critical Bug Resolved - Removing warning in README"
[108]: https://github.com/kubernetes/kubernetes/pull/7967 "Don't use the first token `uname -n` as the hostname"
[109]: https://github.com/kubernetes/kubernetes/pull/7982 "Call kube-down in test-teardown for vagrant."
[110]: https://github.com/kubernetes/kubernetes/pull/6235 "defaults_tests: verify defaults when converting to an API object"
[111]: https://github.com/kubernetes/kubernetes/pull/7910 "Use the full hostname for mirror pod name."
[112]: https://github.com/kubernetes/kubernetes/pull/7657 "Removes RunPod in the Runtime interface"
[113]: https://github.com/kubernetes/kubernetes/pull/7533 "Clean up dockertools/manager.go and add more unit tests"
[114]: https://github.com/kubernetes/kubernetes/pull/7525 "Adapt pod killing and cleanup for generic container runtime"
[115]: https://github.com/kubernetes/kubernetes/pull/7198 "Fix pod filtering in replication controller"
[116]: https://github.com/kubernetes/kubernetes/pull/7116 "Print container statuses in `kubectl get pods`"
[117]: https://github.com/kubernetes/kubernetes/pull/6992 "Prioritize deleting the non-running pods when reducing replicas"
[118]: https://github.com/kubernetes/kubernetes/pull/6872 "Fix locking issue in pod manager"
[119]: https://github.com/kubernetes/kubernetes/pull/6655 "Limit the number of concurrent tests in integration.go"
[120]: https://github.com/kubernetes/kubernetes/pull/7931 "Fix typos in different config comments"
[121]: https://github.com/kubernetes/kubernetes/pull/7929 "Update cAdvisor dependency."
[122]: https://github.com/kubernetes/kubernetes/pull/5498 "Ubuntu-distro: deprecate & merge ubuntu single node work to ubuntu cluster node stuff"
[123]: https://github.com/kubernetes/kubernetes/pull/7935 "Add control variables to Jenkins E2E script"
[124]: https://github.com/kubernetes/kubernetes/pull/7932 "Check node status as part of validate-cluster.sh."
[125]: https://github.com/kubernetes/kubernetes/pull/7821 "Add old endpoint cleanup function"
[126]: https://github.com/kubernetes/kubernetes/pull/7620 "Support recovery from in the middle of a rename."
[127]: https://github.com/kubernetes/kubernetes/pull/7715 "Update Exec and Portforward client to use pod subresource"
[128]: https://github.com/kubernetes/kubernetes/pull/7564 "Added NFS to PV structs"
[129]: https://github.com/kubernetes/kubernetes/pull/7904 "Fix environment variable error in Vagrant docs"
[130]: https://github.com/kubernetes/kubernetes/pull/7616 "Adds a simple release-note builder that scrapes the GitHub API for recent PRs"
[131]: https://github.com/kubernetes/kubernetes/pull/7668 "Scheduler ignores nodes that are in a bad state"
[132]: https://github.com/kubernetes/kubernetes/pull/7863 "Set GOMAXPROCS for etcd"
[133]: https://github.com/kubernetes/kubernetes/pull/7556 "Auto-generated conversion methods calling one another"
[134]: https://github.com/kubernetes/kubernetes/pull/7445 "Bring up a kubernetes cluster using coreos image as worker nodes"
[135]: https://github.com/kubernetes/kubernetes/pull/7410 "Godep: Add godep for rkt."
[136]: https://github.com/kubernetes/kubernetes/pull/7870 "Add volumeGetter to rkt."
[137]: https://github.com/kubernetes/kubernetes/pull/7897 "Update cAdvisor dependency."
[138]: https://github.com/kubernetes/kubernetes/pull/7822 "DNS: expose 53/TCP"
[139]: https://github.com/kubernetes/kubernetes/pull/7763 "Set NodeReady=False when docker is dead"
[140]: https://github.com/kubernetes/kubernetes/pull/7857 "Ignore latency metrics for events"
[141]: https://github.com/kubernetes/kubernetes/pull/7792 "SecurityContext admission clean up"
[142]: https://github.com/kubernetes/kubernetes/pull/7832 "Support manually-created and generated conversion functions"
[143]: https://github.com/kubernetes/kubernetes/pull/7833 "Add latency metrics for etcd operations"
[144]: https://github.com/kubernetes/kubernetes/pull/7885 "Update errors_test.go"
[145]: https://github.com/kubernetes/kubernetes/pull/7861 "Change signature of container runtime PullImage to allow pull w/ secret"
[146]: https://github.com/kubernetes/kubernetes/pull/7873 "Fix bug in Service documentation: incorrect location of `selector` in JSON"
[147]: https://github.com/kubernetes/kubernetes/pull/7876 "Fix controller-manager manifest for providers that don't specify CLUSTER_IP_RANGE"
[148]: https://github.com/kubernetes/kubernetes/pull/7867 "Fix controller unittests"
[149]: https://github.com/kubernetes/kubernetes/pull/7751 "Enable GCM and GCL instead of InfluxDB on GCE"
[150]: https://github.com/kubernetes/kubernetes/pull/7862 "Remove restriction that cluster-cidr be a class-b"
[151]: https://github.com/kubernetes/kubernetes/pull/7591 "Fix OpenShift example"
[152]: https://github.com/kubernetes/kubernetes/pull/7718 "API Server - pass path name in context of create request for subresource"
[153]: https://github.com/kubernetes/kubernetes/pull/7575 "Rolling Updates: Add support for --rollback."
[154]: https://github.com/kubernetes/kubernetes/pull/7820 "Update to container-vm-v20150505 (Also updates GCE to Docker 1.6)"
[155]: https://github.com/kubernetes/kubernetes/pull/7830 "Fix metric label"
[156]: https://github.com/kubernetes/kubernetes/pull/7838 "Fix v1beta1 typos in v1beta2 conversions"
[157]: https://github.com/kubernetes/kubernetes/pull/7764 "skydns: use the etcd-2.x native syntax, enable IANA attributed ports."
[158]: https://github.com/kubernetes/kubernetes/pull/7794 "Added port 6443 to kube-proxy default IP address for api-server"
[159]: https://github.com/kubernetes/kubernetes/pull/7834 "Added client header info for authentication doc."
[160]: https://github.com/kubernetes/kubernetes/pull/7827 "Clean up safe_format_and_mount spam in the startup logs"
[161]: https://github.com/kubernetes/kubernetes/pull/7829 "Set allocate_node_cidrs to be blank by default."
[162]: https://github.com/kubernetes/kubernetes/pull/5246 "Make nodecontroller configure nodes' pod IP ranges"
[163]: https://github.com/kubernetes/kubernetes/pull/7799 "Fix sync problems in #5246"
[164]: https://github.com/kubernetes/kubernetes/pull/7823 "Fix event doc link"
[165]: https://github.com/kubernetes/kubernetes/pull/7776 "Cobra update and bash completions fix"
[166]: https://github.com/kubernetes/kubernetes/pull/7675 "Fix kube2sky flakes. Fix tools.GetEtcdVersion to work with etcd > 2.0.7"
[167]: https://github.com/kubernetes/kubernetes/pull/7154 "Change kube2sky to use token-system-dns secret, point at https endpoint ..."
[168]: https://github.com/kubernetes/kubernetes/pull/7468 "replica: serialize created-by reference"
[169]: https://github.com/kubernetes/kubernetes/pull/7702 "Inject mounter into volume plugins"
[170]: https://github.com/kubernetes/kubernetes/pull/6973 "bringing CoreOS cloud-configs up-to-date (against 0.15.x and latest OS' alpha) "
[171]: https://github.com/kubernetes/kubernetes/pull/7787 "Update kubeconfig-file doc."
[172]: https://github.com/kubernetes/kubernetes/pull/7780 "Throw an API error when deleting namespace in termination"
[173]: https://github.com/kubernetes/kubernetes/pull/7773 "Fix command field PodExecOptions"
[174]: https://github.com/kubernetes/kubernetes/pull/7785 "Start ImageManager housekeeping in Run()."
[175]: https://github.com/kubernetes/kubernetes/pull/7769 "fix DeepCopy to properly support runtime.EmbeddedObject"
[176]: https://github.com/kubernetes/kubernetes/pull/7273 "fix master service endpoint system for multiple masters"
[177]: https://github.com/kubernetes/kubernetes/pull/7757 "Add genbashcomp to KUBE_TEST_TARGETS"
[178]: https://github.com/kubernetes/kubernetes/pull/7669 "Change the cloud provider TCPLoadBalancerExists function to GetTCPLoadBalancer..."
[179]: https://github.com/kubernetes/kubernetes/pull/7772 "Add containerized option to kubelet binary"
[180]: https://github.com/kubernetes/kubernetes/pull/7779 "Fix swagger spec"
[181]: https://github.com/kubernetes/kubernetes/issues/7750 "Hyperkube image requires root certificates to work with cloud-providers (at least AWS)"
[182]: https://github.com/kubernetes/kubernetes/pull/7755 "FIX: Issue #7750 - Hyperkube docker image needs certificates to connect to cloud-providers"
[183]: https://github.com/kubernetes/kubernetes/pull/7752 "Add build labels to rkt"
[184]: https://github.com/kubernetes/kubernetes/pull/7672 "Check license boilerplate for python files"
[185]: https://github.com/kubernetes/kubernetes/pull/7705 "Reliable updates in rollingupdate"
[186]: https://github.com/kubernetes/kubernetes/pull/7650 "Don't exit abruptly if there aren't yet any minions right after the cluster is created."
[187]: https://github.com/kubernetes/kubernetes/pull/7742 "Make changes suggested in #7675"
[188]: https://github.com/kubernetes/kubernetes/pull/7357 "A guide to set up kubernetes multiple nodes cluster with flannel on fedora"
[189]: https://github.com/kubernetes/kubernetes/pull/7760 "Setup generators in factory"
[190]: https://github.com/kubernetes/kubernetes/pull/7737 "Reduce usage of time.After"
[191]: https://github.com/kubernetes/kubernetes/pull/7735 "Remove node status from `componentstatuses` call."
[192]: https://github.com/kubernetes/kubernetes/pull/7614 "React to failure by growing the remaining clusters"
[193]: https://github.com/kubernetes/kubernetes/pull/7725 "Fix typo in runtime_cache.go"
[194]: https://github.com/kubernetes/kubernetes/pull/7740 "Update non-GCE Salt distros to 1.6.0, fallback to ContainerVM Docker version on GCE"
[195]: https://github.com/kubernetes/kubernetes/pull/7744 "Skip SaltStack install if it's already installed"
[196]: https://github.com/kubernetes/kubernetes/pull/7712 "Expose pod name as a label on containers."
[197]: https://github.com/kubernetes/kubernetes/pull/7732 "Log which SSH key is used in e2e SSH test"
[198]: https://github.com/kubernetes/kubernetes/pull/7649 "Add a central simple getting started guide with kubernetes guide."
[199]: https://github.com/kubernetes/kubernetes/pull/7443 "Explicitly state the lack of support for 'Requests' for the purposes of scheduling"
[200]: https://github.com/kubernetes/kubernetes/pull/7721 "Select IPv4-only from host interfaces"
[201]: https://github.com/kubernetes/kubernetes/pull/7723 "Metrics tests can't run on Mac"
[202]: https://github.com/kubernetes/kubernetes/pull/7727 "Add step to API changes doc for swagger regen"
[203]: https://github.com/kubernetes/kubernetes/pull/7703 "Add NsenterMounter mount implementation"
[204]: https://github.com/kubernetes/kubernetes/pull/7509 "add StringSet.HasAny"
[205]: https://github.com/kubernetes/kubernetes/pull/6941 "Add an integration test that checks for the metrics we expect to be exported from the master"
[206]: https://github.com/kubernetes/kubernetes/pull/7722 "Minor bash update found by shellcheck.net"
[207]: https://github.com/kubernetes/kubernetes/pull/7536 "Add --hostport to run-container."
[208]: https://github.com/kubernetes/kubernetes/pull/7659 "Have rkt implement the container Runtime interface"
[209]: https://github.com/kubernetes/kubernetes/pull/7629 "Change the order the different versions of API are registered "
[210]: https://github.com/kubernetes/kubernetes/pull/7699 "expose: Create objects in a generic way"
[211]: https://github.com/kubernetes/kubernetes/pull/7643 "Requeue rc if a single get/put retry on status.Replicas fails"
[212]: https://github.com/kubernetes/kubernetes/pull/7316 "logs for master components"
[213]: https://github.com/kubernetes/kubernetes/pull/7522 "cloudproviders: add ovirt getting started guide"
[214]: https://github.com/kubernetes/kubernetes/pull/7671 "Make rkt-install a oneshot."
[215]: https://github.com/kubernetes/kubernetes/pull/7665 "Provide container_runtime flag to Kubelet in CoreOS."
[216]: https://github.com/kubernetes/kubernetes/pull/7654 "Boilerplate speedup"
[217]: https://github.com/kubernetes/kubernetes/pull/7700 "Log host for failed pod in Density test"
[218]: https://github.com/kubernetes/kubernetes/pull/7655 "Removes spurious quotation mark"
[219]: https://github.com/kubernetes/kubernetes/pull/7694 "Add kubectl_label to custom functions in bash completion"
[220]: https://github.com/kubernetes/kubernetes/pull/7696 "Enable profiling in kube-controller"
[221]: https://github.com/kubernetes/kubernetes/pull/7690 "Set vagrant test cluster default NUM_MINIONS=2"
[222]: https://github.com/kubernetes/kubernetes/pull/7695 "Add metrics to measure cache hit ratio"
[223]: https://github.com/kubernetes/kubernetes/pull/7662 "Change IP to IP(S) in service columns for kubectl get"
[224]: https://github.com/kubernetes/kubernetes/pull/7076 "annotate required flags for bash_completions"
[225]: https://github.com/kubernetes/kubernetes/pull/7685 "(minor) Add pgrep debugging to etcd error"
[226]: https://github.com/kubernetes/kubernetes/pull/7676 "Fixed nil pointer issue in describe when volume is unbound"
[227]: https://github.com/kubernetes/kubernetes/pull/7691 "Removed unnecessary closing bracket"
[228]: https://github.com/kubernetes/kubernetes/pull/7432 "Added TerminationGracePeriod field to PodSpec and grace-period flag to kubectl stop"
[229]: https://github.com/kubernetes/kubernetes/pull/7689 "Fix boilerplate in test/e2e/scale.go"
[230]: https://github.com/kubernetes/kubernetes/pull/7628 "Update expiration timeout based on observed latencies"
[231]: https://github.com/kubernetes/kubernetes/pull/7644 "Output generated conversion functions/names"
[232]: https://github.com/kubernetes/kubernetes/issues/7645 "Move the scale tests into a separate file"
[233]: https://github.com/kubernetes/kubernetes/pull/7646 "Moved the Scale tests into a scale file. #7645"
[234]: https://github.com/kubernetes/kubernetes/pull/7609 "Truncate GCE load balancer names to 63 chars"
[235]: https://github.com/kubernetes/kubernetes/pull/7603 "Add SyncPod() and remove Kill/Run InContainer()."
[236]: https://github.com/kubernetes/kubernetes/pull/7663 "Merge release 0.16 to master"
[237]: https://github.com/kubernetes/kubernetes/pull/7637 "Update license boilerplate for examples/rethinkdb"
[238]: https://github.com/kubernetes/kubernetes/pull/7268 "First part of improved rolling update, allow dynamic next replication controller generation."
[239]: https://github.com/kubernetes/kubernetes/pull/7638 "Add license boilerplate to examples/phabricator"
[240]: https://github.com/kubernetes/kubernetes/pull/7597 "Use generic copyright holder name in license boilerplate"
[241]: https://github.com/kubernetes/kubernetes/pull/7633 "Retry incrementing quota if there is a conflict"
[242]: https://github.com/kubernetes/kubernetes/pull/7568 "Remove GetContainers from Runtime interface"
[243]: https://github.com/kubernetes/kubernetes/pull/7578 "Add image-related methods to DockerManager"
[244]: https://github.com/kubernetes/kubernetes/pull/7586 "Remove more docker references in kubelet"
[245]: https://github.com/kubernetes/kubernetes/pull/7601 "Add KillContainerInPod in DockerManager"
[246]: https://github.com/kubernetes/kubernetes/pull/7652 "Kubelet: Add container runtime option."
[247]: https://github.com/kubernetes/kubernetes/pull/7626 "bump heapster to v0.11.0 and grafana to v0.7.0"
[248]: https://github.com/kubernetes/kubernetes/pull/7593 "Build github.com/onsi/ginkgo/ginkgo as a part of the release"
[249]: https://github.com/kubernetes/kubernetes/pull/7490 "Do not automatically decode runtime.RawExtension"
[250]: https://github.com/kubernetes/kubernetes/pull/7500 "Update changelog."
[251]: https://github.com/kubernetes/kubernetes/pull/7610 "Add SyncPod() to DockerManager and use it in Kubelet"
[252]: https://github.com/kubernetes/kubernetes/pull/7602 "Build: Push .md5 and .sha1 files for every file we push to GCS"
[253]: https://github.com/kubernetes/kubernetes/pull/7540 "Fix rolling update --image "
[254]: https://github.com/kubernetes/kubernetes/pull/7636 "Update license boilerplate for docs/man/md2man-all.sh"
[255]: https://github.com/kubernetes/kubernetes/pull/7632 "Include shell license boilerplate in examples/k8petstore"
[256]: https://github.com/kubernetes/kubernetes/pull/7277 "Add --cgroup_parent flag to Kubelet to set the parent cgroup for pods"
[257]: https://github.com/kubernetes/kubernetes/pull/7209 "change the current dir to the config dir"
[258]: https://github.com/kubernetes/kubernetes/pull/7158 "Set Weave To 0.9.0 And Update Etcd Configuration For Azure"
[259]: https://github.com/kubernetes/kubernetes/pull/7467 "Augment describe to search for matching things if it doesn't match the original resource."
[260]: https://github.com/kubernetes/kubernetes/pull/7559 "Add a simple cache for objects stored in etcd."
[261]: https://github.com/kubernetes/kubernetes/pull/7549 "Rkt gc"
[262]: https://github.com/kubernetes/kubernetes/pull/7550 "Rkt pull"
[263]: https://github.com/kubernetes/kubernetes/pull/6400 "Implement Mount interface using mount(8) and umount(8)"
[264]: https://github.com/kubernetes/kubernetes/pull/7588 "Trim Fleuntd tag for Cloud Logging"
[265]: https://github.com/kubernetes/kubernetes/pull/7569 "GCE CoreOS cluster - set master name based on variable"
[266]: https://github.com/kubernetes/kubernetes/pull/7535 "Capitalization of KubeProxyVersion wrong in JSON"
[267]: https://github.com/kubernetes/kubernetes/pull/7530 "Make nodes report their external IP rather than the master's."
[268]: https://github.com/kubernetes/kubernetes/pull/7539 "Trim cluster log tags to pod name and container name"
[269]: https://github.com/kubernetes/kubernetes/pull/7541 "Handle conversion of boolean query parameters with a value of `false`"
[270]: https://github.com/kubernetes/kubernetes/pull/7532 "Add image-related methods to Runtime interface."
[271]: https://github.com/kubernetes/kubernetes/pull/7560 "Test whether auto-generated conversions weren't manually edited"
[272]: https://github.com/kubernetes/kubernetes/pull/7484 "Mention :latest behavior for image version tag"
[273]: https://github.com/kubernetes/kubernetes/pull/7487 "readinessProbe calls livenessProbe.Exec.Command which cause `invalid memory address or nil pointer dereference`."
[274]: https://github.com/kubernetes/kubernetes/pull/7520 "Add RuntimeHooks to abstract Kubelet logic"
[275]: https://github.com/kubernetes/kubernetes/pull/7546 "Expose URL() on Request to allow building URLs"
[276]: https://github.com/kubernetes/kubernetes/pull/7288 "Add a simple cache for objects stored in etcd"
[277]: https://github.com/kubernetes/kubernetes/pull/7431 "Prepare for chaining autogenerated conversion methods "
[278]: https://github.com/kubernetes/kubernetes/pull/7353 "Increase maxIdleConnection limit when creating etcd client in apiserver."
[279]: https://github.com/kubernetes/kubernetes/pull/7354 "Improvements to generator of conversion methods."
[280]: https://github.com/kubernetes/kubernetes/pull/7107 "Code to automatically generate conversion methods"
[281]: https://github.com/kubernetes/kubernetes/pull/7407 "Support recovery for anonymous roll outs"
[282]: https://github.com/kubernetes/kubernetes/pull/7527 "Bump kube2sky to 1.2. Point it at https endpoint (3rd try)."
[283]: https://github.com/kubernetes/kubernetes/pull/7526 "cluster/gce/coreos: Add metadata-service in node.yaml"
[284]: https://github.com/kubernetes/kubernetes/pull/7480 "Move ComputePodChanges to the Docker runtime"
[285]: https://github.com/kubernetes/kubernetes/pull/7510 "Cobra rebase"
[286]: https://github.com/kubernetes/kubernetes/pull/6718 "Adding system oom events from kubelet"
[287]: https://github.com/kubernetes/kubernetes/pull/7479 "Move Prober to its own subpackage"
[288]: https://github.com/kubernetes/kubernetes/pull/7513 "Fix parallel-e2e.sh to work on my macbook (bash v3.2)"
[289]: https://github.com/kubernetes/kubernetes/pull/7449 "Move network plugin TearDown to DockerManager"
[290]: https://github.com/kubernetes/kubernetes/issues/7498 "CoreOS Getting Started Guide not working"
[291]: https://github.com/kubernetes/kubernetes/pull/7499 "Fixes #7498 - CoreOS Getting Started Guide had invalid cloud config"
[292]: https://github.com/kubernetes/kubernetes/pull/7504 "Fix invalid character &quot; after object key:value pair"
[293]: https://github.com/kubernetes/kubernetes/issues/7317 "GlusterFS Volume Plugin deletes the contents of the mounted volume upon Pod deletion"
[294]: https://github.com/kubernetes/kubernetes/pull/7503 "Fixed kubelet deleting data from volumes on stop (#7317)."
[295]: https://github.com/kubernetes/kubernetes/pull/7482 "Fixing hooks/description to catch API fields without description tags"
[296]: https://github.com/kubernetes/kubernetes/pull/7457 "cadvisor is obsoleted so kubelet service does not require it."
[297]: https://github.com/kubernetes/kubernetes/pull/7408 "Set the default namespace for events to be &quot;default&quot;"
[298]: https://github.com/kubernetes/kubernetes/pull/7446 "Fix typo in namespace conversion"
[299]: https://github.com/kubernetes/kubernetes/pull/7419 "Convert Secret registry to use update/create strategy, allow filtering by Type"
[300]: https://github.com/kubernetes/kubernetes/pull/7102 "Use pod namespace when looking for its GlusterFS endpoints."
[301]: https://github.com/kubernetes/kubernetes/pull/7427 "Fixed name of kube-proxy path in deployment scripts."

View File

@ -10,7 +10,7 @@ evergreen: true
Since the Kubernetes 1.0 release in July, weve seen tremendous adoption by companies building distributed systems to manage their container clusters. Weve also been humbled by the rapid growth of the community who help make Kubernetes better every day. We have seen commercial offerings such as Tectonic by CoreOS and RedHat Atomic Host emerge to deliver deployment and support of Kubernetes. And a growing ecosystem has added Kubernetes support including tool vendors such as Sysdig and Project Calico.
With the help of hundreds of contributors, were proud to announce the availability of Kubernetes 1.1, which offers major performance upgrades, improved tooling, and new features that make applications even easier to build and deploy.
With the help of hundreds of contributors, were proud to announce the availability of Kubernetes 1.1, which offers major performance upgrades, improved tooling, and new features that make applications even easier to build and deploy.
Some of the work wed like to highlight includes:
@ -32,9 +32,9 @@ Some of the work wed like to highlight includes:
Today, were also proud to mark the inaugural Kubernetes conference, [KubeCon](https://kubecon.io/), where some 400 community members along with dozens of vendors are in attendance supporting the Kubernetes project.
Today, were also proud to mark the inaugural Kubernetes conference, [KubeCon](https://kubecon.io/), where some 400 community members along with dozens of vendors are in attendance supporting the Kubernetes project.
Wed love to highlight just a few of the many partners making Kubernetes better:
Wed love to highlight just a few of the many partners making Kubernetes better:
> “We are betting our major product, Tectonic which enables any company to deploy, manage and secure its containers anywhere on Kubernetes because we believe it is the future of the data center. The release of Kubernetes 1.1 is another major milestone that will create more widespread adoption of distributed systems and containers, and puts us on a path that will inevitably lead to a whole new generation of products and services.” Alex Polvi, CEO, CoreOS.
@ -42,15 +42,12 @@ Wed love to highlight just a few of the many partners making Kubernetes bette
> “The tremendous customer demand were seeing to run containers at scale with Kubernetes is a critical element driving growth in our professional services business at Redapt. As a trusted advisor, its great to have a tool like Kubernetes in our tool belt to help our customers achieve their objectives.” Paul Welch, SR VP Cloud Solutions, Redapt
>
As we mentioned above, we would love your help:
As we mentioned above, we would love your help:
- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes)&nbsp;
- Connect with the community on [Slack](http://slack.kubernetes.io/)
- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates&nbsp;
- Post questions (or answer questions) on Stackoverflow&nbsp;
- Post questions (or answer questions) on StackOverflow
- Get started running, deploying, and using Kubernetes [guides](/docs/tutorials/kubernetes-basics/);
But, most of all, just let us know how you are transforming your business using Kubernetes, and how we can help you do it even faster. Thank you for your support!
But, most of all, just let us know how you are transforming your business using Kubernetes, and how we can help you do it even faster. Thank you for your support!

View File

@ -3,9 +3,9 @@ title: " Fission: Serverless Functions as a Service for Kubernetes "
date: 2017-01-30
slug: fission-serverless-functions-as-service-for-kubernetes
url: /blog/2017/01/Fission-Serverless-Functions-As-Service-For-Kubernetes
author: >
Soam Vasani (Platform9 Systems)
---
_Editor's note: Todays post is by Soam Vasani, Software Engineer at Platform9 Systems, talking about a new open source Serverless Function (FaaS) framework for Kubernetes._&nbsp;
[Fission](https://github.com/fission/fission) is a Functions as a Service (FaaS) / Serverless function framework built on Kubernetes.
Fission allows you to easily create HTTP services on Kubernetes from functions. It works at the source level and abstracts away container images (in most cases). It also simplifies the Kubernetes learning curve, by enabling you to make useful services without knowing much about Kubernetes.
@ -127,6 +127,3 @@ Fission is open source and developed in the open by [Platform9 Systems](http://p
- Connect with the community on [Slack](http://slack.k8s.io/)
- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates
_--Soam Vasani, Software Engineer, Platform9 Systems_

View File

@ -3,9 +3,10 @@ title: " How we run Kubernetes in Kubernetes aka Kubeception "
date: 2017-01-20
slug: how-we-run-kubernetes-in-kubernetes-kubeception
url: /blog/2017/01/How-We-Run-Kubernetes-In-Kubernetes-Kubeception
author: >
Hector Fernandez (Giant Swarm)
Puja Abbassi (Giant Swarm)
---
_Editor's note: Todays post is by the team at Giant Swarm, showing how they run Kubernetes in Kubernetes._
[Giant Swarm](https://giantswarm.io/)s container infrastructure started out with the goal to be an easy way for developers to deploy containerized microservices. Our first generation was extensively using [fleet](https://github.com/coreos/fleet) as a base layer for our infrastructure components as well as for scheduling user containers.
In order to give our users a more powerful way to manage their containers we introduced Kubernetes into our stack in early 2016. However, as we needed a quick way to flexibly spin up and manage different users Kubernetes clusters resiliently we kept the underlying fleet layer.
@ -119,6 +120,3 @@ This setup is still in its early days and our roadmap is planning for improvemen
Most importantly, we are working on making the inner Kubernetes clusters a third party resource that can then be managed by a custom controller. The result would be much like the [Operator concept by CoreOS](https://coreos.com/blog/introducing-operators.html). And to ensure that the community at large can benefit from this project we will be open sourcing this in the near future.
_-- Hector Fernandez, Software Engineer & Puja Abbassi, Developer Advocate, Giant Swarm_

View File

@ -3,9 +3,9 @@ title: " Kubernetes UX Survey Infographic "
date: 2017-01-09
slug: kubernetes-ux-survey-infographic
url: /blog/2017/01/Kubernetes-Ux-Survey-Infographic
author: >
Dan Romlein (UX Designer)
---
_Editor's note: Todays post is by Dan Romlein, UX Designer at Apprenda and member of the SIG-UI, sharing UX survey results from the Kubernetes community.&nbsp;_
The following infographic summarizes the findings of a survey that the team behind [Dashboard](https://github.com/kubernetes/dashboard), the official web UI for Kubernetes, sent during KubeCon in November 2016. Following the KubeCon launch of the survey, it was promoted on Twitter and various Slack channels over a two week period and generated over 100 responses. Were delighted with the data it provides us to now make feature and roadmap decisions more in-line with the needs of you, our users.
**Satisfaction with Dashboard**

View File

@ -3,8 +3,9 @@ title: " Running MongoDB on Kubernetes with StatefulSets "
date: 2017-01-30
slug: running-mongodb-on-kubernetes-with-statefulsets
url: /blog/2017/01/Running-Mongodb-On-Kubernetes-With-Statefulsets
author: >
Sandeep Dinesh (Google Cloud Platform)
---
_Editor's note: Todays post is by Sandeep Dinesh, Developer Advocate, Google Cloud Platform, showing how to run a database in a container._
{{% warning %}}
This post is several years old. The code examples need changes to work on a current Kubernetes cluster.
@ -260,4 +261,3 @@ Happy Hacking!
For more cool Kubernetes and Container blog posts, follow me on [Twitter](https://twitter.com/sandeepdinesh) and [Medium](https://medium.com/@SandeepDinesh).
_--Sandeep Dinesh, Developer Advocate, Google Cloud Platform._

View File

@ -3,9 +3,9 @@ title: " Scaling Kubernetes deployments with Policy-Based Networking "
date: 2017-01-19
slug: scaling-kubernetes-deployments-with-policy-base-networking
url: /blog/2017/01/Scaling-Kubernetes-Deployments-With-Policy-Base-Networking
author: >
Harmeet Sahni (Nuage Networks)
---
_Editor's note: Todays post is by Harmeet Sahni, Director of Product Management, at Nuage Networks, writing about their contributions to Kubernetes and insights on policy-based networking. &nbsp;_
Although its just been eighteen months since Kubernetes 1.0 was released, weve seen Kubernetes emerge as the leading container orchestration platform for deploying distributed applications. One of the biggest reasons for this is the vibrant open source community that has developed around it. The large number of Kubernetes contributors coming from diverse backgrounds means we, and the community of users, are assured that we are investing in an open platform. Companies like Google (Container Engine), Red Hat (OpenShift), and CoreOS (Tectonic) are developing their own commercial offerings based on Kubernetes. This is a good thing since it will lead to more standardization and offer choice to the users.&nbsp;
**Networking requirements for Kubernetes applications**
@ -54,5 +54,3 @@ Being able to monitor the traffic flowing between Kubernetes Pods is very useful
Even though we started working on our integration with Kubernetes over a year ago, it feels we are just getting started. We have always felt that this is a truly open community and we want to be an integral part of it. You can find out more about our Kubernetes integration on our [GitHub page](https://github.com/nuagenetworks/nuage-kubernetes).
_--Harmeet Sahni, Director of Product Management, Nuage Networks_

View File

@ -3,9 +3,9 @@ title: " A Stronger Foundation for Creating and Managing Kubernetes Clusters "
date: 2017-01-12
slug: stronger-foundation-for-creating-and-managing-kubernetes-clusters
url: /blog/2017/01/Stronger-Foundation-For-Creating-And-Managing-Kubernetes-Clusters
author: >
[Lucas Käldström](https://twitter.com/kubernetesonarm) (independent)
---
_Editor's note: Todays post is by Lucas Käldström an independent Kubernetes maintainer and SIG-Cluster-Lifecycle member, sharing what the group has been building and whats upcoming.&nbsp;_
Last time you heard from us was in September, when we announced [kubeadm](https://kubernetes.io/blog/2016/09/how-we-made-kubernetes-easy-to-install). The work on making kubeadm a first-class citizen in the Kubernetes ecosystem has continued and evolved. Some of us also met before KubeCon and had a very productive meeting where we talked about what the scopes for our SIG, kubeadm, and kops are.&nbsp;
**Continuing to Define SIG-Cluster-Lifecycle**
@ -100,6 +100,3 @@ In short, we're excited on the roadmap ahead in bringing a lot of these improvem
Thank you for all the feedback and contributions. I hope this has given you some insight in what were doing and encouraged&nbsp;you to join us at our meetings to say hi!
_-- [Lucas Käldström](https://twitter.com/kubernetesonarm), Independent Kubernetes maintainer and SIG-Cluster-Lifecycle member_

View File

@ -3,12 +3,9 @@ title: " Containers as a Service, the foundation for next generation PaaS "
date: 2017-02-21
slug: caas-the-foundation-for-next-gen-paas
url: /blog/2017/02/Caas-The-Foundation-For-Next-Gen-Paas
author: >
[Brendan Burns](https://twitter.com/brendandburns) (Microsoft)
---
_Todays post is by Brendan Burns, Partner Architect, at Microsoft & Kubernetes co-founder._
Containers are revolutionizing the way that people build, package and deploy software. But what is often overlooked is how they are revolutionizing the way that people build the software that builds, packages and deploys software. (its ok if you have to read that sentence twice…) Today, and in a talk at [Container World](https://tmt.knect365.com/container-world/) tomorrow, Im taking a look at how container orchestrators like Kubernetes form the foundation for next generation platform as a service (PaaS). In particular, Im interested in how cloud container as a service (CaaS) platforms like [Azure Container Service](https://azure.microsoft.com/en-us/services/container-service/), [Google Container Engine](https://cloud.google.com/container-engine/) and [others](/docs/getting-started-guides/#hosted-solutions) are becoming the new infrastructure layer that PaaS is built upon.
To see this, its important to consider the set of services that have traditionally been provided by PaaS platforms:
@ -36,13 +33,6 @@ Im thrilled to see how containers and container as a service is changing the
_--[Brendan Burns](https://twitter.com/brendandburns), Partner Architect, at Microsoft and co-founder of Kubernetes_
- Get involved with the Kubernetes project on&nbsp;[GitHub](https://github.com/kubernetes/kubernetes)
- Post questions (or answer questions) on&nbsp;[Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)
- [Download](http://get.k8s.io/) Kubernetes

View File

@ -3,6 +3,8 @@ title: " Highly Available Kubernetes Clusters "
date: 2017-02-02
slug: highly-available-kubernetes-clusters
url: /blog/2017/02/Highly-Available-Kubernetes-Clusters
author: >
Jerzy Szczepkowski (Google)
---
Todays post shows how to set up a reliable, highly available distributed Kubernetes cluster. The support for running such clusters on Google Compute Engine (GCE) was added as an alpha feature in the [Kubernetes 1.5 release](https://kubernetes.io/blog/2016/12/kubernetes-1-5-supporting-production-workloads/).
@ -325,6 +327,3 @@ We have shown how, by adding worker node pools and master replicas, a Highly Ava
- Connect with the community on [Slack](http://slack.k8s.io/)
- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates
_--Jerzy Szczepkowski, Software Engineer, Google_

View File

@ -3,6 +3,8 @@ title: " Deploying PostgreSQL Clusters using StatefulSets "
date: 2017-02-24
slug: postgresql-clusters-kubernetes-statefulsets
url: /blog/2017/02/Postgresql-Clusters-Kubernetes-Statefulsets
author: >
Jeff McCormick ([Crunchy Data](http://crunchydata.com/))
---
_Editors note: Todays guest post is by Jeff McCormick, a developer at Crunchy Data, showing how to build a PostgreSQL cluster using the new Kubernetes StatefulSet feature._
@ -306,7 +308,3 @@ The container is designed to create a subdirectory on that path using the pod ho
StatefulSets is an exciting feature added to Kubernetes for container builders that are implementing clustering. The ordinal values assigned to the set provide a very simple mechanism to make clustering decisions when deploying a PostgreSQL cluster.
_--Jeff McCormick, Developer, [Crunchy Data](http://crunchydata.com/)_

View File

@ -3,12 +3,11 @@ title: " Run Deep Learning with PaddlePaddle on Kubernetes "
date: 2017-02-08
slug: run-deep-learning-with-paddlepaddle-on-kubernetes
url: /blog/2017/02/Run-Deep-Learning-With-Paddlepaddle-On-Kubernetes
author: >
Yi Wang ([Baidu Research](http://research.baidu.com/)),
Xiang Li ([CoreOS](https://coreos.com/))
---
_Editor's note: Today's post is a joint post from the deep learning team at Baidu and the etcd team at CoreOS._
**[![](https://3.bp.blogspot.com/-Mwn3FU9hffI/WJk8QBxA6SI/AAAAAAAAA8w/AS5QoMdPTN8bL9jnixlsCXzj1IfYerhRQCLcB/s200/baidu_research_logo_rgb.png)](https://3.bp.blogspot.com/-Mwn3FU9hffI/WJk8QBxA6SI/AAAAAAAAA8w/AS5QoMdPTN8bL9jnixlsCXzj1IfYerhRQCLcB/s1600/baidu_research_logo_rgb.png)**
@ -159,9 +158,6 @@ Another potential improvement is better PaddlePaddle job configuration. Our expe
PaddlePaddle trainers can utilize multiple GPUs to accelerate computations. GPU is not a first class resource in Kubernetes yet. We have to manage GPUs semi-manually. We would love to work with Kubernetes community to improve GPU support to ensure PaddlePaddle runs the best on Kubernetes.
_--Yi Wang, [Baidu Research](http://research.baidu.com/) and Xiang Li, [CoreOS](https://coreos.com/)_
- [Download](http://get.k8s.io/) Kubernetes
- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes)

View File

@ -3,6 +3,9 @@ title: " Advanced Scheduling in Kubernetes "
date: 2017-03-31
slug: advanced-scheduling-in-kubernetes
url: /blog/2017/03/Advanced-Scheduling-In-Kubernetes
author: >
Ian Lewis (Google),
David Oppenheimer (Google)
---
_Editors note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2017/03/five-days-of-kubernetes-1-6) on what's new in Kubernetes 1.6_
@ -227,6 +230,3 @@ Share your voice at our weekly [community meeting](https://github.com/kubernetes
Many thanks for your contributions.
_--Ian Lewis, Developer Advocate, and David Oppenheimer, Software Engineer, Google_

View File

@ -3,6 +3,10 @@ title: " Dynamic Provisioning and Storage Classes in Kubernetes "
date: 2017-03-29
slug: dynamic-provisioning-and-storage-classes-kubernetes
url: /blog/2017/03/Dynamic-Provisioning-And-Storage-Classes-Kubernetes
author: >
Saad Ali (Google),
Michelle Au (Google),
Matthew De Lio (Google)
---
_Editors note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2017/03/five-days-of-kubernetes-1-6) on what's new in Kubernetes 1.6_
@ -203,8 +207,6 @@ Yes, you can assign a StorageClass to an existing PV by editing the appropriate
**What happens if I delete a PersistentVolumeClaim (PVC)?**
If the volume was dynamically provisioned, then the default reclaim policy is set to “delete”. This means that, by default, when the PVC is deleted, the underlying PV and storage asset will also be deleted. If you want to retain the data stored on the volume, then you must change the reclaim policy from “delete” to “retain” after the PV is provisioned.
_--Saad Ali & Michelle Au, Software Engineers, and Matthew De Lio, Product Manager, Google_
- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)
- Join the community portal for advocates on [K8sPort](http://k8sport.org/)

View File

@ -3,8 +3,9 @@ title: " The K8sPort: Engaging Kubernetes Community One Activity at a Time "
date: 2017-03-24
slug: k8sport-engaging-the-kubernetes-community
url: /blog/2017/03/K8Sport-Engaging-The-Kubernetes-Community
author: >
Ryan Quackenbush (Apprenda)
---
_Editor's note: Today's post is by Ryan Quackenbush, Advocacy Programs Manager at Apprenda, showing a new community portal for Kubernetes advocates: the K8sPort._
The [**K8sPort**](http://k8sport.org/) is a hub designed to help you, the Kubernetes community, earn credit for the hard work you're putting forth in making this one of the most successful open source projects ever. Back at KubeCon Seattle in November, I [presented](https://youtu.be/LwViH5eLoOI) a lightning talk of a preview of K8sPort.
@ -43,9 +44,3 @@ If youre interested in joining the advocacy hub, please join us at [k8sport.o
For a quick walkthrough on K8sPort authentication and the hub itself, see this quick demo, below.
_--Ryan Quackenbush, Advocacy Programs Manager, Apprenda_

View File

@ -3,8 +3,12 @@ title: " Kubernetes 1.6: Multi-user, Multi-workloads at Scale "
date: 2017-03-28
slug: kubernetes-1.6-multi-user-multi-workloads-at-scale
url: /blog/2017/03/Kubernetes-1-6-Multi-User-Multi-Workloads-At-Scale
author: >
Aparna Sinha (Google)
---
Today we're announcing the release of Kubernetes 1.6.
_This article is by Aparna Sinha on behalf of the Kubernetes 1.6 release team._
Today we're announcing the release of Kubernetes 1.6.
In this release the community's focus is on scale and automation, to help you deploy multiple workloads to multiple users on a cluster. We are announcing that 5,000 node clusters are supported. We moved dynamic storage provisioning to _stable_. Role-based access control ([RBAC](/docs/reference/access-authn-authz/rbac/)), [kubefed](/docs/tutorials/federation/set-up-cluster-federation-kubefed/), [kubeadm](/docs/getting-started-guides/kubeadm/), and several scheduling features are moving to _beta_. We have also added intelligent defaults throughout to enable greater automation out of the box.
@ -106,7 +110,4 @@ Share your voice at our weekly [community meeting](https://github.com/kubernetes
Many thanks for your contributions and advocacy!
_-- Aparna Sinha, Senior Product Manager,&nbsp;Kubernetes, Google_
_**PS: read this [series of in-depth articles](https://kubernetes.io/blog/2017/03/five-days-of-kubernetes-1-6) on what's new in Kubernetes 1.6**_

View File

@ -3,6 +3,8 @@ title: " Scalability updates in Kubernetes 1.6: 5,000 node and 150,000 pod clust
date: 2017-03-30
slug: scalability-updates-in-kubernetes-1.6
url: /blog/2017/03/Scalability-Updates-In-Kubernetes-1-6
author: >
Wojciech Tyczynski (Google)
---
_Editor's note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2017/03/five-days-of-kubernetes-1-6) on what's new in Kubernetes 1.6_
@ -80,9 +82,5 @@ If you are interested in scalability and performance, please join our community
- Join our Special Interest Group, [SIG-Scalability](https://github.com/kubernetes/community/blob/master/sig-scalability/README.md), which meets every Thursday at 9:00 AM PST
Thanks for the support and contributions! Read more in-depth posts on what's new in Kubernetes 1.6 [here](https://kubernetes.io/blog/2017/03/five-days-of-kubernetes-1-6).
_-- Wojciech Tyczynski, Software Engineer, Google_
[1] We are investigating why 5000-node clusters have better startup time than 2000-node clusters. The current theory is that it is related to running 5000-node experiments using 64-core master and 2000-node experiments using 32-core master.

View File

@ -3,6 +3,9 @@ title: " Configuring Private DNS Zones and Upstream Nameservers in Kubernetes "
date: 2017-04-04
slug: configuring-private-dns-zones-upstream-nameservers-kubernetes
url: /blog/2017/04/Configuring-Private-Dns-Zones-Upstream-Nameservers-Kubernetes
author: >
Bowei Du (Google),
Matthew DeLio (Google)
---
_Editor's note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2017/03/five-days-of-kubernetes-1-6) on what's new in Kubernetes 1.6_
@ -138,13 +141,6 @@ If youd like to contribute or simply help provide feedback and drive the road
Thanks for your support and contributions. Read more in-depth posts on what's new in Kubernetes 1.6 [here](https://kubernetes.io/blog/2017/03/five-days-of-kubernetes-1-6).
_--Bowei Du, Software Engineer and Matthew DeLio, Product Manager, Google_
- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)
- Join the community portal for advocates on [K8sPort](http://k8sport.org/)
- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes)

View File

@ -3,9 +3,9 @@ title: " How Bitmovin is Doing Multi-Stage Canary Deployments with Kubernetes in
date: 2017-04-21
slug: multi-stage-canary-deployments-with-kubernetes-in-the-cloud-onprem
url: /blog/2017/04/Multi-Stage-Canary-Deployments-With-Kubernetes-In-The-Cloud-Onprem
author: >
Daniel Hoelbling-Inzko (Bitmovin)
---
_Editor's Note: Today's post is by Daniel Hoelbling-Inzko, Infrastructure Architect at Bitmovin, a company that provides services that transcode digital video and audio to streaming formats, sharing insights about their use of Kubernetes._
Running a large scale video encoding infrastructure on multiple public clouds is tough. At [Bitmovin](http://bitmovin.com/), we have been doing it successfully for the last few years, but from an engineering perspective, it's neither been enjoyable nor particularly fun.
So obviously, one of the main things that really sold us on using Kubernetes, was its common abstraction from the different supported cloud providers and the well thought out programming interface it provides. More importantly, the Kubernetes project did not settle for the lowest common denominator approach. Instead, they added the necessary abstract concepts that are required and useful to run containerized workloads in a cloud and then did all the hard work to map these concepts to the different cloud providers and their offerings.
@ -206,11 +206,6 @@ To summarize this post - by migrating our infrastructure to Kubernetes, Bitmovin
We want to thank the Kubernetes community for the incredible job they have done with the project. The velocity at which the project moves is just breathtaking! Maintaining such a high level of quality and robustness in such a diverse environment is really astonishing.
_--Daniel Hoelbling-Inzko, Infrastructure Architect, Bitmovin_
- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)
- Join the community portal for advocates on [K8sPort](http://k8sport.org/)
- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes)

View File

@ -3,6 +3,10 @@ title: " RBAC Support in Kubernetes "
date: 2017-04-06
slug: rbac-support-in-kubernetes
url: /blog/2017/04/Rbac-Support-In-Kubernetes
author: >
Jacob Simpson (Google),
Greg Castle (Google),
CJ Cullen (Google)
---
_Editor's note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2017/03/five-days-of-kubernetes-1-6) on what's new in Kubernetes 1.6_
@ -116,14 +120,6 @@ Thanks for your support and contributions. Read more in-depth posts on what's ne
_-- Jacob Simpson, Greg Castle & CJ Cullen, Software Engineers at Google_
- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)
- Join the community portal for advocates on [K8sPort](http://k8sport.org/)
- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes)

View File

@ -3,9 +3,9 @@ title: " Draft: Kubernetes container development made easy "
date: 2017-05-31
slug: draft-kubernetes-container-development
url: /blog/2017/05/Draft-Kubernetes-Container-Development
author: >
Brendan Burns (Microsoft Azure)
---
_Today's post is by __Brendan Burns, Director of Engineering at Microsoft Azure and Kubernetes co-founder._
About a month ago Microsoft announced the acquisition of Deis to expand our expertise in containers and Kubernetes. Today, I'm excited to announce a new open source project derived from this newly expanded Azure team: Draft.
While by now the strengths of Kubernetes for deploying and managing applications at scale are well understood, the process of developing a new application for Kubernetes is still too hard. It's harder still if you are new to containers, Kubernetes, or developing cloud applications.
@ -182,15 +182,6 @@ Now when we run curl http://$SERVICE\_IP, our first app has been deployed and up
We hope this gives you a sense for everything that Draft can do to streamline development for Kubernetes. Happy drafting!
_--Brendan Burns, Director of Engineering, Microsoft Azure_
- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)
- Join the community portal for advocates on [K8sPort](http://k8sport.org/)
- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates

View File

@ -3,10 +3,9 @@ title: " Kubernetes: a monitoring guide "
date: 2017-05-19
slug: kubernetes-monitoring-guide
url: /blog/2017/05/Kubernetes-Monitoring-Guide
author: >
Jean-Mathieu Saponaro (Datadog)
---
_Today's post is by Jean-Mathieu Saponaro, Research & Analytics Engineer at Datadog, discussing what Kubernetes changes for monitoring, and how you can prepare to properly monitor a containerized infrastructure orchestrated by Kubernetes._
Container technologies are taking the infrastructure world by storm. While containers solve or simplify infrastructure management processes, they also introduce significant complexity in terms of orchestration. That's where Kubernetes comes to our rescue. Just like a conductor directs an orchestra, [Kubernetes](/docs/concepts/overview/what-is-kubernetes/) oversees our ensemble of containers—starting, stopping, creating, and destroying them automatically to keep our applications humming along.
Kubernetes makes managing a containerized infrastructure much easier by creating levels of abstractions such as [pods](/docs/concepts/workloads/pods/pod/) and [services](/docs/concepts/services-networking/service/). We no longer have to worry about where applications are running or if they have enough resources to work properly. But that doesn't change the fact that, in order to ensure good performance, we need to monitor our applications, the containers running them, and Kubernetes itself.
@ -73,11 +72,6 @@ Using Kubernetes drastically simplifies container management. But it requires us
&nbsp;
_--Jean-Mathieu Saponaro, Research & Analytics Engineer, Datadog_
- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes)&nbsp;
- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)&nbsp;
- Connect with the community on [Slack](http://slack.k8s.io/)

View File

@ -3,10 +3,10 @@ title: " Dancing at the Lip of a Volcano: The Kubernetes Security Process - Expl
date: 2017-05-18
slug: kubernetes-security-process-explained
url: /blog/2017/05/Kubernetes-Security-Process-Explained
author: >
Brandon Philips (CoreOS),
Jess Frazelle (Google)
---
_Editor's note: Today's post is by&nbsp; __Jess Frazelle of Google and Brandon Philips of CoreOS about the Kubernetes security disclosures and response policy.__ &nbsp;_
Software running on servers underpins ever growing amounts of the world's commerce, communications, and physical infrastructure. And nearly all of these systems are connected to the internet; which means vital security updates must be applied rapidly. As software developers and IT professionals, we often find ourselves dancing on the edge of a volcano: we may either fall into magma induced oblivion from a security vulnerability exploited before we can fix it, or we may slide off the side of the mountain because of an inadequate process to address security vulnerabilities.&nbsp;
The Kubernetes community believes that we can help teams restore their footing on this volcano with a foundation built on Kubernetes. And the bedrock of this foundation requires a process for quickly acknowledging, patching, and releasing security updates to an ever growing community of Kubernetes users.&nbsp;
@ -26,10 +26,7 @@ As we [continue to harden Kubernetes](https://lwn.net/Articles/720215/), the sec
As a thank you to the Kubernetes community, a special 25 percent discount to CoreOS Fest is available using k8s25code&nbsp;or via this special [25 percent off link](https://coreosfest17.eventbrite.com/?discount=k8s25code) to register today for CoreOS Fest 2017.&nbsp;
_--Brandon Philips of CoreOS and Jess Frazelle of Google_
- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)
- Join the community portal for advocates on [K8sPort](http://k8sport.org/)
- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates

View File

@ -3,8 +3,9 @@ title: " Kubespray Ansible Playbooks foster Collaborative Kubernetes Ops "
date: 2017-05-19
slug: kubespray-ansible-collaborative-kubernetes-ops
url: /blog/2017/05/Kubespray-Ansible-Collaborative-Kubernetes-Ops
author: >
Rob Hirschfeld (RackN)
---
_Today's guest post is by Rob Hirschfeld, co-founder of open infrastructure automation project, Digital Rebar and co-chair of the SIG Cluster Ops. &nbsp;_
**Why Kubespray?**
@ -107,11 +108,6 @@ With Kubespray and Digital Rebar as a repeatable base, extensions get much faste
If this is interesting, please engage with us in the [Cluster Ops SIG](https://github.com/kubernetes/community/tree/master/sig-cluster-ops), [Kubespray](https://github.com/kubernetes-incubator/kubespray)&nbsp;or [Digital Rebar](http://rebar.digital/) communities.&nbsp;
_-- Rob Hirschfeld, co-founder of RackN and co-chair of the Cluster Ops SIG_
- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes)
- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)

View File

@ -3,8 +3,13 @@ title: " Kubernetes 1.7: Security Hardening, Stateful Application Updates and Ex
date: 2017-06-30
slug: kubernetes-1.7-security-hardening-stateful-application-extensibility-updates
url: /blog/2017/06/Kubernetes-1-7-Security-Hardening-Stateful-Application-Extensibility-Updates
author: >
Aparna Sinha (Google),
Ihor Dvoretskyi (Mirantis)
---
Today we're announcing Kubernetes 1.7, a milestone release that adds security, storage and extensibility features motivated by widespread production use of Kubernetes in the most demanding enterprise environments.&nbsp;
_This article is by Aparna Sinha and Ihor Dvoretskyi, on behalf of the Kubernetes 1.7 release team._
Today we're announcing Kubernetes 1.7, a milestone release that adds security, storage and extensibility features motivated by widespread production use of Kubernetes in the most demanding enterprise environments.
At-a-glance, security enhancements in this release include encrypted secrets, network policy for pod-to-pod communication, node authorizer to limit kubelet access and client / server TLS certificate rotation.&nbsp;
@ -77,5 +82,3 @@ The simplest way to get involved is joining one of the many [Special Interest Gr
Many thanks to our vast community of contributors and supporters in making this and all releases possible.
_-- Aparna Sinha, Group Product Manager, Kubernetes Google and Ihor Dvoretskyi, Program Manager, Kubernetes Mirantis_

View File

@ -3,6 +3,8 @@ title: " How Watson Health Cloud Deploys Applications with Kubernetes "
date: 2017-07-14
slug: how-watson-health-cloud-deploys
url: /blog/2017/07/How-Watson-Health-Cloud-Deploys
author: >
Sandhya Kapoor (IBM)
---
Today's post is by [Sandhya Kapoor](https://www.linkedin.com/in/sandhyakapoor/), Senior Technologist, Watson Platform for Health, IBM
@ -141,11 +143,6 @@ Exposing services with Ingress:
To expose our services to outside the cluster, we used Ingress. In IBM Cloud Kubernetes Service, if we create a paid cluster, an Ingress controller is automatically installed for us to use. We were able to access services through Ingress by creating a YAML resource file that specifies the service path.
Sandhya Kapoor, Senior Technologist, Watson Platform for Health, IBM
- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)
- Join the community portal for advocates on [K8sPort](http://k8sport.org/)
- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates

View File

@ -3,6 +3,9 @@ title: " High Performance Networking with EC2 Virtual Private Clouds "
date: 2017-08-11
slug: high-performance-networking-with-ec2
url: /blog/2017/08/High-Performance-Networking-With-Ec2
author: >
Juergen Brendel (Pani Networks),
Chris Marino (Pani Networks)
---
@ -71,7 +74,3 @@ When using Romana v2.0, native VPC networking is now available for clusters of a
![](https://archive.org/download/hpc-ec2-vpc-2/hpc-ec2-vpc-2.png)
-- _Juergen Brendel and Chris Marino, co-founders of Pani Networks, sponsor of the Romana project_

View File

@ -3,8 +3,9 @@ title: " Kompose Helps Developers Move Docker Compose Files to Kubernetes "
date: 2017-08-10
slug: kompose-helps-developers-move-docker
url: /blog/2017/08/Kompose-Helps-Developers-Move-Docker
author: >
Charlie Drage (Red Hat)
---
_Editor's note: today's post is by Charlie Drage, Software Engineer at Red Hat giving an update about the Kubernetes project Kompose._
I'm pleased to announce that [Kompose](https://github.com/kubernetes/kompose), a conversion tool for developers to transition Docker Compose applications to Kubernetes, has graduated from the [Kubernetes Incubator](https://github.com/kubernetes/community/blob/master/incubator.md) to become an official part of the project.
@ -147,10 +148,6 @@ As we continue development, we will strive to convert as many Docker Compose key
- [Kompose Documentation](https://github.com/kubernetes/kompose/tree/master/docs)
--Charlie Drage, Software Engineer, Red Hat
- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)
- Join the community portal for advocates on [K8sPort](http://k8sport.org/)
- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates

View File

@ -3,8 +3,9 @@ title: " Kubernetes Meets High-Performance Computing "
date: 2017-08-22
slug: kubernetes-meets-high-performance
url: /blog/2017/08/Kubernetes-Meets-High-Performance
author: >
Robert Lalonde (Univa)
---
Editor's note: today's post is by Robert Lalonde, general manager at Univa, on supporting mixed HPC and containerized applications &nbsp;
Anyone who has worked with Docker can appreciate the enormous gains in efficiency achievable with containers. While Kubernetes excels at orchestrating containers, high-performance computing (HPC) applications can be tricky to deploy on Kubernetes.

View File

@ -3,8 +3,9 @@ title: " Introducing the Resource Management Working Group "
date: 2017-09-21
slug: introducing-resource-management-working
url: /blog/2017/09/Introducing-Resource-Management-Working
author: >
Jeremy Eder (Red Hat)
---
_**Editor's note: today's post is by Jeremy Eder, Senior Principal Software Engineer at Red Hat, on the formation of the Resource Management Working Group**_
## Why are we here?
Kubernetes has evolved to support diverse and increasingly complex classes of applications. We can onboard and scale out modern, cloud-native web applications based on microservices, batch jobs, and stateful applications with persistent storage requirements.

View File

@ -4,11 +4,10 @@ date: 2017-09-29
slug: kubernetes-18-security-workloads-and
url: /blog/2017/09/Kubernetes-18-Security-Workloads-And
evergreen: true
author: >
[Kubernetes v1.8 Release Team](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.8/release_team.md)
---
**Authors:** Kubernetes v1.8 release team
We're pleased to announce the delivery of Kubernetes 1.8, our third release this year. Kubernetes 1.8 represents a snapshot of many exciting enhancements and refinements underway. In addition to functional improvements, we're increasing project-wide focus on maturing [process](https://github.com/kubernetes/sig-release), formalizing [architecture](https://github.com/kubernetes/community/tree/master/sig-architecture), and strengthening Kubernetes' [governance model](https://github.com/kubernetes/community/tree/master/community/elections/2017). The evolution of mature processes clearly signals that sustainability is a driving concern, and helps to ensure that Kubernetes is a viable and thriving project far into the future.

View File

@ -4,9 +4,10 @@ title: " Kubernetes StatefulSets & DaemonSets Updates "
date: 2017-09-27
slug: kubernetes-statefulsets-daemonsets
url: /blog/2017/09/Kubernetes-Statefulsets-Daemonsets
author: >
Janet Kuo (Google),
Kenneth Owens (Google)
---
Editor's note: today's post is by Janet Kuo and Kenneth Owens, Software Engineers at Google.
This post talks about recent updates to the [DaemonSet](/docs/concepts/workloads/controllers/daemonset/) and [StatefulSet](/docs/concepts/workloads/controllers/statefulset/) API objects for Kubernetes. We explore these features using [Apache ZooKeeper](https://zookeeper.apache.org/) and [Apache Kafka](https://kafka.apache.org/) StatefulSets and a [Prometheus node exporter](https://github.com/prometheus/node_exporter) DaemonSet.

View File

@ -3,8 +3,9 @@ title: " Windows Networking at Parity with Linux for Kubernetes "
date: 2017-09-08
slug: windows-networking-at-parity-with-linux
url: /blog/2017/09/Windows-Networking-At-Parity-With-Linux
author: >
Jason Messer (Microsoft)
---
_**Editor's note: today's post is by Jason Messer, Principal PM Manager at Microsoft, on improvements to the Windows network stack to support the Kubernetes CNI model.**_
Since I last blogged about [Kubernetes Networking for Windows](https://blogs.technet.microsoft.com/networking/2017/04/04/windows-networking-for-kubernetes/) four months ago, the Windows Core Networking team has made tremendous progress in both the platform and open source Kubernetes projects. With the updates, Windows is now on par with Linux in terms of networking. Customers can now deploy mixed-OS, Kubernetes clusters in any environment including Azure, on-premises, and on 3rd-party cloud stacks with the same network primitives and topologies supported on Linux without any workarounds, “hacks”, or 3rd-party switch extensions.

View File

@ -3,8 +3,10 @@ title: " Enforcing Network Policies in Kubernetes "
date: 2017-10-30
slug: enforcing-network-policies-in-kubernetes
url: /blog/2017/10/Enforcing-Network-Policies-In-Kubernetes
author: >
Ahmet Alp Balkan (Google)
---
_**Editor's note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2017/10/five-days-of-kubernetes-18) on what's new in Kubernetes 1.8. Today's post comes from Ahmet Alp Balkan, Software Engineer, Google.**_
_**Editor's note:** this post is part of a [series of in-depth articles](/blog/2017/10/five-days-of-kubernetes-18) on what's new in Kubernetes 1.8._

View File

@ -3,8 +3,10 @@ title: " kubeadm v1.8 Released: Introducing Easy Upgrades for Kubernetes Cluste
date: 2017-10-25
slug: kubeadm-v18-released
url: /blog/2017/10/Kubeadm-V18-Released
author: >
Lucas Käldström (Weaveworks)
---
**_Editor's note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2017/10/five-days-of-kubernetes-18) on what's new in Kubernetes 1.8_**
_**Editor's note:** this post is part of a [series of in-depth articles](/blog/2017/10/five-days-of-kubernetes-18) on what's new in Kubernetes 1.8._
Since its debut in [September 2016](https://kubernetes.io/blog/2016/09/how-we-made-kubernetes-easy-to-install), the Cluster Lifecycle Special Interest Group (SIG) has established kubeadm as the easiest Kubernetes bootstrap method. Now, we're releasing kubeadm v1.8.0 in tandem with the release of [Kubernetes v1.8.0](https://kubernetes.io/blog/2017/09/kubernetes-18-security-workloads-and). In this blog post, I'll walk you through the changes we've made to kubeadm since the last update, the scope of kubeadm, and how you can contribute to this effort.
@ -99,7 +101,3 @@ If you want to get involved in these efforts, join SIG Cluster Lifecycle. We [me
If you want to know what a kubeadm developer does at a given time in the Kubernetes release cycle, check out [this doc](https://github.com/kubernetes/kubeadm/blob/master/docs/release-cycle.md). Finally, don't hesitate to join if any of our upcoming projects are of interest to you!
Thank you,
Lucas Käldström
Kubernetes maintainer & SIG Cluster Lifecycle co-lead
[Weaveworks](https://www.weave.works/?utm_source=k8&utm_medium=ww&utm_campaign=blog) contractor

View File

@ -3,8 +3,12 @@ title: " Request Routing and Policy Management with the Istio Service Mesh "
date: 2017-10-10
slug: request-routing-and-policy-management
url: /blog/2017/10/Request-Routing-And-Policy-Management
author: >
Frank Budinsky (IBM),
Andra Cismaru (Google),
Israel Shalom (Google)
---
**_Editor's note: Today's post by Frank Budinsky, Software Engineer, IBM, Andra Cismaru, Software Engineer, Google, and Israel Shalom, Product Manager, Google, is the second post in a three-part series on Istio. It offers a closer look at request routing and policy management._**
_**Editor's note:** Todays post is the second post in a three-part series on Istio._
In a [previous article](https://kubernetes.io/blog/2017/05/managing-microservices-with-istio-service-mesh), we looked at a [simple application (Bookinfo)](https://istio.io/docs/guides/bookinfo.html) that is composed of four separate microservices. The article showed how to deploy an application with Kubernetes and an Istio-enabled cluster without changing any application code. The article also outlined how to view Istio provided L7 metrics on the running services.
@ -447,4 +451,4 @@ Stopping the load generator means the limit will no longer be exceeded: the blac
## Summary
We've shown you how to introduce advanced features like HTTP request routing and policy injection into a service mesh configured with Istio without restarting any of the services. This lets you develop and deploy without worrying about the ongoing management of the service mesh; service-wide policies can always be added later.
In the next and last installment of this series, we'll focus on Istio's security and authentication capabilities. We'll discuss how to secure all interservice communications in a mesh, even against insiders with access to the network, without any changes to the application code or the deployment.
In the next and last installment of this series, we'll focus on Istio's security and authentication capabilities. We'll discuss how to secure all interservice communications in a mesh, even against insiders with access to the network, without any changes to the application code or the deployment.

View File

@ -3,11 +3,10 @@ title: " Introducing Software Certification for Kubernetes "
date: 2017-10-19
slug: software-conformance-certification
url: /blog/2017/10/Software-Conformance-Certification
author: >
William Denniss (Google)
---
_**Editor's Note: Today's post is by William Denniss, Product Manager, Google Cloud on the new Certified Kubernetes Conformance Program.**_
Over the last three years, Kubernetes® has seen wide-scale adoption by a vibrant and diverse community of providers. In fact, there are now more than [60](https://docs.google.com/spreadsheets/d/1LxSqBzjOxfGx3cmtZ4EbB_BGCxT_wlxW_xgHVVa23es/edit#gid=0) known Kubernetes platforms and distributions. From the start, one goal of Kubernetes has been consistency and portability.
In order to better serve this goal, today the Kubernetes community and the Cloud Native Computing Foundation® (CNCF®) announce the availability of the beta Certified Kubernetes Conformance Program. The Kubernetes conformance certification program gives users the confidence that when they use a Certified Kubernetes™ product, they can rely on a high level of common functionality. Certification provides Independent Software Vendors (ISVs) confidence that if their customer is using a Certified Kubernetes product, their software will behave as expected.

View File

@ -3,8 +3,10 @@ title: " Using RBAC, Generally Available in Kubernetes v1.8 "
date: 2017-10-28
slug: using-rbac-generally-available-18
url: /blog/2017/10/Using-Rbac-Generally-Available-18
author: >
Eric Chiang (CoreOS)
---
**_Editor's note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2017/10/five-days-of-kubernetes-18) on what's new in Kubernetes 1.8. Today's post comes from Eric Chiang, software engineer, CoreOS, and SIG-Auth co-lead._**
_**Editor's note:** this post is part of a [series of in-depth articles](/blog/2017/10/five-days-of-kubernetes-18) on what's new in Kubernetes 1.8._
Kubernetes 1.8 represents a significant milestone for the [role-based access control (RBAC) authorizer](/docs/reference/access-authn-authz/rbac/), which was promoted to GA in this release. RBAC is a mechanism for controlling access to the Kubernetes API, and since its [beta in 1.6](https://kubernetes.io/blog/2017/04/rbac-support-in-kubernetes), many Kubernetes clusters and provisioning strategies have enabled it by default.

View File

@ -3,10 +3,11 @@ title: " Containerd Brings More Container Runtime Options for Kubernetes "
date: 2017-11-02
slug: containerd-container-runtime-options-kubernetes
url: /blog/2017/11/Containerd-Container-Runtime-Options-Kubernetes
author: >
Lantao Liu (Google),
Mike Brown (IBM)
---
**Authors:** Lantao Liu (Google), and Mike Brown (IBM)
_Update: Kubernetes support for Docker via `dockershim` is now deprecated.
For more information, read the [deprecation notice](/blog/2020/12/08/kubernetes-1-20-release-announcement/#dockershim-deprecation).
You can also discuss the deprecation via a dedicated [GitHub issue](https://github.com/kubernetes/kubernetes/issues/106917)._

View File

@ -3,9 +3,9 @@ title: " Kubernetes the Easy Way "
date: 2017-11-01
slug: kubernetes-easy-way
url: /blog/2017/11/Kubernetes-Easy-Way
author: >
Dan Garfield (Codefresh)
---
**_Editor's note: Today's post is by Dan Garfield, VP of Marketing at Codefresh, on how to set up and easily deploy a Kubernetes cluster._**
Kelsey Hightower wrote an invaluable guide for Kubernetes called [Kubernetes the Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way). It's an awesome resource for those looking to understand the ins and outs of Kubernetes—but what if you want to put Kubernetes on easy mode? That's something we've been working on together with Google Cloud. In this guide, we'll show you how to get a cluster up and running, as well as how to actually deploy your code to that cluster and run it.

View File

@ -3,8 +3,10 @@ title: " Securing Software Supply Chain with Grafeas "
date: 2017-11-03
slug: securing-software-supply-chain-grafeas
url: /blog/2017/11/Securing-Software-Supply-Chain-Grafeas
author: >
Kelsey Hightower (Google),
Sandra Guo (Google)
---
**_Editor's note: This post is written by Kelsey Hightower, Staff Developer Advocate at Google, and Sandra Guo, Product Manager at Google._**
Kubernetes has evolved to support increasingly complex classes of applications, enabling the development of two major industry trends: hybrid cloud and microservices. With increasing complexity in production environments, customers—especially enterprises—are demanding better ways to manage their software supply chain with more centralized visibility and control over production deployments.

View File

@ -3,12 +3,11 @@ title: " Introducing Kubeflow - A Composable, Portable, Scalable ML Stack Built
date: 2017-12-21
slug: introducing-kubeflow-composable
url: /blog/2017/12/Introducing-Kubeflow-Composable
author: >
Jeremy Lewi (Google),
David Aronchick (Google)
---
**_Today's post is by David Aronchick and Jeremy Lewi, a PM and Engineer on the Kubeflow project, a new open source GitHub repo dedicated to making using machine learning (ML) stacks on Kubernetes easy, fast and extensible._**
## Kubernetes and Machine Learning
Kubernetes has quickly become the hybrid solution for deploying complicated workloads anywhere. While it started with just stateless services, customers have begun to move complex workloads to the platform, taking advantage of rich APIs, reliability and performance provided by Kubernetes. One of the fastest growing use cases is to use Kubernetes as the deployment platform of choice for machine learning.
@ -168,8 +167,6 @@ And were just getting started! We would love for you to help. How you might a
- Please download and run kubeflow, and submit bugs!
Thank you for your support so far, we could not be more excited!
_Jeremy Lewi & David Aronchick_
Google
Note:
* This article was amended in June 2023 to update the trained model bucket location.

View File

@ -4,10 +4,10 @@ date: 2017-12-15
slug: kubernetes-19-workloads-expanded-ecosystem
url: /blog/2017/12/Kubernetes-19-Workloads-Expanded-Ecosystem
evergreen: true
author: >
[Kubernetes v1.9 Release Team](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.9/release_team.md)
---
**Authors:** Kubernetes v1.9 release team
We're pleased to announce the delivery of Kubernetes 1.9, our fourth and final release this year.
Today's release continues the evolution of an increasingly rich feature set, more robust stability, and even greater community contributions. As the fourth release of the year, it gives us an opportunity to look back at the progress made in key areas. Particularly notable is the advancement of the Apps Workloads API to stable. This removes any reservations potential adopters might have had about the functional stability required to run mission-critical workloads. Another big milestone is the beta release of Windows support, which opens the door for many Windows-specific applications and workloads to run in Kubernetes, significantly expanding the implementation scenarios and enterprise readiness of Kubernetes.

View File

@ -3,8 +3,16 @@ title: " PaddlePaddle Fluid: Elastic Deep Learning on Kubernetes "
date: 2017-12-06
slug: paddle-paddle-fluid-elastic-learning
url: /blog/2017/12/Paddle-Paddle-Fluid-Elastic-Learning
author: >
Xu Yan (Baidu Research),
Helin Wang (Baidu Research),
Yi Wu (Baidu Research),
Xi Chen (Baidu Research),
Weibao Gong (Baidu Research),
Xiang Li (CoreOS),
Yi Wang (Baidu Research)
---
_Editor's note: Today's post is a joint post from the deep learning team at Baidu and the etcd team at CoreOS._
_**Editor's note:** Today's post is a joint post from the deep learning team at Baidu and the etcd team at CoreOS_
@ -39,11 +47,4 @@ In the second test, each experiment ran 400 Nginx pods, which has higher priorit
We continue to work on FluidEDL and welcome comments and contributions. Visit the [PaddlePaddle repo](https://github.com/PaddlePaddle/cloud), where you can find the [design doc](https://github.com/PaddlePaddle/cloud/tree/develop/doc/design), a [simple tutorial](https://github.com/PaddlePaddle/cloud/blob/develop/doc/autoscale/example/autoscale.md), and [experiment details](https://github.com/PaddlePaddle/cloud/tree/develop/doc/edl/experiment).
- Xu Yan (Baidu Research)
- Helin Wang (Baidu Research)
- Yi Wu (Baidu Research)
- Xi Chen (Baidu Research)
- Weibao Gong (Baidu Research)
- Xiang Li (CoreOS)
- Yi Wang (Baidu Research)

View File

@ -3,8 +3,8 @@ layout: blog
title: "Feature Highlight: CPU Manager"
date: 2018-07-24
author: >
[Balaji Subramaniam]((mailto:balaji.subramaniam@intel.com)) (Intel),
[Connor Doyle](mailto:connor.p.doyle@intel.com) (Intel])
[Balaji Subramaniam](mailto:balaji.subramaniam@intel.com) (Intel),
[Connor Doyle](mailto:connor.p.doyle@intel.com) (Intel)
---
This blog post describes the [CPU Manager](/docs/tasks/administer-cluster/cpu-management-policies/), a beta feature in [Kubernetes](https://kubernetes.io/). The CPU manager feature enables better placement of workloads in the [Kubelet](/docs/reference/command-line-tools-reference/kubelet/), the Kubernetes node agent, by allocating exclusive CPUs to certain pod containers.

Binary file not shown.

After

Width:  |  Height:  |  Size: 31 KiB

View File

@ -0,0 +1,196 @@
---
layout: blog
title: "10 Years of Kubernetes"
date: 2024-06-06
slug: 10-years-of-kubernetes
author: >
[Bob Killen](https://github.com/mrbobbytables) (CNCF),
[Chris Short](https://github.com/chris-short) (AWS),
[Frederico Muñoz](https://github.com/fsmunoz) (SAS),
[Kaslin Fields](https://github.com/kaslin) (Google),
[Tim Bannister](https://github.com/sftim) (The Scale Factory),
and every contributor across the globe
---
![KCSEU 2024 group photo](kcseu2024.jpg)
Ten (10) years ago, on June 6th, 2014, the
[first commit](https://github.com/kubernetes/kubernetes/commit/2c4b3a562ce34cddc3f8218a2c4d11c7310e6d56)
of Kubernetes was pushed to GitHub. That first commit with 250 files and 47,501 lines of go, bash
and markdown kicked off the project we have today. Who could have predicted that 10 years later,
Kubernetes would grow to become one of the largest Open Source projects to date with over
[88,000 contributors](https://k8s.devstats.cncf.io/d/24/overall-project-statistics?orgId=1) from
more than [8,000 companies](https://www.cncf.io/reports/kubernetes-project-journey-report/), across
44 countries.
<img src="kcscn2019.jpg" alt="KCSCN 2019" class="left" style="max-width: 20em; margin: 1em" >
This milestone isn't just for Kubernetes but for the Cloud Native ecosystem that blossomed from
it. There are close to [200 projects](https://all.devstats.cncf.io/d/18/overall-project-statistics-table?orgId=1)
within the CNCF itself, with contributions from
[240,000+ individual contributors](https://all.devstats.cncf.io/d/18/overall-project-statistics-table?orgId=1) and
thousands more in the greater ecosystem. Kubernetes would not be where it is today without them, the
[7M+ Developers](https://www.cncf.io/blog/2022/05/18/slashdata-cloud-native-continues-to-grow-with-more-than-7-million-developers-worldwide/),
and the even larger user community that have all helped shape the ecosystem that it is today.
## Kubernetes' beginnings - a converging of technologies
The ideas underlying Kubernetes started well before the first commit, or even the first prototype
([which came about in 2013](/blog/2018/07/20/the-history-of-kubernetes-the-community-behind-it/)).
In the early 2000s, Moore's Law was well in effect. Computing hardware was becoming more and more
powerful at an incredibly fast rate. Correspondingly, applications were growing more and more
complex. This combination of hardware commoditization and application complexity pointed to a need
to further abstract software from hardware, and solutions started to emerge.
Like many companies at the time, Google was scaling rapidly, and its engineers were interested in
the idea of creating a form of isolation in the Linux kernel. Google engineer Rohit Seth described
the concept in an [email in 2006](https://lwn.net/Articles/199643/):
> We use the term container to indicate a structure against which we track and charge utilization of
system resources like memory, tasks, etc. for a Workload.
<img src="future.png" alt="The future of Linux containers" class="right" style="max-width: 20em; margin: 1em">
In March of 2013, a 5-minute lightning talk called
["The future of Linux Containers," presented by Solomon Hykes at PyCon](https://youtu.be/wW9CAH9nSLs?si=VtK_VFQHymOT7BIB),
introduced an upcoming open source tool called "Docker" for creating and using Linux
Containers. Docker introduced a level of usability to Linux Containers that made them accessible to
more users than ever before, and the popularity of Docker, and thus of Linux Containers,
skyrocketed. With Docker making the abstraction of Linux Containers accessible to all, running
applications in much more portable and repeatable ways was suddenly possible, but the question of
scale remained.
Google's Borg system for managing application orchestration at scale had adopted Linux containers as
they were developed in the mid-2000s. Since then, the company had also started working on a new
version of the system called "Omega." Engineers at Google who were familiar with the Borg and Omega
systems saw the popularity of containerization driven by Docker. They recognized not only the need
for an open source container orchestration system but its "inevitability," as described by Brendan
Burns in
[this blog post](/blog/2018/07/20/the-history-of-kubernetes-the-community-behind-it/).
That realization in the fall of 2013 inspired a small team to start working on a project that would
later become **Kubernetes**. That team included Joe Beda, Brendan Burns, Craig McLuckie, Ville
Aikas, Tim Hockin, Dawn Chen, Brian Grant, and Daniel Smith.
## A decade of Kubernetes
<img src="kubeconeu2017.jpg" alt="KubeCon EU 2017" class="left" style="max-width: 20em; margin: 1em">
Kubernetes' history begins with that historic commit on June 6th, 2014, and the subsequent
announcement of the project in a June 10th
[keynote by Google engineer Eric Brewer at DockerCon 2014](https://youtu.be/YrxnVKZeqK8?si=Q_wYBFn7dsS9H3k3)
(and its corresponding [Google blog](https://cloudplatform.googleblog.com/2014/06/an-update-on-container-support-on-google-cloud-platform.html)).
Over the next year, a small community of
[contributors, largely from Google and Red Hat](https://k8s.devstats.cncf.io/d/9/companies-table?orgId=1&var-period_name=Before%20joining%20CNCF&var-metric=contributors),
worked hard on the project, culminating in a [version 1.0 release on July 21st, 2015](https://cloudplatform.googleblog.com/2015/07/Kubernetes-V1-Released.html).
Alongside 1.0, Google announced that Kubernetes would be donated to a newly formed branch of the
Linux Foundation called the
[Cloud Native Computing Foundation (CNCF)](https://www.cncf.io/announcements/2015/06/21/new-cloud-native-computing-foundation-to-drive-alignment-among-container-technologies/).
Despite reaching 1.0, the Kubernetes project was still very challenging to use and
understand. Kubernetes contributor Kelsey Hightower took special note of the project's shortcomings
in ease of use and on July 7, 2016, he pushed the
[first commit of his famed "Kubernetes the Hard Way" guide](https://github.com/kelseyhightower/kubernetes-the-hard-way/commit/9d7ace8b186f6ebd2e93e08265f3530ec2fba81c).
The project has changed enormously since its original 1.0 release; experiencing a number of big wins
such as
[Custom Resource Definitions (CRD) going GA in 1.16](/blog/2019/09/18/kubernetes-1-16-release-announcement/)
or [full dual stack support launching in 1.23](/blog/2021/12/08/dual-stack-networking-ga/) and
community "lessons learned" from the [removal of widely used beta APIs in 1.22](/blog/2021/07/14/upcoming-changes-in-kubernetes-1-22/)
or the deprecation of [Dockershim](/blog/2020/12/02/dockershim-faq/).
Some notable updates, milestones and events since 1.0 include:
* December 2016 - [Kubernetes 1.5](/blog/2016/12/kubernetes-1-5-supporting-production-workloads/) introduces runtime pluggability with initial CRI support and alpha Windows node support. OpenAPI also appears for the first time, paving the way for clients to be able to discover extension APIs.
* This release also introduced StatefulSets and PodDisruptionBudgets in Beta.
* April 2017 — [Introduction of Role-Based Access Controls or RBAC](/blog/2017/04/rbac-support-in-kubernetes/).
* June 2017 — In [Kubernetes 1.7](/blog/2017/06/kubernetes-1-7-security-hardening-stateful-application-extensibility-updates/), ThirdPartyResources or "TPRs" are replaced with CustomResourceDefinitions (CRDs).
* December 2017 — [Kubernetes 1.9](/blog/2017/12/kubernetes-19-workloads-expanded-ecosystem/) sees the Workloads API becoming GA (Generally Available). The release blog states: _"Deployment and ReplicaSet, two of the most commonly used objects in Kubernetes, are now stabilized after more than a year of real-world use and feedback."_
* December 2018 — In 1.13, the Container Storage Interface (CSI) reaches GA, kubeadm tool for bootstrapping minimum viable clusters reaches GA, and CoreDNS becomes the default DNS server.
* September 2019 — [Custom Resource Definitions go GA](/blog/2019/09/18/kubernetes-1-16-release-announcement/) in Kubernetes 1.16.
* August 2020 — [Kubernetes 1.19](/blog/2020/08/26/kubernetes-release-1.19-accentuate-the-paw-sitive/) increases the support window for releases to 1 year.
* December 2020 — [Dockershim is deprecated](/blog/2020/12/18/kubernetes-1.20-pod-impersonation-short-lived-volumes-in-csi/) in 1.20
* April 2021 — the [Kubernetes release cadence changes](/blog/2021/07/20/new-kubernetes-release-cadence/#:~:text=On%20April%2023%2C%202021%2C%20the,Kubernetes%20community's%20contributors%20and%20maintainers.) from 4 releases per year to 3 releases per year.
* July 2021 — Widely used beta APIs are [removed](/blog/2021/07/14/upcoming-changes-in-kubernetes-1-22/) in Kubernetes 1.22.
* May 2022 — Kubernetes 1.24 sees [beta APIs become disabled by default](/blog/2022/05/03/kubernetes-1-24-release-announcement/) to reduce upgrade conflicts and removal of [Dockershim](/dockershim), leading to [widespread user confusion](https://www.youtube.com/watch?v=a03Hh1kd6KE) (we've since [improved our communication!](https://github.com/kubernetes/community/tree/master/communication/contributor-comms))
* December 2022 — In 1.26, there was a significant batch and [Job API overhaul](/blog/2022/12/29/scalable-job-tracking-ga/) that paved the way for better support for AI / ML / batch workloads.
**PS:** Curious to see how far the project has come for yourself? Check out this [tutorial for spinning up a Kubernetes 1.0 cluster](https://github.com/spurin/kubernetes-v1.0-lab) created by community members Carlos Santana, Amim Moises Salum Knabben, and James Spurin.
---
Kubernetes offers more extension points than we can count. Originally designed to work with Docker
and only Docker, now you can plug in any container runtime that adheres to the CRI standard. There
are other similar interfaces: CSI for storage and CNI for networking. And that's far from all you
can do. In the last decade, whole new patterns have emerged, such as using
[Custom Resource Definitions](/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
(CRDs) to support third-party controllers - now a huge part of the Kubernetes ecosystem.
The community building the project has also expanded immensely over the last decade. Using
[DevStats](https://k8s.devstats.cncf.io/d/24/overall-project-statistics?orgId=1), we can see the
incredible volume of contribution over the last decade that has made Kubernetes the
[second-largest open source project in the world](https://www.cncf.io/reports/kubernetes-project-journey-report/):
* **88,474** contributors
* **15,121** code committers
* **4,228,347** contributions
* **158,530** issues
* **311,787** pull requests
## Kubernetes today
<img src="welcome.jpg" alt="KubeCon NA 2023" class="left" style="max-width: 20em; margin: 1em">
Since its early days, the project has seen enormous growth in technical capability, usage, and
contribution. The project is still actively working to improve and better serve its users.
In the upcoming 1.31 release, the project will celebrate the culmination of an important long-term
project: the removal of in-tree cloud provider code. In this
[largest migration in Kubernetes history](/blog/2024/05/20/completing-cloud-provider-migration/),
roughly 1.5 million lines of code have been removed, reducing the binary sizes of core components
by approximately 40%. In the project's early days, it was clear that extensibility would be key to
success. However, it wasn't always clear how that extensibility should be achieved. This migration
removes a variety of vendor-specific capabilities from the core Kubernetes code
base. Vendor-specific capabilities can now be better served by other pluggable extensibility
features or patterns, such as
[Custom Resource Definitions (CRDs)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
or API standards like the [Gateway API](https://gateway-api.sigs.k8s.io/).
Kubernetes also faces new challenges in serving its vast user base, and the community is adapting
accordingly. One example of this is the migration of image hosting to the new, community-owned
registry.k8s.io. The egress bandwidth and costs of providing pre-compiled binary images for user
consumption have become immense. This new registry change enables the community to continue
providing these convenient images in more cost- and performance-efficient ways. Make sure you check
out the [blog post](/blog/2022/11/28/registry-k8s-io-faster-cheaper-ga/) and
update any automation you have to use registry.k8s.io!
## The future of Kubernetes
<img src="lts.jpg" alt="" class="right" width="300px" style="max-width: 20em; margin: 1em">
A decade in, the future of Kubernetes still looks bright. The community is prioritizing changes that
both improve the user experience and enhance the sustainability of the project. The world of
application development continues to evolve, and Kubernetes is poised to change along with it.
In 2024, the advent of AI changed a once-niche workload type into one of prominent
importance. Distributed computing and workload scheduling has always gone hand-in-hand with the
resource-intensive needs of Artificial Intelligence, Machine Learning, and High Performance
Computing workloads. Contributors are paying close attention to the needs of newly developed
workloads and how Kubernetes can best serve them. The new
[Serving Working Group](https://github.com/kubernetes/community/tree/master/wg-serving) is one
example of how the community is organizing to address these workloads' needs. It's likely that the
next few years will see improvements to Kubernetes' ability to manage various types of hardware, and
its ability to manage the scheduling of large batch-style workloads which are run across hardware in
chunks.
The ecosystem around Kubernetes will continue to grow and evolve. In the future, initiatives to
maintain the sustainability of the project, like the migration of in-tree vendor code and the
registry change, will be ever more important.
The next 10 years of Kubernetes will be guided by its users and the ecosystem, but most of all, by
the people who contribute to it. The community remains open to new contributors. You can find more
information about contributing in our New Contributor Course at
[https://k8s.dev/docs/onboarding](https://k8s.dev/docs/onboarding).
We look forward to building the future of Kubernetes with you!
{{< figure src="kcsna2023.jpg" alt="KCSNA 2023">}}

Binary file not shown.

After

Width:  |  Height:  |  Size: 183 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 274 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 479 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 76 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 284 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 279 KiB

View File

@ -0,0 +1,13 @@
---
title: Kubernetes' 10th birthday
content_type: special
display_date_start: 2024-06-06
display_date_end: 2024-06-15
# Only ever included, not rendered as a page in its own right
_build:
list: never
render: false
---
Kubernetes 10 Years

View File

@ -59,6 +59,7 @@ Kubernetes as a project supports and maintains [AWS](https://github.com/kubernet
works with the [NGINX](https://www.nginx.com/resources/glossary/nginx/) webserver (as a proxy).
* The [ngrok Kubernetes Ingress Controller](https://github.com/ngrok/kubernetes-ingress-controller) is an open source controller for adding secure public access to your K8s services using the [ngrok platform](https://ngrok.com).
* The [OCI Native Ingress Controller](https://github.com/oracle/oci-native-ingress-controller#readme) is an Ingress controller for Oracle Cloud Infrastructure which allows you to manage the [OCI Load Balancer](https://docs.oracle.com/en-us/iaas/Content/Balance/home.htm).
* [OpenNJet Ingress Controller](https://gitee.com/njet-rd/open-njet-kic) is an [OpenNJet](https://njet.org.cn/)-based ingress controller.
* The [Pomerium Ingress Controller](https://www.pomerium.com/docs/k8s/ingress.html) is based on [Pomerium](https://pomerium.com/), which offers context-aware access policy.
* [Skipper](https://opensource.zalando.com/skipper/kubernetes/ingress-controller/) HTTP router and reverse proxy for service composition, including use cases like Kubernetes Ingress, designed as a library to build your custom proxy.
* The [Traefik Kubernetes Ingress provider](https://doc.traefik.io/traefik/providers/kubernetes-ingress/) is an

View File

@ -101,7 +101,7 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: registry.k8s.io/nginx-slim:0.8
image: registry.k8s.io/nginx-slim:0.24
ports:
- containerPort: 80
name: web

View File

@ -81,7 +81,7 @@ Value | Description
`Pending` | The Pod has been accepted by the Kubernetes cluster, but one or more of the containers has not been set up and made ready to run. This includes time a Pod spends waiting to be scheduled as well as the time spent downloading container images over the network.
`Running` | The Pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting.
`Succeeded` | All containers in the Pod have terminated in success, and will not be restarted.
`Failed` | All containers in the Pod have terminated, and at least one container has terminated in failure. That is, the container either exited with non-zero status or was terminated by the system.
`Failed` | All containers in the Pod have terminated, and at least one container has terminated in failure. That is, the container either exited with non-zero status or was terminated by the system, and is not set for automatic restarting.
`Unknown` | For some reason the state of the Pod could not be obtained. This phase typically occurs due to an error in communicating with the node where the Pod should be running.
{{< note >}}
@ -671,4 +671,4 @@ for more details.
* For detailed information about Pod and container status in the API, see
the API reference documentation covering
[`status`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodStatus) for Pod.
[`status`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodStatus) for Pod.

View File

@ -249,11 +249,11 @@ also known as PascalCase. Do not write API kinds with code formatting.
Don't split an API object name into separate words. For example, use PodTemplateList, not Pod Template List.
For more information about PascalCase and code formatting, please review the related guidance on
For more information about PascalCase and code formatting, review the related guidance on
[Use upper camel case for API objects](/docs/contribute/style/style-guide/#use-upper-camel-case-for-api-objects)
and [Use code style for inline code, commands, and API objects](/docs/contribute/style/style-guide/#code-style-inline-code).
For more information about Kubernetes API terminologies, please review the related
For more information about Kubernetes API terminologies, review the related
guidance on [Kubernetes API terminology](/docs/reference/using-api/api-concepts/#standard-api-terminology).
## Code snippet formatting

View File

@ -1,5 +1,5 @@
---
title: NameGenerationRetries
title: RetryGenerateName
content_type: feature_gate
_build:

View File

@ -59,8 +59,8 @@ Example CEL expressions:
| `self.metadata.name == 'singleton'` | Validate that an object's name matches a specific value (making it a singleton) |
| `self.set1.all(e, !(e in self.set2))` | Validate that two listSets are disjoint |
| `self.names.size() == self.details.size() && self.names.all(n, n in self.details)` | Validate the 'details' map is keyed by the items in the 'names' listSet |
| `self.details.all(key, key.matches('^[a-zA-Z]*$')` | Validate the keys of the 'details' map |
| `self.details.all(key, self.details[key].matches('^[a-zA-Z]*$')` | Validate the values of the 'details' map |
| `self.details.all(key, key.matches('^[a-zA-Z]*$'))` | Validate the keys of the 'details' map |
| `self.details.all(key, self.details[key].matches('^[a-zA-Z]*$'))` | Validate the values of the 'details' map |
{{< /table >}}
## CEL options, language features, and libraries
@ -133,8 +133,8 @@ Examples:
{{< table caption="Examples of CEL expressions using regex library functions" >}}
| CEL Expression | Purpose |
|-------------------------------------------------------------|----------------------------------------------------------|
| `"abc 123".find('[0-9]*')` | Find the first number in a string |
| `"1, 2, 3, 4".findAll('[0-9]*').map(x, int(x)).sum() < 100` | Verify that the numbers in a string sum to less than 100 |
| `"abc 123".find('[0-9]+')` | Find the first number in a string |
| `"1, 2, 3, 4".findAll('[0-9]+').map(x, int(x)).sum() < 100` | Verify that the numbers in a string sum to less than 100 |
{{< /table >}}
See the [Kubernetes regex library](https://pkg.go.dev/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/library#Regex)
@ -239,7 +239,7 @@ Examples:
| `quantity("500000G").isInteger()` | Test if conversion to integer would throw an error |
| `quantity("50k").asInteger()` | Precise conversion to integer |
| `quantity("9999999999999999999999999999999999999G").asApproximateFloat()` | Lossy conversion to float |
| `quantity("50k").add("20k")` | Add two quantities |
| `quantity("50k").add(quantity("20k"))` | Add two quantities |
| `quantity("50k").sub(20000)` | Subtract an integer from a quantity |
| `quantity("50k").add(20).sub(quantity("100k")).sub(-50000)` | Chain adding and subtracting integers and quantities |
| `quantity("200M").compareTo(quantity("0.2G"))` | Compare two quantities |

View File

@ -342,24 +342,36 @@ Before starting the restore operation, a snapshot file must be present. It can
either be a snapshot file from a previous backup operation, or from a remaining
[data directory](https://etcd.io/docs/current/op-guide/configuration/#--data-dir).
When restoring the cluster, use the `--data-dir` option to specify to which folder the cluster should be restored:
{{< tabs name="etcd_restore" >}}
{{% tab name="Use etcdutl" %}}
When restoring the cluster using [`etcdutl`](https://github.com/etcd-io/etcd/blob/main/etcdutl/README.md),
use the `--data-dir` option to specify to which folder the cluster should be restored:
```shell
etcdutl --data-dir <data-dir-location> snapshot restore snapshot.db
```
where `<data-dir-location>` is a directory that will be created during the restore process.
```shell
etcdutl --data-dir <data-dir-location> snapshot restore snapshot.db
```
where `<data-dir-location>` is a directory that will be created during the restore process.
The below example depicts the usage of the `etcdctl` tool for the restore operation:
{{< note >}}
The usage of `etcdctl` for restoring has been deprecated since etcd v3.5.x and may be removed from a future etcd release.
{{< /note >}}
{{% /tab %}}
{{% tab name="Use etcdctl (Deprecated)" %}}
```shell
export ETCDCTL_API=3
etcdctl --data-dir <data-dir-location> snapshot restore snapshot.db
```
{{< note >}}
The usage of `etcdctl` for restoring has been **deprecated** since etcd v3.5.x and is slated for removal from etcd v3.6.
It is recommended to utilize [`etcdutl`](https://github.com/etcd-io/etcd/blob/main/etcdutl/README.md) instead.
{{< /note >}}
If `<data-dir-location>` is the same folder as before, delete it and stop the etcd process before restoring the cluster. Otherwise, change etcd configuration and restart the etcd process after restoration to have it use the new data directory.
The below example depicts the usage of the `etcdctl` tool for the restore operation:
```shell
export ETCDCTL_API=3
etcdctl --data-dir <data-dir-location> snapshot restore snapshot.db
```
If `<data-dir-location>` is the same folder as before, delete it and stop the etcd process before restoring the cluster.
Otherwise, change etcd configuration and restart the etcd process after restoration to have it use the new data directory.
{{% /tab %}}
{{< /tabs >}}
For more information and examples on restoring a cluster from a snapshot file, see
[etcd disaster recovery documentation](https://etcd.io/docs/current/op-guide/recovery/#restoring-a-cluster).

View File

@ -5,7 +5,6 @@ reviewers:
- dashpole
title: Reserve Compute Resources for System Daemons
content_type: task
min-kubernetes-server-version: 1.8
weight: 290
---
@ -25,10 +24,10 @@ on each node.
## {{% heading "prerequisites" %}}
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
Your Kubernetes server must be at or later than version 1.17 to use
the kubelet command line option `--reserved-cpus` to set an
[explicitly reserved CPU list](#explicitly-reserved-cpu-list).
{{< include "task-tutorial-prereqs.md" >}}
You can configure the following kubelet [configuration settings](/docs/reference/config-api/kubelet-config.v1beta1/)
using the [kubelet configuration file](/docs/tasks/administer-cluster/kubelet-config-file/).
<!-- steps -->
@ -48,15 +47,14 @@ Resources can be reserved for two categories of system daemons in the `kubelet`.
### Enabling QoS and Pod level cgroups
To properly enforce node allocatable constraints on the node, you must
enable the new cgroup hierarchy via the `--cgroups-per-qos` flag. This flag is
enable the new cgroup hierarchy via the `cgroupsPerQOS` setting. This setting is
enabled by default. When enabled, the `kubelet` will parent all end-user pods
under a cgroup hierarchy managed by the `kubelet`.
### Configuring a cgroup driver
The `kubelet` supports manipulation of the cgroup hierarchy on
the host using a cgroup driver. The driver is configured via the
`--cgroup-driver` flag.
the host using a cgroup driver. The driver is configured via the `cgroupDriver` setting.
The supported values are the following:
@ -73,21 +71,21 @@ be configured to use the `systemd` cgroup driver.
### Kube Reserved
- **Kubelet Flag**: `--kube-reserved=[cpu=100m][,][memory=100Mi][,][ephemeral-storage=1Gi][,][pid=1000]`
- **Kubelet Flag**: `--kube-reserved-cgroup=`
- **KubeletConfiguration Setting**: `kubeReserved: {}`. Example value `{cpu: 100m, memory: 100Mi, ephemeral-storage: 1Gi, pid=1000}`
- **KubeletConfiguration Setting**: `kubeReservedCgroup: ""`
`kube-reserved` is meant to capture resource reservation for kubernetes system
daemons like the `kubelet`, `container runtime`, `node problem detector`, etc.
`kubeReserved` is meant to capture resource reservation for kubernetes system
daemons like the `kubelet`, `container runtime`, etc.
It is not meant to reserve resources for system daemons that are run as pods.
`kube-reserved` is typically a function of `pod density` on the nodes.
`kubeReserved` is typically a function of `pod density` on the nodes.
In addition to `cpu`, `memory`, and `ephemeral-storage`, `pid` may be
specified to reserve the specified number of process IDs for
kubernetes system daemons.
To optionally enforce `kube-reserved` on kubernetes system daemons, specify the parent
control group for kube daemons as the value for `--kube-reserved-cgroup` kubelet
flag.
To optionally enforce `kubeReserved` on kubernetes system daemons, specify the parent
control group for kube daemons as the value for `kubeReservedCgroup` setting,
and [add `kube-reserved` to `enforceNodeAllocatable`](#enforcing-node-allocatable).
It is recommended that the kubernetes system daemons are placed under a top
level control group (`runtime.slice` on systemd machines for example). Each
@ -95,19 +93,19 @@ system daemon should ideally run within its own child control group. Refer to
[the design proposal](https://git.k8s.io/design-proposals-archive/node/node-allocatable.md#recommended-cgroups-setup)
for more details on recommended control group hierarchy.
Note that Kubelet **does not** create `--kube-reserved-cgroup` if it doesn't
Note that Kubelet **does not** create `kubeReservedCgroup` if it doesn't
exist. The kubelet will fail to start if an invalid cgroup is specified. With `systemd`
cgroup driver, you should follow a specific pattern for the name of the cgroup you
define: the name should be the value you set for `--kube-reserved-cgroup`,
define: the name should be the value you set for `kubeReservedCgroup`,
with `.slice` appended.
### System Reserved
- **Kubelet Flag**: `--system-reserved=[cpu=100m][,][memory=100Mi][,][ephemeral-storage=1Gi][,][pid=1000]`
- **Kubelet Flag**: `--system-reserved-cgroup=`
- **KubeletConfiguration Setting**: `systemReserved: {}`. Example value `{cpu: 100m, memory: 100Mi, ephemeral-storage: 1Gi, pid=1000}`
- **KubeletConfiguration Setting**: `systemReservedCgroup: ""`
`system-reserved` is meant to capture resource reservation for OS system daemons
like `sshd`, `udev`, etc. `system-reserved` should reserve `memory` for the
`systemReserved` is meant to capture resource reservation for OS system daemons
like `sshd`, `udev`, etc. `systemReserved` should reserve `memory` for the
`kernel` too since `kernel` memory is not accounted to pods in Kubernetes at this time.
Reserving resources for user login sessions is also recommended (`user.slice` in
systemd world).
@ -116,33 +114,32 @@ In addition to `cpu`, `memory`, and `ephemeral-storage`, `pid` may be
specified to reserve the specified number of process IDs for OS system
daemons.
To optionally enforce `system-reserved` on system daemons, specify the parent
control group for OS system daemons as the value for `--system-reserved-cgroup`
kubelet flag.
To optionally enforce `systemReserved` on system daemons, specify the parent
control group for OS system daemons as the value for `systemReservedCgroup` setting,
and [add `system-reserved` to `enforceNodeAllocatable`](#enforcing-node-allocatable).
It is recommended that the OS system daemons are placed under a top level
control group (`system.slice` on systemd machines for example).
Note that `kubelet` **does not** create `--system-reserved-cgroup` if it doesn't
Note that `kubelet` **does not** create `systemReservedCgroup` if it doesn't
exist. `kubelet` will fail if an invalid cgroup is specified. With `systemd`
cgroup driver, you should follow a specific pattern for the name of the cgroup you
define: the name should be the value you set for `--system-reserved-cgroup`,
define: the name should be the value you set for `systemReservedCgroup`,
with `.slice` appended.
### Explicitly Reserved CPU List
{{< feature-state for_k8s_version="v1.17" state="stable" >}}
**Kubelet Flag**: `--reserved-cpus=0-3`
**KubeletConfiguration Flag**: `reservedSystemCPUs: 0-3`
**KubeletConfiguration Setting**: `reservedSystemCPUs:`. Example value `0-3`
`reserved-cpus` is meant to define an explicit CPU set for OS system daemons and
kubernetes system daemons. `reserved-cpus` is for systems that do not intend to
`reservedSystemCPUs` is meant to define an explicit CPU set for OS system daemons and
kubernetes system daemons. `reservedSystemCPUs` is for systems that do not intend to
define separate top level cgroups for OS system daemons and kubernetes system daemons
with regard to cpuset resource.
If the Kubelet **does not** have `--system-reserved-cgroup` and `--kube-reserved-cgroup`,
the explicit cpuset provided by `reserved-cpus` will take precedence over the CPUs
defined by `--kube-reserved` and `--system-reserved` options.
If the Kubelet **does not** have `kubeReservedCgroup` and `systemReservedCgroup`,
the explicit cpuset provided by `reservedSystemCPUs` will take precedence over the CPUs
defined by `kubeReservedCgroup` and `systemReservedCgroup` options.
This option is specifically designed for Telco/NFV use cases where uncontrolled
interrupts/timers may impact the workload performance. You can use this option
@ -155,7 +152,7 @@ For example: in Centos, you can do this using the tuned toolset.
### Eviction Thresholds
**Kubelet Flag**: `--eviction-hard=[memory.available<500Mi]`
**KubeletConfiguration Setting**: `evictionHard: {memory.available: "100Mi", nodefs.available: "10%", nodefs.inodesFree: "5%", imagefs.available: "15%"}`. Example value: `{memory.available: "<500Mi"}`
Memory pressure at the node level leads to System OOMs which affects the entire
node and all pods running on it. Nodes can go offline temporarily until memory
@ -163,7 +160,7 @@ has been reclaimed. To avoid (or reduce the probability of) system OOMs kubelet
provides [out of resource](/docs/concepts/scheduling-eviction/node-pressure-eviction/)
management. Evictions are
supported for `memory` and `ephemeral-storage` only. By reserving some memory via
`--eviction-hard` flag, the `kubelet` attempts to evict pods whenever memory
`evictionHard` setting, the `kubelet` attempts to evict pods whenever memory
availability on the node drops below the reserved value. Hypothetically, if
system daemons did not exist on a node, pods cannot use more than `capacity -
eviction-hard`. For this reason, resources reserved for evictions are not
@ -171,7 +168,7 @@ available for pods.
### Enforcing Node Allocatable
**Kubelet Flag**: `--enforce-node-allocatable=pods[,][system-reserved][,][kube-reserved]`
**KubeletConfiguration setting**: `enforceNodeAllocatable: [pods]`. Example value: `[pods,system-reserved,kube-reserved]`
The scheduler treats 'Allocatable' as the available `capacity` for pods.
@ -180,35 +177,35 @@ by evicting pods whenever the overall usage across all pods exceeds
'Allocatable'. More details on eviction policy can be found
on the [node pressure eviction](/docs/concepts/scheduling-eviction/node-pressure-eviction/)
page. This enforcement is controlled by
specifying `pods` value to the kubelet flag `--enforce-node-allocatable`.
specifying `pods` value to the KubeletConfiguration setting `enforceNodeAllocatable`.
Optionally, `kubelet` can be made to enforce `kube-reserved` and
`system-reserved` by specifying `kube-reserved` & `system-reserved` values in
the same flag. Note that to enforce `kube-reserved` or `system-reserved`,
`--kube-reserved-cgroup` or `--system-reserved-cgroup` needs to be specified
Optionally, `kubelet` can be made to enforce `kubeReserved` and
`systemReserved` by specifying `kube-reserved` & `system-reserved` values in
the same setting. Note that to enforce `kubeReserved` or `systemReserved`,
`kubeReservedCgroup` or `systemReservedCgroup` needs to be specified
respectively.
## General Guidelines
System daemons are expected to be treated similar to
[Guaranteed pods](/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed).
System daemons are expected to be treated similar to
[Guaranteed pods](/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed).
System daemons can burst within their bounding control groups and this behavior needs
to be managed as part of kubernetes deployments. For example, `kubelet` should
have its own control group and share `kube-reserved` resources with the
have its own control group and share `kubeReserved` resources with the
container runtime. However, Kubelet cannot burst and use up all available Node
resources if `kube-reserved` is enforced.
resources if `kubeReserved` is enforced.
Be extra careful while enforcing `system-reserved` reservation since it can lead
Be extra careful while enforcing `systemReserved` reservation since it can lead
to critical system services being CPU starved, OOM killed, or unable
to fork on the node. The
recommendation is to enforce `system-reserved` only if a user has profiled their
recommendation is to enforce `systemReserved` only if a user has profiled their
nodes exhaustively to come up with precise estimates and is confident in their
ability to recover if any process in that group is oom-killed.
* To begin with enforce 'Allocatable' on `pods`.
* Once adequate monitoring and alerting is in place to track kube system
daemons, attempt to enforce `kube-reserved` based on usage heuristics.
* If absolutely necessary, enforce `system-reserved` over time.
daemons, attempt to enforce `kubeReserved` based on usage heuristics.
* If absolutely necessary, enforce `systemReserved` over time.
The resource requirements of kube system daemons may grow over time as more and
more features are added. Over time, kubernetes project will attempt to bring
@ -222,9 +219,9 @@ So expect a drop in `Allocatable` capacity in future releases.
Here is an example to illustrate Node Allocatable computation:
* Node has `32Gi` of `memory`, `16 CPUs` and `100Gi` of `Storage`
* `--kube-reserved` is set to `cpu=1,memory=2Gi,ephemeral-storage=1Gi`
* `--system-reserved` is set to `cpu=500m,memory=1Gi,ephemeral-storage=1Gi`
* `--eviction-hard` is set to `memory.available<500Mi,nodefs.available<10%`
* `kubeReserved` is set to `{cpu: 1000m, memory: 2Gi, ephemeral-storage: 1Gi}`
* `systemReserved` is set to `{cpu: 500m, memory: 1Gi, ephemeral-storage: 1Gi}`
* `evictionHard` is set to `{memory.available: "<500Mi", nodefs.available: "<10%"}`
Under this scenario, 'Allocatable' will be 14.5 CPUs, 28.5Gi of memory and
`88Gi` of local storage.
@ -234,7 +231,7 @@ Kubelet evicts pods whenever the overall memory usage across pods exceeds 28.5Gi
or if overall disk usage exceeds 88Gi. If all processes on the node consume as
much CPU as they can, pods together cannot consume more than 14.5 CPUs.
If `kube-reserved` and/or `system-reserved` is not enforced and system daemons
If `kubeReserved` and/or `systemReserved` is not enforced and system daemons
exceed their reservation, `kubelet` evicts pods whenever the overall node memory
usage is higher than 31.5Gi or `storage` is greater than 90Gi.

View File

@ -7,7 +7,7 @@ description: Configure and manage huge pages as a schedulable resource in a clus
---
<!-- overview -->
{{< feature-state state="stable" >}}
{{< feature-state feature_gate_name="HugePages" >}}
Kubernetes supports the allocation and consumption of pre-allocated huge pages
by applications in a Pod. This page describes how users can consume huge pages.

View File

@ -141,7 +141,7 @@ recommended way to manage the creation and scaling of Pods.
kubectl config view
```
1. View application logs for a container in a pod.
1. View application logs for a container in a pod (replace pod name with the one you got from `kubectl get pods`).
```shell
kubectl logs hello-node-5f76cf6ccf-br9b5

View File

@ -593,7 +593,7 @@ In one terminal window, patch the `web` StatefulSet to change the container
image again:
```shell
kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"registry.k8s.io/nginx-slim:0.8"}]'
kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"registry.k8s.io/nginx-slim:0.24"}]'
```
```
statefulset.apps/web patched
@ -661,9 +661,9 @@ Get the Pods to view their container images:
for p in 0 1 2; do kubectl get pod "web-$p" --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done
```
```
registry.k8s.io/nginx-slim:0.8
registry.k8s.io/nginx-slim:0.8
registry.k8s.io/nginx-slim:0.8
registry.k8s.io/nginx-slim:0.24
registry.k8s.io/nginx-slim:0.24
registry.k8s.io/nginx-slim:0.24
```
@ -705,7 +705,7 @@ Patch the StatefulSet again to change the container image that this
StatefulSet uses:
```shell
kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"registry.k8s.io/nginx-slim:0.7"}]'
kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"registry.k8s.io/nginx-slim:0.21"}]'
```
```
statefulset.apps/web patched
@ -740,7 +740,7 @@ Get the Pod's container image:
kubectl get pod web-2 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'
```
```
registry.k8s.io/nginx-slim:0.8
registry.k8s.io/nginx-slim:0.24
```
Notice that, even though the update strategy is `RollingUpdate` the StatefulSet
@ -790,7 +790,7 @@ Get the Pod's container:
kubectl get pod web-2 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'
```
```
registry.k8s.io/nginx-slim:0.7
registry.k8s.io/nginx-slim:0.21
```
@ -834,7 +834,7 @@ Get the `web-1` Pod's container image:
kubectl get pod web-1 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'
```
```
registry.k8s.io/nginx-slim:0.8
registry.k8s.io/nginx-slim:0.24
```
`web-1` was restored to its original configuration because the Pod's ordinal
@ -892,9 +892,9 @@ Get the container image details for the Pods in the StatefulSet:
for p in 0 1 2; do kubectl get pod "web-$p" --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done
```
```
registry.k8s.io/nginx-slim:0.7
registry.k8s.io/nginx-slim:0.7
registry.k8s.io/nginx-slim:0.7
registry.k8s.io/nginx-slim:0.21
registry.k8s.io/nginx-slim:0.21
registry.k8s.io/nginx-slim:0.21
```
By moving the `partition` to `0`, you allowed the StatefulSet to

View File

@ -30,7 +30,7 @@ spec:
spec:
containers:
- name: nginx
image: registry.k8s.io/nginx-slim:0.8
image: registry.k8s.io/nginx-slim:0.24
ports:
- containerPort: 80
name: web

View File

@ -29,7 +29,7 @@ spec:
spec:
containers:
- name: nginx
image: registry.k8s.io/nginx-slim:0.7
image: registry.k8s.io/nginx-slim:0.21
ports:
- containerPort: 80
name: web

View File

@ -0,0 +1,106 @@
---
title: Leases
api_metadata:
- apiVersion: "coordination.k8s.io/v1"
kind: "Lease"
content_type: concept
weight: 30
---
<!-- overview -->
Los sistemas distribuidos suelen necesitar _leases_, que proporcionan un mecanismo para bloquear recursos compartidos
y coordinar la actividad entre los miembros de un conjunto.
En Kubernetes, el concepto de lease (arrendamiento) está representado por objetos [Lease](/docs/reference/kubernetes-api/cluster-resources/lease-v1/)
en el {{< glossary_tooltip text="grupo API" term_id="api-group" >}} de `coordination.k8s.io`,
que se utilizan para capacidades críticas del sistema, como los heartbeats del nodo y la elección del líder a nivel de componente.
<!-- body -->
## Heartbeats del nodo {#node-heart-beats}
Kubernetes utiliza la API Lease para comunicar los heartbeats de los nodos kubelet al servidor API de Kubernetes.
Para cada `Nodo` , existe un objeto `Lease` con un nombre que coincide en el espacio de nombres `kube-node-lease`.
Analizando a detalle, cada heartbeat es una solicitud **update** a este objeto `Lease`, actualizando
el campo `spec.renewTime` del objeto Lease. El plano de control de Kubernetes utiliza la marca de tiempo de este campo
para determinar la disponibilidad de este `Nodo`.
Ve [Objetos Lease de nodos](/docs/concepts/architecture/nodes/#heartbeats) para más detalles.
## Elección del líder
Kubernetes también utiliza Leases para asegurar que sólo una instancia de un componente se está ejecutando en un momento dado.
Esto lo utilizan componentes del plano de control como `kube-controller-manager` y `kube-scheduler` en configuraciones de
HA, donde sólo una instancia del componente debe estar ejecutándose activamente mientras las otras
instancias están en espera.
## Identidad del servidor API
{{< feature-state feature_gate_name="APIServerIdentity" >}}
A partir de Kubernetes v1.26, cada `kube-apiserver` utiliza la API Lease para publicar su identidad al resto del sistema.
Aunque no es particularmente útil por sí mismo, esto proporciona un mecanismo para que los clientes
puedan descubrir cuántas instancias de `kube-apiserver` están operando el plano de control de Kubernetes.
La existencia de los objetos leases de kube-apiserver permite futuras capacidades que pueden requerir la coordinación entre
cada kube-apiserver.
Puedes inspeccionar los leases de cada kube-apiserver buscando objetos leases en el namespace `kube-system`
con el nombre `kube-apiserver-<sha256-hash>`. También puedes utilizar el selector de etiquetas `apiserver.kubernetes.io/identity=kube-apiserver`:
```shell
kubectl -n kube-system get lease -l apiserver.kubernetes.io/identity=kube-apiserver
```
```
NAME HOLDER AGE
apiserver-07a5ea9b9b072c4a5f3d1c3702 apiserver-07a5ea9b9b072c4a5f3d1c3702_0c8914f7-0f35-440e-8676-7844977d3a05 5m33s
apiserver-7be9e061c59d368b3ddaf1376e apiserver-7be9e061c59d368b3ddaf1376e_84f2a85d-37c1-4b14-b6b9-603e62e4896f 4m23s
apiserver-1dfef752bcb36637d2763d1868 apiserver-1dfef752bcb36637d2763d1868_c5ffa286-8a9a-45d4-91e7-61118ed58d2e 4m43s
```
El hash SHA256 utilizado en el nombre del lease se basa en el nombre de host del sistema operativo visto por ese servidor API. Cada kube-apiserver debe ser
configurado para utilizar un nombre de host que es único dentro del clúster. Las nuevas instancias de kube-apiserver que utilizan el mismo nombre de host
asumirán los leases existentes utilizando una nueva identidad de titular, en lugar de instanciar nuevos objetos leases. Puedes comprobar el
nombre de host utilizado por kube-apiserver comprobando el valor de la etiqueta `kubernetes.io/hostname`:
```shell
kubectl -n kube-system get lease apiserver-07a5ea9b9b072c4a5f3d1c3702 -o yaml
```
```yaml
apiVersion: coordination.k8s.io/v1
kind: Lease
metadata:
creationTimestamp: "2023-07-02T13:16:48Z"
labels:
apiserver.kubernetes.io/identity: kube-apiserver
kubernetes.io/hostname: master-1
name: apiserver-07a5ea9b9b072c4a5f3d1c3702
namespace: kube-system
resourceVersion: "334899"
uid: 90870ab5-1ba9-4523-b215-e4d4e662acb1
spec:
holderIdentity: apiserver-07a5ea9b9b072c4a5f3d1c3702_0c8914f7-0f35-440e-8676-7844977d3a05
leaseDurationSeconds: 3600
renewTime: "2023-07-04T21:58:48.065888Z"
```
Los leases caducados de los kube-apiservers que ya no existen son recogidos por los nuevos kube-apiservers después de 1 hora.
Puedes desactivar el lease de identidades del servidor API desactivando la opción `APIServerIdentity` de los [interruptores de funcionalidades](/docs/reference/command-line-tools-reference/feature-gates/).
## Cargas de trabajo {#custom-workload}
Tu propia carga de trabajo puede definir su propio uso de los leases. Por ejemplo, puede ejecutar un
{{< glossary_tooltip term_id="controller" text="controlador" >}} en el que un miembro principal o líder
realiza operaciones que sus compañeros no realizan. Tú defines un Lease para que las réplicas del controlador puedan seleccionar
o elegir un líder, utilizando la API de Kubernetes para la coordinación.
Si utilizas un lease, es una buena práctica definir un nombre para el lease que esté obviamente vinculado a
el producto o componente. Por ejemplo, si tienes un componente denominado Ejemplo Foo, utiliza un lease denominado
`ejemplo-foo`.
Si un operador de clúster u otro usuario final puede desplegar varias instancias de un componente, selecciona un prefijo
de nombre y elige un mecanismo (como el hash del nombre del despliegue) para evitar colisiones de nombres
en los leases.
Puedes utilizar otro enfoque siempre que consigas el mismo resultado: los distintos productos de software no entren en conflicto entre sí.

View File

@ -447,8 +447,8 @@ Suivez [SIG network](https://github.com/kubernetes/community/tree/master/sig-net
Vous pouvez exposer un service de plusieurs manières sans impliquer directement la ressource Ingress :
* Utilisez [Service.Type=LoadBalancer](/docs/concepts/services-networking/service/#loadbalancer)
* Utilisez [Service.Type=NodePort](/docs/concepts/services-networking/service/#nodeport)
* Utilisez [Service.Type=LoadBalancer](/fr/docs/concepts/services-networking/service/#loadbalancer)
* Utilisez [Service.Type=NodePort](/fr/docs/concepts/services-networking/service/#type-nodeport)
* Utilisez un [Proxy du port](https://git.k8s.io/contrib/for-demos/proxy-to-service)

View File

@ -0,0 +1,13 @@
---
title: Glossaire de termes
layout: glossary
noedit: true
body_class: glossary
default_active_tag: fundamental
weight: 5
card:
name: reference
weight: 10
title: Glossaire
---

View File

@ -129,7 +129,9 @@ Voici le fichier de configuration pour le déploiement de l'application :
réponse à une requête réussie est un message de bienvenue :
```none
Hello Kubernetes!
Hello, world!
Version: 2.0.0
Hostname: hello-world-2895499144-bsbk5
```
## Utilisation d'un fichier de configuration de service

View File

@ -14,7 +14,7 @@ spec:
spec:
containers:
- name: hello-world
image: gcr.io/google-samples/node-hello:1.0
image: us-docker.pkg.dev/google-samples/containers/gke/hello-app:2.0
ports:
- containerPort: 8080
protocol: TCP

View File

@ -0,0 +1,67 @@
---
title: "माइक्रोप्रोफाइल, कॉन्फिगमैप्स और सीक्रेट्स का उपयोग करके कॉन्फिगरेशन को बाह्यीकृत करना"
content_type: tutorial
weight: 10
---
<!-- overview -->
इस ट्यूटोरियल में आप सीखेंगे कि अपने माइक्रोसर्विस के कॉन्फ़िगरेशन को कैसे और क्यों बाह्यीकृत करना है।
विशेष रूप से, आप सीखेंगे कि एनवायरमेंट वेरिएबल सेट करने के लिए कुबेरनेट्स कॉन्फिगमैप्स और सीक्रेट्स का
उपयोग कैसे करें और फिर माइक्रोप्रोफाइल कॉन्फिग का उपयोग करके उनका उपभोग करें।
## {{% heading "prerequisites" %}}
### कुबेरनेट्स कॉन्फ़िगमैप्स और सीक्रेट बनाना
कुबेरनेट्स में डॉकर कंटेनर के लिए एनवायरमेंट वेरिएबल सेट करने के कई तरीके हैं, जिनमें शामिल हैं: Dockerfile,
kubernetes.yml, Kubernetes ConfigMaps, और Kubernetes Secrets। ट्यूटोरियल में, आप सीखेंगे कि अपने
एनवायरमेंट वेरिएबल सेट करने के लिए कुबेरनेट्स कॉन्फिगमैप्स और कुबेरनेट्स सीक्रेट्स का उपयोग कैसे करें, जिनके वैल्यू
आपके माइक्रोसर्विसेज में इंजेक्ट किए जाएंगे। कॉन्फिगमैप्स और सीक्रेट्स का उपयोग करने का एक लाभ यह है कि
उन्हें कई कंटेनरों में फिर से उपयोग किया जा सकता है, जिसमें विभिन्न कंटेनरों के लिए अलग-अलग एनवायरमेंट वेरिएबल
को सौंपा जाना भी शामिल है।
कॉन्फिगमैप्स एपीआई ऑब्जेक्ट हैं जो गैर-गोपनीय key-value जोड़े को संग्रहीत करते हैं।
इंटरएक्टिव ट्यूटोरियल में आप सीखेंगे कि एप्लिकेशन के नाम को संग्रहीत करने के लिए
कॉन्फिगमैप का उपयोग कैसे करना है। कॉन्फ़िगमैप्स के संबंध में अधिक जानकारी के लिए,
आप [दस्तावेज़ यहाँ पा सकते हैं](/docs/tasks/configure-pod-container/configure-pod-configmap/)।
हालाँकि सीक्रेट्स का उपयोग भी key-value जोड़े को संग्रहीत करने के लिए किया जाता है,
वे कॉन्फिगमैप्स से भिन्न होते हैं क्योंकि वे गोपनीय/संवेदनशील जानकारी के लिए होते हैं और Base64 एन्कोडिंग
का उपयोग करके संग्रहीत होते हैं। यह सीक्रेट को क्रेडेंशियल्स, keys और टोकन जैसी चीज़ों को संग्रहीत करने
के लिए उपयुक्त विकल्प बनाता है, जिनमें से पहला काम आप इंटरैक्टिव ट्यूटोरियल में करेंगे। सीक्रेट के बारे
में अधिक जानकारी के लिए, आप [दस्तावेज़ यहाँ पा सकते हैं](/docs/concepts/configuration/secret/)।
### कोड से कॉन्फ़िग को बाह्यीकृत करना
बाह्यीकृत एप्लिकेशन कॉन्फ़िगरेशन उपयोगी है क्योंकि कॉन्फ़िगरेशन आमतौर पर आपके वातावरण के आधार पर
बदलता है। इसे पूरा करने के लिए, हम Java के Contexts and Dependency Injection (CDI) और माइक्रोप्रोफाइल
कॉन्फ़िगरेशन का उपयोग करेंगे। माइक्रोप्रोफाइल कॉन्फिग माइक्रोप्रोफाइल की एक विशेषता है, जो क्लाउड-नेटिव
माइक्रोसर्विसेज को विकसित करने और डेप्लॉय करने के लिए open Java प्रौद्योगिकियों का एक सेट है।
सीडीआई (CDI) एक स्टैंडर्ड तरीका है जो एप्लिकेशन में डिपेंडेंसी इंजेक्शन (dependency injection) को आसान बनाता है।
इसकी मदद से, एप्लिकेशन को अलग-अलग हिस्सों (beans) से मिलाकर बनाया जा सकता है जो एक-दूसरे से कम जुड़े होते हैं।
इससे एप्लिकेशन को बनाना और सुधारना आसान हो जाता है। माइक्रोप्रोफाइल कॉन्फिग ऐप्स और माइक्रोसर्विसेज को एप्लिकेशन,
रनटाइम और एनवायरमेंट सहित विभिन्न स्रोतों से कॉन्फिग के गुण प्राप्त करने का एक मानक तरीका प्रदान करता है। स्रोत की
परिभाषित प्राथमिकता के आधार पर, गुणों को स्वचालित रूप से गुणों के एक सेट में संयोजित किया जाता है जिसे
एप्लिकेशन एपीआई के माध्यम से एक्सेस कर सकता है। साथ में, सीडीआई और माइक्रोप्रोफाइल का उपयोग कुबेरनेट्स
कॉन्फिगमैप्स और सीक्रेट्स से बाहरी रूप से प्रदान की गई संपत्तियों को पुनः प्राप्त करने और आपके एप्लिकेशन कोड
में इंजेक्ट करने के लिए इंटरएक्टिव ट्यूटोरियल में किया जाएगा।
कई ओपन सोर्स फ्रेमवर्क और रनटाइम माइक्रोप्रोफाइल कॉन्फ़िगरेशन को लागू और समर्थ करते हैं। पूरे इंटरैक्टिव
ट्यूटोरियल के दौरान, आप ओपन लिबर्टी का उपयोग करेंगे, जो क्लाउड-नेटिव ऐप्स और माइक्रोसर्विसेज को बनाने
और चलाने के लिए एक फ्लेक्सिबल ओपन-सोर्स Java रनटाइम है। हालाँकि, इसके बजाय किसी भी माइक्रोप्रोफाइल
संगत रनटाइम का उपयोग किया जा सकता है।
## {{% heading "objectives" %}}
* एक कुबेरनेट्स कॉन्फ़िगमैप और सीक्रेट बनाएं
* माइक्रोप्रोफाइल कॉन्फ़िगरेशन का उपयोग करके माइक्रोसर्विस कॉन्फ़िगरेशन इंजेक्ट करें
<!-- lessoncontent -->
## उदाहरण: माइक्रोप्रोफाइल, कॉन्फिगमैप्स और सीक्रेट्स का उपयोग करके कॉन्फिगरेशन को बाह्यीकृत करना
[इंटरैक्टिव ट्यूटोरियल प्रारंभ करें](/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice-interactive/)

View File

@ -0,0 +1,13 @@
---
title: Glossario
layout: glossary
noedit: true
body_class: glossary
default_active_tag: fundamental
weight: 5
card:
name: reference
weight: 10
title: Glossario
---

View File

@ -5,7 +5,7 @@ date: 2024-03-07
slug: cri-o-seccomp-oci-artifacts
---
**著者:** Kevin Hannon (Red Hat)
**著者:** Sascha Grunert
**翻訳者:** Taisuke Okamoto (IDC Frontier Inc), atoato88 (NEC Corporation), Junya Okabe (University of Tsukuba)

Binary file not shown.

After

Width:  |  Height:  |  Size: 31 KiB

View File

@ -0,0 +1,152 @@
---
layout: blog
title: "Kubernetesの10年間の歴史"
date: 2024-06-06
slug: 10-years-of-kubernetes
author: >
[Bob Killen](https://github.com/mybobbytables) (CNCF),
[Chris Short](https://github.com/chris-short) (AWS),
[Frederico Muñoz](https://github.com/fsmunoz) (SAS),
[Kaslin Fields](https://github.com/kaslin) (Google),
[Tim Bannister](https://github.com/sftim) (The Scale Factory),
and every contributor across the globe
---
**翻訳者**: [Junya Okabe](https://github.com/Okabe-Junya) (University of Tsukuba), [Daiki Hayakawa(bells17)](https://github.com/bells17) ([3-shake Inc.](https://3-shake.com/en/)), [Kaito Ii](https://github.com/kaitoii11) (Hewlett Packard Enterprise)
![KCSEU 2024 group photo](kcseu2024.jpg)
10年前の2014年6月6日、Kubernetesの[最初のコミット](https://github.com/kubernetes/kubernetes/commit/2c4b3a562ce34cddc3f8218a2c4d11c7310e6d56)がGitHubにプッシュされました。
Go、Bash、Markdownで書かれた250のファイルと47,501行のコードを含むその最初のコミットが、今日のKubernetesプロジェクトの始まりでした。
それから10年後の今日、Kubernetesが44か国から[8,000社以上の企業](https://www.cncf.io/reports/kubernetes-project-journey-report/)、[88,000人以上のコントリビューター](https://k8s.devstats.cncf.io/d/24/overall-project-statistics?orgId=1)を有する、これまでで最大のオープンソースプロジェクトの一つに成長するとは誰が予想したでしょうか。
<img src="kcscn2019.jpg" alt="KCSCN 2019" class="left" style="max-width: 20em; margin: 1em" >
このマイルストーンはKubernetesだけでなく、そこから生まれたクラウドネイティブエコシステムにとっても重要なものです。
CNCFには[約200のプロジェクト](https://all.devstats.cncf.io/d/18/overall-project-statistics-table?orgId=1)があり、[240,000人以上のコントリビューター](https://all.devstats.cncf.io/d/18/overall-project-statistics-table?orgId=1)からのコントリビューションがあります。
また、より広いエコシステムの中でも数千人のコントリビューターがいます。
Kubernetesが今日の姿になれたのは、彼らや[700万人以上の開発者](https://www.cncf.io/blog/2022/05/18/slashdata-cloud-native-continues-to-grow-with-more-than-7-million-developers-worldwide/)、さらに多くのユーザーコミュニティがエコシステムを形作る手助けをしてくれたおかげです。
## Kubernetesの始まり - 技術の収束
Kubernetesの元となるアイディアは、([2013年に登場した](/blog/2018/07/20/the-history-of-kubernetes-the-community-behind-it/))最初のコミットや最初のプロトタイプの前から存在していました。
2000年代初頭、ムーアの法則が有効に機能していました。
コンピューティングハードウェアは非常に速い速度でますます強力になり、それに対応してアプリケーションもますます複雑化していきました。
このハードウェアのコモディティ化とアプリケーションの複雑化の組み合わせにより、ソフトウェアをハードウェアからさらに抽象化する必要が生じ、解決策が現れ始めました。
当時の多くの企業と同様にGoogleも急速に拡大しており、同社のエンジニアたちはLinuxカーネル内での隔離の形態を作り出すというアイデアに興味を持っていました。
Googleのエンジニア、Rohit Sethはそのコンセプトを[2006年のメール](https://lwn.net/Articles/199643/)で説明しました。
> ワークロードのメモリやタスクなどのシステムリソースの使用を追跡し、課金する構造を示すためにコンテナという用語を使用します。
<img src="future.png" alt="The future of Linux containers" class="right" style="max-width: 20em; margin: 1em">
2013年3月、PyConでSolomon Hykesが行った5分間のライトニングトーク[The future of Linux Containers](https://youtu.be/wW9CAH9nSLs?si=VtK_VFQHymOT7BIB)では、Linuxコンテナを作成および使用するためのオープンソースツールである「Docker」が紹介されました。
DockerはLinuxコンテナに使いやすさをもたらし、これまで以上に多くのユーザーが利用できるようになりました。
Dockerの人気が急上昇し、Linuxコンテナの抽象化を誰もが利用できるようにしたことで、アプリケーションをより移植性が高く、再現性のある方法で実行できるようになりました。
しかし、依然としてスケールの問題は残っていました。
Googleのアプリケーションオーケストレーションをスケールで管理するBorgシステムは、2000年代半ばにLinuxコンテナを採用しました。
その後、GoogleはOmegaと呼ばれるシステムの新バージョンの開発も開始しました。
BorgとOmegaシステムに精通していたGoogleのエンジニアたちは、Dockerによって駆動するコンテナ化の人気を目の当たりにしました。
そしてBrendan Burnsの[ブログ](/blog/2018/07/20/the-history-of-kubernetes-the-community-behind-it/)で説明されているように、オープンソースのコンテナオーケストレーションシステムの必要性だけでなく、その「必然性」を認識しました。
この認識は2013年秋にJoe Beda、Brendan Burns、Craig McLuckie、Ville Aikas、Tim Hockin、Dawn Chen、Brian Grant、Daniel Smithを含む小さなチームにKubernetesのプロジェクトを始めるインスピレーションを与えました。
## Kubernetesの10年間
<img src="kubeconeu2017.jpg" alt="KubeCon EU 2017" class="left" style="max-width: 20em; margin: 1em">
Kubernetesの歴史は2014年6月6日のその歴史的なコミットと、2014年6月10日の[DockerCon 2014でのGoogleエンジニアEric Brewerによる基調講演](https://youtu.be/YrxnVKZeqK8?si=Q_wYBFn7dsS9H3k3)(およびそれに対応する[Googleブログ](https://cloudplatform.googleblog.com/2014/06/an-update-on-container-support-on-google-cloud-platform.html))でのプロジェクト発表から始まります。
その後の1年間で、主に[GoogleとRed Hatからのコントリビューター](https://k8s.devstats.cncf.io/d/9/companies-table?orgId=1&var-period_name=Before%20joining%20CNCF&var-metric=contributors)による小さなコミュニティがプロジェクトに取り組み、[2015年7月21日にバージョン1.0のリリース](https://cloudplatform.googleblog.com/2015/07/Kubernetes-V1-Released.html)に至りました。
1.0と同時に、GoogleはKubernetesをLinux Foundationの新たに設立された部門である[Cloud Native Computing Foundation (CNCF)](https://www.cncf.io/announcements/2015/06/21/new-cloud-native-computing-foundation-to-drive-alignment-among-container-technologies/)に寄贈することを発表しました。
1.0に到達したものの、Kubernetesプロジェクトは依然として使いにくく理解しにくいものでした。
KubernetesのコントリビューターであるKelsey Hightowerはプロジェクトの使いやすさの欠点に特に注目し、2016年7月7日に彼の有名な["Kubernetes the Hard Way"ガイドの最初のコミット](https://github.com/kelseyhightower/kubernetes-the-hard-way/commit/9d7ace8b186f6ebd2e93e08265f3530ec2fba81c)をプッシュしました。
プロジェクトは最初の1.0リリース以来大きく変わり、いくつかの大きな成果を経験しました。
たとえば、[1.16でのCustom Resource Definition (CRD)のGA](https://kubernetes.io/blog/2019/09/18/kubernetes-1-16-release-announcement/)や、[1.23での完全なデュアルスタックサポートの開始](https://kubernetes.io/blog/2021/12/08/dual-stack-networking-ga/)などです。
また、[1.22での広く使用されているベータ版APIの削除](https://kubernetes.io/blog/2021/07/14/upcoming-changes-in-kubernetes-1-22/)や、[Dockershimの廃止](https://kubernetes.io/blog/2020/12/02/dockershim-faq/)から学んだコミュニティの「教訓」もあります。
1.0以降の注目すべきアップデート、マイルストーン、およびイベントには以下のものがあります。
* 2016年12月 - [Kubernetes 1.5](/blog/2016/12/kubernetes-1-5-supporting-production-workloads/)でCRIの最初のサポートとアルファ版Windowsノードサポートによるランタイムプラグイン機能が導入されました。また、OpenAPIが初めて登場し、クライアントが拡張されたAPIを認識できるようになりました。
* このリリースでは、StatefulSetとPodDisruptionBudgetがベータ版で導入されました。
* 2017年4月 - [ロールベースアクセス制御(RBAC)](/blog/2017/04/rbac-support-in-kubernetes/)の導入。
* 2017年6月 - [Kubernetes 1.7](/blog/2017/06/kubernetes-1-7-security-hardening-stateful-application-extensibility-updates/)でThirdPartyResource (TPR)がCustomResourceDefinition (CRD)に置き換えられました。
* 2017年12月 - [Kubernetes 1.9](/blog/2017/12/kubernetes-19-workloads-expanded-ecosystem/)ではWorkload APIがGA(一般提供)となりました。リリースブログには「Kubernetesで最もよく使用されるオブジェクトの一つであるDeploymentとReplicaSetは、1年以上の実際の使用とフィードバックを経て安定しました」と書かれています。
* 2018年12月 - Kubernetes 1.13でContainer Storage Interface (CSI)がGAに達しました。また最小限のクラスターをブートストラップするためのkubeadmツールがGAに達し、CoreDNSがデフォルトのDNSサーバーとなりました。
* 2019年9月 - Kubernetes 1.16で[Custom Resource DefinitionがGAに達し](https://kubernetes.io/blog/2019/09/18/kubernetes-1-16-release-announcement/)ました。
* 2020年8月 - [Kubernetes 1.19](/blog/2020/08/26/kubernetes-release-1.19-accentuate-the-paw-sitive/)でリリースのサポート期間が1年に延長されました。
* 2020年12月 - Kubernetes 1.20で[Dockershimが廃止](https://kubernetes.io/blog/2020/12/18/kubernetes-1.20-pod-impersonation-short-lived-volumes-in-csi/)されました。
* 2021年4月 - [Kubernetesのリリース頻度が変更](https://kubernetes.io/blog/2021/07/20/new-kubernetes-release-cadence/#:~:text=On%20April%2023%2C%202021%2C%20the,Kubernetes%20community's%20contributors%20and%20maintainers.)され、年間4回から3回に減少されました。
* 2021年7月 - 広く使用されているベータ版APIが[Kubernetes 1.22で削除](https://kubernetes.io/blog/2021/07/14/upcoming-changes-in-kubernetes-1-22/)されました。
* 2022年5月 - Kubernetes 1.24で[ベータ版APIがデフォルトで無効](https://kubernetes.io/blog/2022/05/03/kubernetes-1-24-release-announcement/)にされ、アップグレードの競合を減らすとともに[Dockershimが削除](https://kubernetes.io/dockershim)されました。その結果、[多くのユーザーの混乱](https://www.youtube.com/watch?v=a03Hh1kd6KE)を引き起こしました(その後、[コミュニケーションを改善しました](https://github.com/kubernetes/community/tree/master/communication/contributor-comms))。
* 2022年12月 - Kubernetes 1.26ではAI/ML/バッチワークロードのサポートを強化するための大規模なバッチおよび[Job APIのオーバーホール](https://kubernetes.io/blog/2022/12/29/scalable-job-tracking-ga/)が行われました。
**PS:** プロジェクトがどれだけ進化したか自分で見てみたいですか?
コミュニティメンバーのCarlos Santana、Amim Moises Salum Knabben、James Spurinが作成した[Kubernetes 1.0クラスターを立ち上げるためのチュートリアル](https://github.com/spurin/kubernetes-v1.0-lab)をチェックしてみてください。
---
Kubernetesには数え切れないほどの拡張するポイントがあります。
もともとはDocker専用に設計されていましたが、現在ではCRI標準に準拠する任意のコンテナランタイムをプラグインできます。
他にもストレージ用のCSIやネットワーキング用のCNIなどのインターフェースがあります。
そしてこれはできることのほんの一部に過ぎません。
過去10年間で新しいパターンがいくつも登場しました。
例えば、[Custom Resource Definition](/ja/docs/concepts/extend-kubernetes/api-extension/custom-resources/) (CRD)を使用してサードパーティのコントローラーをサポートすることができます。
これは現在Kubernetesエコシステムの大きな一部となっています。
このプロジェクトを構築するコミュニティも、この10年間で非常に大きくなりました。
[DevStats](https://k8s.devstats.cncf.io/d/24/overall-project-statistics?orgId=1)を使用すると、この10年間でKubernetesを[世界で2番目に大きなオープンソースプロジェクト](https://www.cncf.io/reports/kubernetes-project-journey-report/)にした驚異的なコントリビューションの量を確認できます。
* **88,474**人のコントリビューター
* **15,121**人のコードコミッター
* **4,228,347**件のコントリビューション
* **158,530**件のIssue
* **311,787**件のPull Request
## 今日のKubernetes
<img src="welcome.jpg" alt="KubeCon NA 2023" class="left" style="max-width: 20em; margin: 1em">
初期の頃からこのプロジェクトは技術的能力、利用状況、およびコントリビューションの面で驚異的な成長を遂げてきました。
プロジェクトは今もなおユーザーにより良いサービスを提供するために積極的に改善に取り組んでいます。
次回の1.31リリースでは、長期にわたる重要なプロジェクトの完成を祝います。
それはインツリークラウドプロバイダーのコードの削除です。
この[Kubernetesの歴史上最大のマイグレーション](https://kubernetes.io/blog/2024/05/20/completing-cloud-provider-migration/)では、約150万行のコードが削除され、コアコンポーネントのバイナリサイズが約40%削減されました。
プロジェクトの初期には、拡張性が成功の鍵であることは明らかでした。
しかし、その拡張性をどのように実現するかは常に明確ではありませんでした。
このマイグレーションにより、Kubernetesの核となるコードベースからさまざまなベンダー固有の機能が削除されました。
ベンダー固有の機能は、今後は[Custom Resource Definition (CRD)](/ja/docs/concepts/extend-kubernetes/api-extension/custom-resources/)や[Gateway API](https://gateway-api.sigs.k8s.io/)などの他のプラグイン拡張機能やパターンによってよりよく提供されるようになります。
Kubernetesは、膨大なユーザーベースにサービスを提供する上で新たな課題にも直面しており、コミュニティはそれに適応しています。
その一例が、新しいコミュニティ所有のregistry.k8s.ioへのイメージホスティングの移行です。
ユーザーに事前コンパイル済みのバイナリイメージを提供するためのエグレスの帯域幅とコストは非常に大きなものとなっています。
この新しいレジストリの変更により、コミュニティはこれらの便利なイメージをよりコスト効率およびパフォーマンス効率の高い方法で提供し続けることができます。
必ず[ブログ記事](https://kubernetes.io/blog/2022/11/28/registry-k8s-io-faster-cheaper-ga/)をチェックし、registry.k8s.ioを使用するように更新してください!
## Kubernetesの未来
<img src="lts.jpg" alt="" class="right" width="300px" style="max-width: 20em; margin: 1em">
10年が経ち、Kubernetesの未来は依然として明るく見えます。
コミュニティはユーザー体験の改善とプロジェクトの持続可能性を向上させる変更を優先しています。
アプリケーション開発の世界は進化し続けており、Kubernetesもそれに合わせて変化していく準備ができています。
2024年にはAIの登場がかつてニッチなワークロードタイプを重要なものへと変えました。
分散コンピューティングとワークロードスケジューリングは常に人工知能(AI)、機械学習(ML)、および高性能コンピューティング(HPC)ワークロードのリソース集約的なニーズと密接に関連してきました。
コントリビューターは、新しく開発されたワークロードのニーズとそれらにKubernetesがどのように最適に対応できるかに注目しています。
新しい[Serving Working Group](https://github.com/kubernetes/community/tree/master/wg-serving)は、コミュニティがこれらのワークロードのニーズに対処するためにどのように組織化されているかの一例です。
今後数年でKubernetesがさまざまな種類のハードウェアを管理する能力や、ハードウェア全体でチャンクごとに実行される大規模なバッチスタイルのワークロードのスケジューリング能力に関して改善が見られるでしょう。
Kubernetesを取り巻くエコシステムは成長し続け、進化していきます。
将来的にはインツリーベンダーコードのマイグレーションやレジストリの変更など、プロジェクトの持続可能性を維持するための取り組みがますます重要になるでしょう。
Kubernetesの次の10年は、ユーザーとエコシステム、そして何よりもそれに貢献する人々によって導かれるでしょう。
コミュニティは新しいコントリビューターを歓迎しています。
コントリビューションに関する詳細は、[新しいコントリビューター向けのガイド](https://k8s.dev/contributors)で確認できます。
Kubernetesの未来を一緒に築いていくことを楽しみにしています!
{{< figure src="kcsna2023.jpg" alt="KCSNA 2023">}}

Binary file not shown.

After

Width:  |  Height:  |  Size: 183 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 274 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 479 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 76 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 284 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 279 KiB

View File

@ -32,7 +32,7 @@ cgroup v2はリソース管理機能を強化した統合制御システムを
- ページキャッシュの書き戻しといった、非即時のリソース変更
Kubernetesのいくつかの機能では、強化されたリソース管理と隔離のためにcgroup v2のみを使用しています。
例えば、[MemoryQoS](/blog/2021/11/26/qos-memory-resources/)機能はメモリーQoSを改善し、cgroup v2の基本的な機能に依存しています。
例えば、[MemoryQoS](/docs/concepts/workloads/pods/pod-qos/#memory-qos-with-cgroup-v2)機能はメモリーQoSを改善し、cgroup v2の基本的な機能に依存しています。
## cgroup v2を使う {#using-cgroupv2}

View File

@ -142,7 +142,7 @@ Cluster Domain | Service (ns/name) | StatefulSet (ns/name) | StatefulSet Domain
### 安定したストレージ
Kubernetesは各VolumeClaimTemplateに対して、1つの[PersistentVolume](/docs/concepts/storage/persistent-volumes/)を作成します。上記のnginxの例において、各Podは`my-storage-class`というStorageClassをもち、1GiBのストレージ容量を持った単一のPersistentVolumeを受け取ります。もしStorageClassが指定されていない場合、デフォルトのStorageClassが使用されます。PodがNode上にスケジュール(もしくは再スケジュール)されたとき、その`volumeMounts`はPersistentVolume Claimに関連したPersistentVolumeをマウントします。
StatefulSetで定義された各VolumeClaimTemplateに対して、各Podは1つのPersistentVolumeClaimを受け取ります。上記のnginxの例において、各Podは`my-storage-class`というStorageClassをもち、1GiBのストレージ容量を持った単一のPersistentVolumeを受け取ります。もしStorageClassが指定されていない場合、デフォルトのStorageClassが使用されます。PodがNode上にスケジュール(もしくは再スケジュール)されたとき、その`volumeMounts`はPersistentVolume Claimに関連したPersistentVolumeをマウントします。
注意点として、PodのPersistentVolume Claimと関連したPersistentVolumeは、PodやStatefulSetが削除されたときに削除されません。
削除する場合は手動で行わなければなりません。

View File

@ -0,0 +1,19 @@
---
title: CRI-O
id: cri-o
date: 2019-05-14
full_link: https://cri-o.io/#what-is-cri-o
short_description: >
Kubernetesに特化した軽量コンテナランタイム
aka:
tags:
- tool
---
Kubernetes CRIと一緒にOCIコンテナランタイムを使うためのツールです。
<!--more-->
CRI-OはOpen Container Initiative (OCI) [runtime spec](https://www.github.com/opencontainers/runtime-spec)と互換性がある{{< glossary_tooltip text="コンテナ" term_id="container" >}}ランタイムを使用できるようにするための{{< glossary_tooltip term_id="cri" >}}の実装の1つです。
CRI-Oのデプロイによって、Kubernetesは任意のOCI準拠のランタイムを、{{< glossary_tooltip text="Pod" term_id="pod" >}}を実行するためのコンテナランタイムとして利用することと、リモートレジストリからOCIコンテナイメージを取得することができるようになります。

View File

@ -0,0 +1,19 @@
---
title: カスタムリソース定義(Custom Resource Definitions)
id: CustomResourceDefinition
date: 2018-04-12
full_link: /docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/
short_description: >
拡張APIサーバーを構築することなく、Kubernetes APIサーバーに追加するリソースを定義するカスタムコードです。
aka:
tags:
- fundamental
- operation
- extension
---
拡張APIサーバーを構築することなく、Kubernetes APIサーバーに追加するリソースを定義するカスタムコードです。
<!--more-->
公開されているサポート対象のAPIリソースではニーズを満たせない場合、カスタムリソース定義(Custom Resource Definitions)を使用することで環境に合わせてKubernetes APIを拡張できます。

View File

@ -2,6 +2,7 @@
title: Glossário
layout: glossary
noedit: true
body_class: glossary
default_active_tag: fundamental
weight: 5
card:

View File

@ -6,6 +6,7 @@ approvers:
title: Глосарій
layout: glossary
noedit: true
body_class: glossary
default_active_tag: fundamental
weight: 5
card:

Some files were not shown because too many files have changed in this diff Show More