test: add memif & vcl test configurations

Signed-off-by: Nathan Skrzypczak <nathan.skrzypczak@gmail.com>
This commit is contained in:
Nathan Skrzypczak 2022-02-03 17:02:43 +01:00 committed by Aloÿs
parent f6d019b5e5
commit 8bc583dc25
8 changed files with 555 additions and 2 deletions

View File

@ -15,6 +15,7 @@
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source $SCRIPTDIR/shared.sh
VPP_DATAPLANE_DIRECTORY="$(cd $SCRIPTDIR/../.. >/dev/null 2>&1 && pwd)"
PERFTESTDIR=/path/to/perf-tests # git clone git@github.com:kubernetes/perf-tests.git
@ -107,7 +108,10 @@ test_apply ()
get_nodes
k_create_namespace $NAME
export VPP_DATAPLANE_DIRECTORY=$VPP_DATAPLANE_DIRECTORY
sed -e "s/_NODE_1_/${NODES[0]}/" -e "s/_NODE_2_/${NODES[1]}/" $YAML_FILE | \
envsubst | \
kubectl apply -f -
}
@ -187,6 +191,7 @@ kube_test_cli ()
fi
}
# Per-class setup hook for WRK1; intentionally a no-op.
setup_test_class_WRK1 () { :; }
setup_test_WRK1 ()
{
if [ x$OTHERHOST = x ]; then
@ -203,6 +208,7 @@ setup_test_WRK1 ()
done
}
# Per-class setup hook for WRK2; intentionally a no-op.
setup_test_class_WRK2 () { :; }
setup_test_WRK2 ()
{
cp /tmp/calico-vpp.yaml $DIR/cni.yaml
@ -214,6 +220,7 @@ setup_test_WRK2 ()
done
}
# Per-class setup hook for IPERF; intentionally a no-op.
setup_test_class_IPERF () { :; }
setup_test_IPERF ()
{
N_FLOWS=${N_FLOWS:=4}
@ -229,6 +236,7 @@ run_test_IPERF ()
kubectl exec -it iperf-client -n iperf -- $TEST_CMD > $DIR/test_output
}
# Per-class setup hook for IPERF3; intentionally a no-op.
setup_test_class_IPERF3 () { :; }
setup_test_IPERF3 ()
{
N_FLOWS=${N_FLOWS:=4}
@ -244,6 +252,7 @@ run_test_IPERF3 ()
kubectl exec -it iperf3-client -n iperf3 -- $TEST_CMD > $DIR/test_output
}
# Per-class setup hook for IPERF3_VCL; intentionally a no-op.
setup_test_class_IPERF3_VCL () { :; }
setup_test_IPERF3_VCL ()
{
N_FLOWS=${N_FLOWS:=4}
@ -259,6 +268,7 @@ run_test_IPERF3_VCL ()
kubectl exec -it iperf3-client -n iperf3-vcl -- $TEST_CMD > $DIR/test_output
}
# Per-class setup hook for IPERF3_VCL_TLS; intentionally a no-op.
setup_test_class_IPERF3_VCL_TLS () { :; }
setup_test_IPERF3_VCL_TLS ()
{
N_FLOWS=${N_FLOWS:=4}
@ -276,6 +286,40 @@ run_test_IPERF3_VCL_TLS ()
kubectl exec -it iperf3-client -n iperf3-vcl -- $TEST_CMD > $DIR/test_output
}
# Per-class setup hook for MEMIF; intentionally a no-op.
setup_test_class_MEMIF () { :; }
# Per-run setup for the MEMIF test: snapshot the CNI config into the
# run directory ($DIR) for later reference, and purge cnat sessions on
# node1 so every run starts from a clean connection-tracking state.
setup_test_MEMIF ()
{
  # Quote expansions: $DIR / $SCRIPTDIR may contain spaces (SC2086)
  cp /tmp/calico-vpp.yaml "$DIR/cni.yaml"
  "$SCRIPTDIR/vppdev.sh" vppctl node1 cnat session purge
}
# Run a vppctl command inside the mvpp pod's VPP instance.
# All arguments are forwarded verbatim; output goes to $DIR/test_output.
mvpp_vppctl ()
{
  # "$@" (quoted) forwards each argument as-is; the original unquoted $@
  # re-split and glob-expanded arguments (SC2068)
  kubectl exec -i -n mvpp mvpp -- /home/hostuser/vppctl.sh "$@" > "$DIR/test_output"
}
# this test assumes
# - we have mvpp up & running (launched by user), and attached to the memif
# - We have trex cloned in ~/trex-core on ${OTHERHOST} and trex is running
# - We have vpp-dataplane cloned on ${OTHERHOST} in the same directory as here
run_test_MEMIF ()
{
  if [ x$ISVIP = xyes ]; then
    # Target the service VIP
    DST_IP=$( kubectl get svc -n mvpp mvpp-service -o go-template --template='{{printf "%s\n" .spec.clusterIP}}' )
  else
    # Target the pod address directly (column 6 of `get pods -o wide` is the IP)
    DST_IP=$( kubectl get pods -n mvpp -o wide | grep mvpp | awk '{print $6}' )
  fi
  TEST_CMD="$SCRIPTDIR/trex_udp_gen.py --ip ${DST_IP} --frame-size ${FRAME_SIZE} -m 100% -d ${TEST_LEN} --limit-flows ${LIMIT_FLOWS}"
  echo "Running test : ${TEST_CMD}"
  echo "$TEST_CMD" > "$DIR/test_command.sh"
  # Sample memif counters once per second for the whole run, in the background
  mvpp_vppctl monitor interface memif1/0 interval 1 count "${TEST_LEN}" &
  # BUGFIX: was MVPPPID=$? — $? is the previous command's exit status;
  # $! is the PID of the background job we just started.
  MVPPPID=$!
  # NOTE(review): the ~ stays literal locally (single quotes) and is only
  # expanded by the remote shell in the ssh command — confirm intended.
  PYTHONPATH='~/trex-core/scripts/automation/trex_control_plane/interactive'
  ssh -t "$OTHERHOST" PYTHONPATH=$PYTHONPATH $TEST_CMD > "$DIR/trex_output"
  echo "waiting..."
  # BUGFIX: was `wait $MYVPPPID` (typo) — expanded empty, so the monitor
  # job was never actually waited on.
  wait "$MVPPPID"
}
run_test_WRK1 ()
{
TEST_SZ=${TEST_SZ:=4096} # 4096 // 2MB
@ -286,6 +330,54 @@ run_test_WRK1 ()
$TEST_CMD > $DIR/test_output
}
# Per-run setup for the ENVOY test: snapshot the CNI config and flush
# connection-tracking state so runs do not reuse stale sessions.
setup_test_ENVOY ()
{
  # Quote $DIR: may contain spaces (SC2086)
  cp /tmp/calico-vpp.yaml "$DIR/cni.yaml"
  if [ x$ISVPP = xyes ]; then
    # VPP dataplane: purge cnat sessions on node1
    "$SCRIPTDIR/vppdev.sh" vppctl node1 cnat session purge
  else
    # Linux dataplane: flush the kernel conntrack table instead
    sudo conntrack -F
  fi
}
# Start envoy (plain linux networking) inside its pod, pinned to ${CPUS},
# capturing stdout+stderr in $USER_DIR/envoy_output.
run_envoy_linux ()
{
  kubectl exec -n envoy envoy-linux -- \
    taskset -c ${CPUS} envoy -c /etc/envoy/envoy.yaml --concurrency ${N_ENVOYS} \
    > $USER_DIR/envoy_output 2>&1
}
# Start envoy with the VCL socket interface inside its pod, pinned to
# ${CPUS}, capturing stdout+stderr in $USER_DIR/envoy_output.
run_envoy_vcl ()
{
  kubectl exec -n envoy envoy-vcl -- \
    taskset -c ${CPUS} envoy -c /etc/envoy/envoyvcl.yaml --concurrency ${N_ENVOYS} \
    > $USER_DIR/envoy_output 2>&1
}
# One-time setup for the ENVOY test class: kill any leftover envoy, then
# start the right flavour (VCL or linux) in the background.
setup_test_class_ENVOY ()
{
  # BUGFIX: pkill exits non-zero when nothing matches, which would abort
  # the script on a clean first run (e.g. under `set -e`).
  sudo pkill envoy || true
  if [ x$ISVCL = xyes ]; then
    # Disable source-NAT towards the backend so VCL sees the real peer address
    "$SCRIPTDIR/vppdev.sh" vppctl node1 set cnat snat-policy prefix 20.0.0.2/32
    run_envoy_vcl &
  else
    run_envoy_linux &
  fi
}
# Drive wrk from ${OTHERHOST} against the envoy pod under test and store
# the raw wrk output in $DIR/test_output.
run_test_ENVOY ()
{
  local target_pod
  if [ x$ISVCL = xyes ]; then
    target_pod=envoy-vcl
  else
    target_pod=envoy-linux
  fi
  # Column 6 of `kubectl get pods -o wide` is the pod IP
  DST_IP=$( kubectl get pods -n envoy -o wide | grep "$target_pod" | awk '{print $6}' )
  TEST_CMD="$HOME/wrk/wrk.py taskset -c 0-42 wrk -c300 -t30 -d${TEST_LEN}s http://${DST_IP}:10001/64B.json"
  echo "Running test : ${TEST_CMD}"
  echo $TEST_CMD > $DIR/test_command.sh
  ssh -t $OTHERHOST $TEST_CMD > $DIR/test_output
}
run_test_WRK2 ()
{
TEST_SZ=${TEST_SZ:=4096} # 4096 // 2MB
@ -331,7 +423,7 @@ test_run ()
shift
done
if [ x$CPUS = x ]; then
if [ x$CPUS = x ] && [ x$CASE != xMEMIF ]; then
echo "provide CPUS=27-35,39-47"
exit 1
fi
@ -352,6 +444,8 @@ test_run ()
exit 1
fi
mkdir -p $USER_DIR
setup_test_class_$CASE
for i in $(seq $N_TESTS); do
echo "Test run #${i}"
TEST_N=$i DIR=$USER_DIR/test_${i} test_run_one
@ -382,6 +476,20 @@ get_wrk_csv_output ()
tail -1 $FILE | sed 's/[^[:print:]]//g'
}
# Average the memif rate samples (column 2 of the vppctl "monitor
# interface" output, e.g. "1.23Mpps") over a window of ${TEST_SKIP}
# samples, after skipping the initial warm-up lines. Prints the mean
# with two decimals (no trailing newline).
get_mvpp_csv_output ()
{
  FILE=$1/test_output
  # tail -n +K starts output at line K; head then keeps TEST_SKIP lines.
  # BUGFIX: was `head -n +${TEST_SKIP}` — the '+K' count form is only
  # specified for tail, not head. Also dropped the useless `cat`.
  tail -n +"${TEST_SKIP}" "$FILE" | \
    head -n "${TEST_SKIP}" | \
    awk '{print $2}' | \
    sed s/Mpps//g | \
    awk '{ sum += $1 }
      END {
        printf "%.2f", sum/NR
      }'
}
get_avg_iperf_bps ()
{
FILE=$1/test_output
@ -414,8 +522,14 @@ get_avg_report ()
CASE=$(cat $DIR/testcase)
if [ x$CASE = xIPERF ] || [ x$CASE = xIPERF3 ] || [ x$CASE = xIPERF3_VCL ] || [ x$CASE = xIPERF3_VCL_TLS ]; then
echo "$TEST_N;$(get_avg_iperf_bps $DIR);$(get_avg_cpu $DIR node1);$(get_avg_cpu $DIR node2)"
elif [ x$CASE = xWRK ] || [ x$CASE = xWRK2 ]; then
echo "$TEST_N;$(get_wrk_csv_output $DIR);$(get_avg_cpu $DIR node1);$(get_avg_cpu $DIR node2)"
elif [ x$CASE = xENVOY ]; then
echo "$TEST_N;$(get_wrk_csv_output $DIR);$(get_avg_cpu $DIR node1)"
elif [ x$CASE = xMEMIF ]; then
echo "$TEST_N;$(get_mvpp_csv_output $DIR);$(get_avg_cpu $DIR node1)"
else
echo "$TEST_N;$(get_wrk_csv_output $DIR);$(get_avg_cpu $DIR node1);$(get_avg_cpu $DIR node2)"
echo "Unknown case"
fi
}

138
test/scripts/trex_udp_gen.py Executable file
View File

@ -0,0 +1,138 @@
#!/usr/bin/python
import argparse;
from trex_stl_lib.api import *
class STLS1(object):
    """TRex stateless profile: a continuous UDP stream towards a fixed
    destination, with source ip/port tuples drawn from a bounded pool so
    the generator produces a controlled number of distinct flows."""

    def __init__(self, args=None):
        # `args` is the argparse namespace from process_options().
        # BUGFIX: a default is provided (mirroring the CLI defaults) so the
        # zero-argument construction in register() no longer raises TypeError.
        self.dst_addr = args.dst_addr if args is not None else "127.0.0.1"
        self.pkt_size = args.pkt_size if args is not None else 64
        self.limit_flows = args.limit_flows if args is not None else 10000

    def create_stream(self):
        base_pkt = Ether()/IP(dst=self.dst_addr)/UDP(dport=4444)
        # Pad the L2 frame up to the requested size (FCS not included);
        # clamp at 0 so a pkt_size below the header length cannot go negative.
        pad_len = max(0, self.pkt_size - len(base_pkt))
        base_pkt = base_pkt / Raw('a' * pad_len)
        vm = STLVM()
        # Vary source ip/port over a bounded tuple pool -> limit_flows flows
        vm.tuple_var(name="tuple", ip_min="10.0.0.2", ip_max="10.0.0.255",
                     port_min=1025, port_max=65535, limit_flows=self.limit_flows)
        vm.write(fv_name="tuple.ip", pkt_offset="IP.src")
        vm.write(fv_name="tuple.port", pkt_offset="UDP.sport")
        # Recompute IP/UDP checksums in hardware after the field rewrites
        # vm.fix_chksum()
        vm.fix_chksum_hw(l3_offset='IP', l4_offset='UDP', l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP)
        return STLStream(packet=STLPktBuilder(pkt=base_pkt, vm=vm), mode=STLTXCont())

    def get_streams(self, direction=0, **kwargs):
        # TRex profile entry point: a single continuous stream
        return [self.create_stream()]
# dynamic load - used for trex console or simulator
# e.g. :
# reset ; service ; arp ; service --off ; start -f stl/this_file.py -m 10mbps -p 1
def register():
    # NOTE(review): STLS1.__init__ declares a required `args` parameter, so
    # this zero-argument call raises TypeError as written. Give `args` a
    # default in STLS1 (or build a namespace here) before using the dynamic
    # trex-console load path.
    return STLS1()
def connect_and_run_test(args):
    """Connect to the local TRex server, run the UDP stream profile on
    port 0 for args.duration seconds at args.multiplier rate, then print
    the port-0 stats as pretty JSON. Errors are reported, not raised; the
    client is always disconnected."""
    # BUGFIX: json was only available via the trex wildcard import (if at
    # all) — import it explicitly so the dump cannot NameError.
    import json

    stream = STLS1(args).get_streams()[0]
    c = STLClient()  # connects to localhost by default
    try:
        print("connect...")
        c.connect()
        c.acquire(force=True)
        # attach the stream to port 0
        c.add_streams(stream, ports=[0])
        # clear the stats before injecting
        c.clear_stats()
        c.start(ports=[0], mult=args.multiplier, duration=args.duration)
        # block until done
        c.wait_on_traffic(ports=[0])
        # read the stats after the test
        stats = c.get_stats()
        print(json.dumps(stats[0], indent=4, separators=(',', ': '), sort_keys=True))
    except STLError as e:
        print("FAILED")
        print(e)
    finally:
        c.disconnect()
def process_options():
    """Parse the generator's command line into an argparse namespace
    (pkt_size, dst_addr, duration, multiplier, limit_flows)."""
    usage_text = """
connect to TRex and send burst of packets
examples
stl_run_udp_simple.py -s 9001
stl_run_udp_simple.py -s 9000 -d 2
stl_run_udp_simple.py -s 3000 -d 3 -m 10mbps
stl_run_udp_simple.py -s 3000 -d 3 -m 10mbps --debug
then run the simulator on the output
./stl-sim -f example.yaml -o a.pcap ==> a.pcap include the packet
"""
    opt_parser = argparse.ArgumentParser(
        usage=usage_text,
        description="example for TRex api",
        epilog=" written by hhaim")

    # L2 frame size (FCS excluded)
    opt_parser.add_argument("-s", "--frame-size", dest="pkt_size",
                            help='L2 frame size in bytes without FCS',
                            default=64, type=int)
    # Destination of the generated UDP stream
    opt_parser.add_argument("--ip", dest="dst_addr",
                            help='remote trex ip default local',
                            default="127.0.0.1", type=str)
    opt_parser.add_argument('-d', '--duration', dest='duration',
                            help='duration in second ',
                            default=10, type=int)
    opt_parser.add_argument('-m', '--multiplier', dest='multiplier',
                            help='speed in gbps/pps for example 1gbps, 1mbps, 1mpps ',
                            default="1mbps")
    opt_parser.add_argument('-f', '--limit-flows', dest='limit_flows',
                            help='Maximum number of flows',
                            default=10000, type=int)
    return opt_parser.parse_args()
def main():
    """Entry point: parse CLI options and drive the TRex run."""
    connect_and_run_test(process_options())


if __name__ == "__main__":
    main()

28
test/yaml/envoy/README.md Normal file
View File

@ -0,0 +1,28 @@
## Envoy VCL/linux testing
This is a simple toy setup for running envoy within VPP attached with the VCL
This testing was done in a single node cluster, with `20.0.0.1/24` being the node
address, connected to another node with `20.0.0.2/24`
Envoy is configured in both cases to listen on `podIP:10001` and to proxy to `20.0.0.2:80`
Service addresses are also configured
In order for this to work, we need to disable source NAT (SNAT), either globally or just for our peer address:
````
set cnat snat-policy prefix 20.0.0.2/32
````
Then create two envoy pods (with and without VCL)
````bash
test.sh up envoy
````
To start envoy
````bash
# with VCL
kubectl exec -it -n envoy envoy-vcl -- taskset -c 0-3 envoy -c /etc/envoy/envoyvcl.yaml --concurrency 4
# with linux
kubectl exec -it -n envoy envoy-linux -- taskset -c 0-3 envoy -c /etc/envoy/envoy.yaml --concurrency 4
````

View File

@ -0,0 +1,50 @@
# Envoy bootstrap (plain linux sockets): admin on :8081, reverse proxy
# :10001 -> 20.0.0.2:80.
# NOTE(review): structural indentation was lost in the scrape; nesting
# reconstructed from the standard Envoy v3 bootstrap schema — verify
# against the original file.
admin:
  access_log_path: /tmp/envoy.log
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 8081
static_resources:
  listeners:
  # define a reverse proxy on :10001 that always uses :80 as an origin.
  - name: listener_0
    address:
      socket_address:
        protocol: TCP
        address: 0.0.0.0
        port_value: 10001
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          route_config:
            name: local_route
            virtual_hosts:
            - name: service
              domains: ["*"]
              routes:
              - match:
                  prefix: "/"
                route:
                  cluster: proxy_service
          http_filters:
          - name: envoy.filters.http.router
  clusters:
  - name: proxy_service
    connect_timeout: 0.25s
    type: STATIC
    # dns_lookup_family: V4_ONLY
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: proxy_service
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                # This is the address/port we proxy to
                address: 20.0.0.2
                port_value: 80

View File

@ -0,0 +1,55 @@
# Envoy bootstrap (VCL socket interface): same proxy as envoy.yaml
# (:10001 -> 20.0.0.2:80) but all sockets go through VPP's VCL.
# NOTE(review): structural indentation was lost in the scrape; nesting
# reconstructed from the standard Envoy v3 bootstrap schema — verify
# against the original file.
admin:
  access_log_path: /tmp/envoy.log
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 8081
static_resources:
  listeners:
  # define a reverse proxy on :10001 that always uses :80 as an origin.
  - name: listener_0
    address:
      socket_address:
        protocol: TCP
        address: 0.0.0.0
        port_value: 10001
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          route_config:
            name: local_route
            virtual_hosts:
            - name: service
              domains: ["*"]
              routes:
              - match:
                  prefix: "/"
                route:
                  cluster: proxy_service
          http_filters:
          - name: envoy.filters.http.router
  clusters:
  - name: proxy_service
    connect_timeout: 0.25s
    type: LOGICAL_DNS
    dns_lookup_family: V4_ONLY
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: proxy_service
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                # This is the address/port we proxy to
                address: 20.0.0.2
                port_value: 80
# Route all of envoy's sockets through the VPP Comms Library
bootstrap_extensions:
- name: envoy.extensions.vcl.vcl_socket_interface
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.vcl.v3alpha.VclSocketInterface
default_socket_interface: "envoy.extensions.vcl.vcl_socket_interface"

92
test/yaml/envoy/test.yaml Normal file
View File

@ -0,0 +1,92 @@
# Test pods & services for the ENVOY case: one pod with the VCL socket
# interface enabled (vpp.vcl annotation), one plain-linux pod, each with
# a matching ClusterIP service on :10001. _NODE_1_ is substituted by
# test.sh; ${HOME} / ${VPP_DATAPLANE_DIRECTORY} are filled by envsubst.
# NOTE(review): indentation was lost in the scrape; nesting reconstructed
# from the standard Pod/Service schemas — verify against the original.
apiVersion: v1
kind: Pod
metadata:
  name: envoy-vcl
  namespace: envoy
  labels:
    app: envoy-vcl
  annotations:
    "cni.projectcalico.org/vpp.vcl": "enable"
spec:
  containers:
  - name: envoy-vcl
    image: envoyproxy/envoy-contrib:v1.21-latest
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 5001
    - containerPort: 5003
    # keep the container alive; envoy is started manually by the harness
    command: ["tail", "-f", "/dev/null"]
    volumeMounts:
    - mountPath: /home/hostuser
      name: hostuser
    - mountPath: /etc/envoy/envoy.yaml
      name: envoyyaml
    - mountPath: /etc/vpp/vcl.conf
      name: vclconf
  nodeName: _NODE_1_
  volumes:
  - name: hostuser
    hostPath:
      path: ${HOME}
  - name: envoyyaml
    hostPath:
      path: ${VPP_DATAPLANE_DIRECTORY}/test/yaml/envoy/envoyvcl.yaml
  - name: vclconf
    hostPath:
      path: ${VPP_DATAPLANE_DIRECTORY}/test/yaml/envoy/vcl.conf
---
apiVersion: v1
kind: Service
metadata:
  namespace: envoy
  name: envoy-vcl-service
spec:
  selector:
    app: envoy-vcl
  ports:
  - protocol: TCP
    port: 10001
---
apiVersion: v1
kind: Pod
metadata:
  name: envoy-linux
  namespace: envoy
  labels:
    app: envoy-linux
spec:
  containers:
  - name: envoy-linux
    image: envoyproxy/envoy-contrib:v1.21-latest
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 5001
    - containerPort: 5003
    # keep the container alive; envoy is started manually by the harness
    command: ["tail", "-f", "/dev/null"]
    volumeMounts:
    - mountPath: /home/hostuser
      name: hostuser
    - mountPath: /etc/envoy/envoy.yaml
      name: envoyyaml
  nodeName: _NODE_1_
  volumes:
  - name: hostuser
    hostPath:
      path: ${HOME}
  - name: envoyyaml
    hostPath:
      path: ${VPP_DATAPLANE_DIRECTORY}/test/yaml/envoy/envoy.yaml
---
apiVersion: v1
kind: Service
metadata:
  namespace: envoy
  name: envoy-linux-service
spec:
  selector:
    app: envoy-linux
  ports:
  - protocol: TCP
    port: 10001

24
test/yaml/envoy/vcl.conf Normal file
View File

@ -0,0 +1,24 @@
# VPP Comms Library (VCL) configuration mounted into the envoy-vcl pod
# at /etc/vpp/vcl.conf; sizes are tuned for high-throughput testing.
vcl {
# Max rx/tx session buffers sizes in bytes. Increase for high throughput traffic.
rx-fifo-size 400000
tx-fifo-size 400000
# Size of shared memory segments between VPP and VCL in bytes
segment-size 1000000000
add-segment-size 1000000000
# App has access to global routing table
app-scope-global
# Allow inter-app shared-memory cut-through sessions
app-scope-local
# Message queues use eventfds for notifications
use-mq-eventfd
# VCL worker incoming message queue size
event-queue-size 40000
# Abstract socket of the VPP session-layer API the app attaches to
app-socket-api @vpp/session
}

52
test/yaml/mvpp/test.yaml Normal file
View File

@ -0,0 +1,52 @@
# Test pod & service for the MEMIF case: a VPP container attached via a
# memif interface (udp/tcp ports 4444-20000 go over memif, everything
# else over tun/tap). _NODE_1_ is substituted by test.sh; ${HOME} is
# filled by envsubst.
# NOTE(review): indentation was lost in the scrape; nesting reconstructed
# from the standard Pod/Service schemas — verify against the original.
---
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: mvpp
  name: mvpp
  namespace: mvpp
  annotations:
    "cni.projectcalico.org/vpp.memif.ports": "tcp:4444-20000,udp:4444-20000"
    "cni.projectcalico.org/vpp.tuntap.ports": "default"
spec:
  containers:
  - name: mvpp
    image: calicovpp/vpp:latest
    # keep the container alive; VPP is started manually by the user
    command: ["tail", "-f", "/dev/null"]
    imagePullPolicy: IfNotPresent
    securityContext:
      privileged: true
    ports:
    - containerPort: 4444
      protocol: UDP
    resources:
      limits:
        memory: 2Gi
        hugepages-2Mi: 2Gi
    volumeMounts:
    - mountPath: /home/hostuser
      name: hostuser
    - mountPath: /hugepages
      name: hugepage
  nodeName: _NODE_1_
  volumes:
  - name: hugepage
    emptyDir:
      medium: HugePages
  - name: hostuser
    hostPath:
      path: ${HOME}
---
apiVersion: v1
kind: Service
metadata:
  namespace: mvpp
  name: mvpp-service
spec:
  selector:
    app: mvpp
  ports:
  - protocol: UDP
    port: 4444