Contour itself is also an Ingress controller, so the v1beta1/v1 Ingress resource specifications can be used with Contour as well; the only difference is that the kubernetes.io/ingress.class annotation must be set to contour.
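
For example, a conventional Ingress handled by Contour is just the usual resource plus that annotation; a minimal sketch (the host and Service names are illustrative):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: demoapp-ingress
  annotations:
    kubernetes.io/ingress.class: contour   # hand this Ingress to Contour
spec:
  rules:
  - host: www.mylinuxops.cn
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: demoapp                  # assumed to exist in the same namespace
            port:
              number: 80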

In addition to Ingress, Contour supports its own resource type, introduced via a CRD named httpproxies.projectcontour.io.

# List the available API resources
root@k8s-master01:~/yaml/chapter13# kubectl api-resources | grep "projectcontour"
extensionservices           extensionservice,extensionservices   projectcontour.io/v1alpha1   true   ExtensionService
httpproxies                 proxy,proxies                        projectcontour.io/v1         true   HTTPProxy
tlscertificatedelegations   tlscerts                             projectcontour.io/v1         true   TLSCertificateDelegation

The advanced features of Contour are exposed through the HTTPProxy resource.
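
Because the HTTPProxy CRD ships with an OpenAPI schema, its fields can also be browsed in place, for example:

root@k8s-master01:~/yaml/chapter13# kubectl explain httpproxy.spec.virtualhost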

HTTPProxy resource specification

apiVersion: projectcontour.io/v1   # API group and version
kind: HTTPProxy                    # kind of the CRD-defined resource
metadata:
  name <string>
  namespace <string>               # a namespace-scoped resource
spec:
  virtualhost <VirtualHost>        # virtual host in FQDN format, similar to host in an Ingress
    fqdn <string>                  # FQDN-format name of the virtual host
    tls <TLS>                      # enable HTTPS; by default HTTP requests are redirected to HTTPS with a 301
      secretName <string>          # name of the Secret storing the certificate and private key
      minimumProtocolVersion <string>          # minimum SSL/TLS protocol version to accept
      passthrough <boolean>        # enable passthrough mode; when enabled the controller does not terminate the HTTPS session
      clientValidation <DownstreamValidation>  # validate client certificates; optional
        caSecret <string>          # CA certificate used to validate client certificates
  routes <[]Route>                 # routing rules
    conditions <[]Condition>       # traffic-matching conditions; supports PATH-prefix and header matching
      prefix <String>              # PATH prefix match, similar to the path field in an Ingress
    permitInsecure <Boolean>       # disable the default HTTP-to-HTTPS redirect
    services <[]Service>           # backend services, translated into Envoy Cluster definitions
      name <String>                # Service name
      port <Integer>               # Service port
      protocol <String>            # protocol used to reach the backend; valid values are tls, h2, and h2c
      validation <UpstreamValidation>          # validate the server certificate
        caSecret <String>
        subjectName <string>       # Subject value required in the certificate

HTTPProxy example

1. Write the resource manifest

root@k8s-master01:~/yaml/chapter13# vim httpproxy-demo.yaml
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
  name: httpproxy-demo
  namespace: default
spec:
  virtualhost:
    fqdn: www.mylinuxops.cn               # host name
    tls:                                  # TLS settings
      secretName: mylinuxops-tls          # Secret holding the certificate and private key
      minimumProtocolVersion: "tlsv1.1"   # minimum TLS version to accept
  routes:
  - conditions:
    - prefix: /                           # proxy all requests under / to the backend
    services:                             # backend services
    - name: demoapp
      port: 80
    permitInsecure: true                  # do not redirect HTTP to HTTPS
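
The manifest references a Secret named mylinuxops-tls, which must exist beforehand; assuming a certificate/key pair on disk (the file names below are illustrative), it can be created like this:

root@k8s-master01:~/yaml/chapter13# kubectl create secret tls mylinuxops-tls --cert=mylinuxops.crt --key=mylinuxops.key -n default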

2. Apply the manifest

root@k8s-master01:~/yaml/chapter13# kubectl  apply -f httpproxy-demo.yaml
httpproxy.projectcontour.io/httpproxy-demo created

3. Test access

# Check the ports exposed by Contour
root@k8s-master01:~/yaml/chapter13# kubectl get svc -n projectcontour
NAME      TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
contour   ClusterIP      10.100.169.73    <none>        8001/TCP                     49m
envoy     LoadBalancer   10.110.150.215   <pending>     80:32182/TCP,443:30273/TCP   49m

# Access over HTTP
root@k8s-master01:~/yaml/chapter13# curl -H "HOST:www.mylinuxops.cn" 172.16.11.81:32182
iKubernetes demoapp v1.0 !! ClientIP: 192.168.131.23, ServerName: deployment-demo-fb544c5d8-qbwtm, ServerIP: 192.168.131.20!
root@k8s-master01:~/yaml/chapter13# curl -H "HOST:www.mylinuxops.cn" 172.16.11.81:32182
iKubernetes demoapp v1.0 !! ClientIP: 192.168.131.23, ServerName: deployment-demo-fb544c5d8-h97bv, ServerIP: 192.168.96.23!
root@k8s-master01:~/yaml/chapter13# curl -H "HOST:www.mylinuxops.cn" 172.16.11.81:32182
iKubernetes demoapp v1.0 !! ClientIP: 192.168.131.23, ServerName: deployment-demo-fb544c5d8-d2k7v, ServerIP: 192.168.96.24!


# Access over HTTPS
root@k8s-master01:~/yaml/chapter13# curl -H "HOST:www.mylinuxops.cn" https://172.16.11.81:30273
curl: (35) OpenSSL SSL_connect: Connection reset by peer in connection to 172.16.11.81:30273
# Setting the Host header alone does not work here: TLS virtual hosts are matched via SNI, so the request must use the actual host name
root@k8s-master01:~/yaml/chapter13# echo "172.16.11.83 www.mylinuxops.cn" >> /etc/hosts
root@k8s-master01:~/yaml/chapter13# curl -k https://www.mylinuxops.cn:30273
iKubernetes demoapp v1.0 !! ClientIP: 192.168.30.24, ServerName: deployment-demo-fb544c5d8-qbwtm, ServerIP: 192.168.131.20!
root@k8s-master01:~/yaml/chapter13# curl -k https://www.mylinuxops.cn:30273
iKubernetes demoapp v1.0 !! ClientIP: 192.168.30.24, ServerName: deployment-demo-fb544c5d8-h97bv, ServerIP: 192.168.96.23!
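
Because minimumProtocolVersion is set to tlsv1.1, a TLS 1.0 handshake should be refused; with a curl new enough to support --tls-max (7.54+), this can be checked as follows (expected behavior, not captured from the live run):

root@k8s-master01:~/yaml/chapter13# curl -k --tlsv1.0 --tls-max 1.0 https://www.mylinuxops.cn:30273   # handshake should be rejected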

HTTPProxy advanced routing specification

spec:
  routes <[]Route>                 # routing rules
    conditions <[]Condition>
      prefix <String>
      header <HeaderCondition>     # request header matching
        name <String>              # header name
        present <Boolean>          # true means the header merely has to exist; false is meaningless
        contains <String>          # substring the header value must contain
        notcontains <String>       # substring the header value must not contain
        exact <String>             # exact match on the header value
        notexact <String>          # exact negative match, i.e. the value must differ from the given string
    services <[]Service>           # backend services, translated into Envoy Clusters
      name <String>
      port <Integer>
      protocol <String>
      weight <Int64>               # service weight, used for traffic splitting
      mirror <Boolean>             # traffic mirroring
      requestHeadersPolicy <HeadersPolicy>     # header policy for requests sent upstream
        set <[]HeaderValue>        # add headers or set the value of existing ones
          name <String>
          value <String>
        remove <[]String>          # remove the named headers
      responseHeadersPolicy <HeadersPolicy>    # header policy for responses sent downstream
    loadBalancerPolicy <LoadBalancerPolicy>    # load-balancing policy to apply
      strategy <String>            # strategy to use: Random, RoundRobin, Cookie,
                                   # or WeightedLeastRequest; default is RoundRobin
    requestHeadersPolicy <HeadersPolicy>       # route-level request header policy
    responseHeadersPolicy <HeadersPolicy>      # route-level response header policy
    pathRewritePolicy <PathRewritePolicy>      # URL rewriting
      replacePrefix <[]ReplacePrefix>
        prefix <String>            # PATH prefix to match
        replacement <String>       # target path to substitute for the prefix
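
The pathRewritePolicy field is not exercised in the examples below; a minimal sketch of prefix rewriting (host, namespace, and service names are illustrative) could look like this:

apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
  name: httpproxy-rewrite-demo
  namespace: test
spec:
  virtualhost:
    fqdn: www.myk8s.com
  routes:
  - conditions:
    - prefix: /api            # match requests under /api
    pathRewritePolicy:
      replacePrefix:
      - prefix: /api          # strip the /api prefix...
        replacement: /        # ...so the backend sees paths rooted at /
    services:
    - name: demoappv11
      port: 80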

HTTPProxy advanced usage examples

Preparation

1. Create the namespace

root@k8s-master01:~/yaml/chapter13# kubectl create ns test
namespace/test created

2. Deploy two versions of the application

root@k8s-master01:~/yaml/chapter13# kubectl create deployment demoappv11 --image="ikubernetes/demoapp:v1.1" -n test
deployment.apps/demoappv11 created
root@k8s-master01:~/yaml/chapter13# kubectl create deployment demoappv12 --image="ikubernetes/demoapp:v1.2" -n test
deployment.apps/demoappv12 created

3. Create a Service for each Deployment

root@k8s-master01:~/yaml/chapter13# kubectl create service clusterip demoappv11 --tcp=80 -n test
service/demoappv11 created
root@k8s-master01:~/yaml/chapter13# kubectl create service clusterip demoappv12 --tcp=80 -n test
service/demoappv12 created

Header-based routing

1. Write the resource manifest

root@k8s-master01:~/yaml/chapter13# vim httpproxy-headers-routing.yaml
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
  name: httpproxy-headers-routing
  namespace: test
spec:
  virtualhost:
    fqdn: www.myk8s.com
  routes:
  - conditions:                 # the two conditions below are ANDed
    - header:
        name: X-Canary          # the request carries an X-Canary header
        present: true
    - header:
        name: User-Agent        # and its User-Agent contains curl
        contains: curl
    services:
    - name: demoappv12          # matching traffic goes to demoappv12
      port: 80
  - services:
    - name: demoappv11          # everything else goes to demoappv11
      port: 80

# The route above sends traffic to demoappv12 only when the request carries an
# X-Canary header and its User-Agent contains curl; all other requests are
# routed to demoappv11.

2. Apply and test

root@k8s-master01:~/yaml/chapter13# kubectl apply -f httpproxy-headers-routing.yaml
httpproxy.projectcontour.io/httpproxy-headers-routing created

# Add a local name-resolution entry.
root@k8s-master01:~/yaml/chapter13# echo "172.16.11.83 www.myk8s.com" >> /etc/hosts

# Check the ports
root@k8s-master01:~/yaml/chapter13# kubectl get svc -n projectcontour
NAME      TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
contour   ClusterIP      10.100.169.73    <none>        8001/TCP                     114m
envoy     LoadBalancer   10.110.150.215   <pending>     80:32182/TCP,443:30273/TCP   114m

# Test access
# Requests without the X-Canary header are all routed to v1.1
root@k8s-master01:~/yaml/chapter13# curl www.myk8s.com:32182
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
root@k8s-master01:~/yaml/chapter13# curl www.myk8s.com:32182
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!

# Test with the X-Canary header
root@k8s-master01:~/yaml/chapter13# curl -H "X-Canary:true" www.myk8s.com:32182
iKubernetes demoapp v1.2 !! ClientIP: 192.168.30.24, ServerName: demoappv12-64c664955b-skq7g, ServerIP: 192.168.30.27!
root@k8s-master01:~/yaml/chapter13# curl -H "X-Canary:true" www.myk8s.com:32182
iKubernetes demoapp v1.2 !! ClientIP: 192.168.30.24, ServerName: demoappv12-64c664955b-skq7g, ServerIP: 192.168.30.27!
root@k8s-master01:~/yaml/chapter13# curl -H "X-Canary:true" www.myk8s.com:32182
iKubernetes demoapp v1.2 !! ClientIP: 192.168.30.24, ServerName: demoappv12-64c664955b-skq7g, ServerIP: 192.168.30.27!
# All matching requests are routed to v1.2
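
Since the conditions are ANDed, a request that carries X-Canary but whose User-Agent does not contain curl should fall through to demoappv11; this can be checked by overriding the User-Agent (expected result, not captured above):

root@k8s-master01:~/yaml/chapter13# curl -A "Mozilla/5.0" -H "X-Canary:true" www.myk8s.com:32182   # should be answered by v1.1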

Traffic splitting

1. Write the resource manifest

root@k8s-master01:~/yaml/chapter13# vim httpproxy-traffic-splitting.yaml
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
  name: httpproxy-traffic-splitting
  namespace: test
spec:
  virtualhost:
    fqdn: www.myk8s.com
  routes:
  - conditions:
    - prefix: /
    services:
    - name: demoappv11
      port: 80
      weight: 90                # 90% of the traffic goes to v1.1
    - name: demoappv12
      port: 80
      weight: 10                # 10% of the traffic goes to v1.2

2. Apply the manifest and test

root@k8s-master01:~/yaml/chapter13# kubectl apply -f httpproxy-traffic-splitting.yaml
httpproxy.projectcontour.io/httpproxy-traffic-splitting configured

# Test access
root@k8s-master01:~/yaml/chapter13# while true; do curl http://www.myk8s.com:32182; sleep .1; done
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
iKubernetes demoapp v1.2 !! ClientIP: 192.168.30.24, ServerName: demoappv12-64c664955b-skq7g, ServerIP: 192.168.30.27!
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
iKubernetes demoapp v1.2 !! ClientIP: 192.168.30.24, ServerName: demoappv12-64c664955b-skq7g, ServerIP: 192.168.30.27!

# v1.1 and v1.2 receive traffic at roughly a 9:1 ratio
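
To quantify the split over a larger sample (the sample size of 100 is arbitrary), the responses can be tallied with a quick loop:

root@k8s-master01:~/yaml/chapter13# for i in $(seq 100); do curl -s http://www.myk8s.com:32182; done | grep -o "demoapp v1\.[12]" | sort | uniq -c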

Traffic mirroring

1. Write the resource manifest

root@k8s-master01:~/yaml/chapter13# vim httpproxy-traffic-mirror.yaml
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
  name: httpproxy-traffic-mirror
  namespace: test
spec:
  virtualhost:
    fqdn: www.myk8s.com
  routes:
  - conditions:
    - prefix: /
    services:
    - name: demoappv11
      port: 80
    - name: demoappv12
      port: 80
      mirror: true
# With this configuration, every request served by demoappv11 is also mirrored
# to demoappv12; Envoy discards the mirrored responses.

2. Apply the configuration and test

root@k8s-master01:~/yaml/chapter13# kubectl apply -f httpproxy-traffic-mirror.yaml
httpproxy.projectcontour.io/httpproxy-traffic-mirror created

# All regular responses come from v1.1
root@k8s-master01:~/yaml/chapter13# curl www.myk8s.com
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
root@k8s-master01:~/yaml/chapter13# curl www.myk8s.com
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
root@k8s-master01:~/yaml/chapter13# curl www.myk8s.com
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
root@k8s-master01:~/yaml/chapter13# curl www.myk8s.com
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
root@k8s-master01:~/yaml/chapter13# curl www.myk8s.com:32182
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
root@k8s-master01:~/yaml/chapter13# curl www.myk8s.com:32182
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
root@k8s-master01:~/yaml/chapter13# curl www.myk8s.com:32182
iKubernetes demoapp v1.1 !! ClientIP: 192.168.30.24, ServerName: demoappv11-59544d568d-5tpb9, ServerIP: 192.168.30.28!
root@k8s-master01:~/yaml/chapter13# kubectl logs -n test demoappv1   # Tab completion lists the candidate pods
demoappv11-59544d568d-5tpb9 demoappv12-64c664955b-skq7g

# Check the access log of the v1.2 pod
root@k8s-master01:~/yaml/chapter13# kubectl logs -n test demoappv12-64c664955b-skq7g
* Running on http://0.0.0.0:80/ (Press CTRL+C to quit)
192.168.30.24 - - [23/Aug/2021 09:43:10] "GET / HTTP/1.1" 200 -
192.168.30.24 - - [23/Aug/2021 09:43:16] "GET / HTTP/1.1" 200 -
192.168.30.24 - - [23/Aug/2021 09:43:18] "GET / HTTP/1.1" 200 -
192.168.30.24 - - [23/Aug/2021 09:43:22] "GET / HTTP/1.1" 200 -
192.168.30.24 - - [23/Aug/2021 09:43:37] "GET / HTTP/1.1" 200 -
192.168.30.24 - - [23/Aug/2021 09:43:39] "GET / HTTP/1.1" 200 -
192.168.30.24 - - [23/Aug/2021 09:43:40] "GET / HTTP/1.1" 200 -

# The mirrored traffic is indeed reaching v1.2

Load-balancing strategy

1. Write the resource manifest

root@k8s-master01:~/yaml/chapter13# vim httpproxy-lb-strategy.yaml
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
  name: httpproxy-lb-strategy
  namespace: test
spec:
  virtualhost:
    fqdn: www.myk8s.com
  routes:
  - conditions:
    - prefix: /
    services:
    - name: demoappv11
      port: 80
    - name: demoappv12
      port: 80
    loadBalancerPolicy:
      strategy: Random          # pick a backend endpoint at random for each request
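
This manifest introduces no new matching logic, so applying and testing works as before; with the Random strategy and equal replica counts, the v1.1/v1.2 split should hover around 50/50 over a large enough sample:

root@k8s-master01:~/yaml/chapter13# kubectl apply -f httpproxy-lb-strategy.yaml
root@k8s-master01:~/yaml/chapter13# for i in $(seq 20); do curl -s http://www.myk8s.com:32182; done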

HTTPProxy service resilience

If a backend server fails, you can define timeout and retry policies, and you can also configure active health checks of your own.

spec:
  routes <[]Route>
    timeoutPolicy <TimeoutPolicy>             # timeout policy
      response <String>            # how long to wait for the server's response
      idle <String>                # how long Envoy keeps the client connection idle after a timeout
    retryPolicy <RetryPolicy>                 # retry policy
      count <Int64>                # number of retries, default 1
      perTryTimeout <String>       # timeout for each retry attempt
    healthCheckPolicy <HTTPHealthCheckPolicy> # active health checking
      path <String>                # path (HTTP endpoint) to probe
      host <String>                # virtual host to request during the probe
      intervalSeconds <Int64>      # probe interval (frequency), default 5 seconds
      timeoutSeconds <Int64>       # probe timeout, default 2 seconds
      unhealthyThresholdCount <Int64>         # consecutive failures before marking an endpoint unhealthy
      healthyThresholdCount <Int64>           # consecutive successes before marking it healthy

Service resilience example

1. Write the resource manifest

root@k8s-master01:~/yaml/chapter13# vim httpproxy-retry-timeout.yaml
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
  name: httpproxy-retry-timeout
  namespace: dev
spec:
  virtualhost:
    fqdn: www.myk8s.com
  routes:
  - timeoutPolicy:
      response: 2s              # no response within 2s counts as a timeout
      idle: 5s                  # 5s idle timeout
    retryPolicy:
      count: 3                  # retry up to 3 times
      perTryTimeout: 500ms      # 500ms timeout per retry attempt
    services:
    - name: demoapp
      port: 80
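
The healthCheckPolicy described earlier is not used by this manifest; a minimal sketch of an active health check (the /livez path and the threshold values are illustrative) might look like this:

spec:
  routes:
  - conditions:
    - prefix: /
    healthCheckPolicy:
      path: /livez                  # HTTP endpoint probed on each backend
      intervalSeconds: 5            # probe every 5 seconds
      timeoutSeconds: 2             # fail a probe that takes longer than 2 seconds
      unhealthyThresholdCount: 3    # 3 consecutive failures mark an endpoint unhealthy
      healthyThresholdCount: 2      # 2 consecutive successes mark it healthy again
    services:
    - name: demoapp
      port: 80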