Try adding an ip pool

````diff
@@ -47,19 +47,29 @@ helm install \
 cilium/cilium \
 --version 1.15.1 \
 --namespace kube-system \
 --set rollOutCiliumPods=true \
 --set localRedirectPolicy=true \
 --set ipam.mode=kubernetes \
---set=kubeProxyReplacement=true \
---set=securityContext.capabilities.ciliumAgent="{CHOWN,KILL,NET_ADMIN,NET_RAW,IPC_LOCK,SYS_ADMIN,SYS_RESOURCE,DAC_OVERRIDE,FOWNER,SETGID,SETUID}" \
---set=securityContext.capabilities.cleanCiliumState="{NET_ADMIN,SYS_ADMIN,SYS_RESOURCE}" \
---set=cgroup.autoMount.enabled=false \
---set=cgroup.hostRoot=/sys/fs/cgroup \
+--set securityContext.capabilities.ciliumAgent="{CHOWN,KILL,NET_ADMIN,NET_RAW,IPC_LOCK,SYS_ADMIN,SYS_RESOURCE,DAC_OVERRIDE,FOWNER,SETGID,SETUID}" \
+--set securityContext.capabilities.cleanCiliumState="{NET_ADMIN,SYS_ADMIN,SYS_RESOURCE}" \
+--set cgroup.autoMount.enabled=false \
+--set cgroup.hostRoot=/sys/fs/cgroup \
 --set hubble.relay.enabled=true \
 --set hubble.ui.enabled=true \
---set=k8sServiceHost=localhost \
---set=k8sServicePort=7445 \
+--set kubeProxyReplacement=strict \
+--set kubeProxyReplacementHealthzBindAddr=0.0.0.0:10256 \
+--set socketLB.enabled=true \
+--set k8sServiceHost=localhost \
+--set k8sServicePort=7445 \
 --set autoDirectNodeRoutes=true \
 --set operator.replicas=1 \
 --set externalIPs.enabled=true \
---set enableCiliumEndpointSlice=true
+--set endpointRoutes.enabled=true \
+--set bgp.enabled=false \
+--set bgp.announce.loadbalancerIP=true \
+--set bgp.announce.podCIDR=false \
+--set enableCiliumEndpointSlice=true \
+--set l2announcements.enabled=true
+```
+
+You can modify this after install with:
````
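
The added "You can modify this after install with:" line is cut off by the end of the hunk, so the follow-up command is not shown. A minimal sketch of what such a post-install change could look like, assuming the release is named cilium, lives in kube-system, and that `helm upgrade --reuse-values` is the intended mechanism (all assumptions, not taken from this commit):

```sh
# Sketch only: the release name "cilium" is an assumption; the chart,
# version, and namespace are copied from the install command above.
# --reuse-values keeps every previously supplied value and changes
# only the flags listed here.
helm upgrade cilium cilium/cilium \
  --version 1.15.1 \
  --namespace kube-system \
  --reuse-values \
  --set l2announcements.enabled=true
```

Because rollOutCiliumPods=true is already set, an upgrade like this should also restart the agent pods so the new configuration takes effect.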
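The commit title refers to an IP pool, and the new l2announcements.enabled=true and externalIPs.enabled=true flags only do something once a load-balancer IP pool and an L2 announcement policy exist in the cluster. A minimal sketch of those two resources follows; CiliumLoadBalancerIPPool and CiliumL2AnnouncementPolicy are Cilium's own CRDs, but the names and the CIDR below are placeholders, not values from this commit:

```sh
# Sketch only: pool name, policy name, and address range are placeholders.
kubectl apply -f - <<'EOF'
apiVersion: cilium.io/v2alpha1
kind: CiliumLoadBalancerIPPool
metadata:
  name: default-pool              # placeholder name
spec:
  blocks:
    - cidr: 192.168.1.240/28      # placeholder range handed out to LoadBalancer Services
---
apiVersion: cilium.io/v2alpha1
kind: CiliumL2AnnouncementPolicy
metadata:
  name: default-l2-policy         # placeholder name
spec:
  loadBalancerIPs: true           # announce addresses assigned from the pool
  externalIPs: true               # matches externalIPs.enabled=true above
EOF
```

Once both exist, the Cilium operator assigns an address from the pool to Services of type LoadBalancer, and the agents answer ARP for that address on the local network segment, which is what the l2announcements flag enables.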