Kuryr kubernetes 源码简介

版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/liuliuzi_hz/article/details/79324638
Kuryr kubernetes 分为3类独立可执行程序,分别为Kuryr Controller,Kuryr CNI,Kuryr CNI daemon(可选,抽离CNI的watch 功能以减少多pod同时启动时候的资源浪费)。
Kuryr Controller 独立运行,功能是 watch k8s API 和从neutron 申请资源port等,通过k8s API修改 pod 的annotation 里网络信息。
Kuryr CNI 在每个worker node 备用。当pod启动或删除调用该插件,该插件具体执行veth创建和该veth 和neutron 创建的port 连接管理。如果没有Kuryr CNI daemon,还要负责创建时watch k8s API 直到Kuryr Controller 将网络信息写入 pod 的annotation 。
Kuryr CNI daemon 在每个worker node 运行。当它运行时,主要抽离出Kuryr CNI 的watch 功能和执行功能。具体执行veth创建和该veth 和neutron 创建的port 连接管理也归它管理,Kuryr CNI 只保留CNI 入口功能,如同 Kuryr CNI daemon 的客户端。
Kuryr CNI 和 Kuryr CNI daemon用同一个应用程序,参数不同运行方式就不同。

Kuryr Controller 入口
kuryr_kubernetes/controller/service.py

class KuryrK8sService(service.Service):
    """Kuryr-Kubernetes controller Service.

    Watches the Kubernetes API for pods/services/endpoints and dispatches
    the observed changes to registered handlers (VIF and LBaaS), which
    provision Neutron resources and write the results back into pod
    annotations via the Kubernetes API.
    """

    def __init__(self):
        super(KuryrK8sService, self).__init__()

        objects.register_locally_defined_vifs()
        # Build the event pipeline that dispatches watched K8s events.
        pipeline = h_pipeline.ControllerPipeline(self.tg)
        self.watcher = watcher.Watcher(pipeline, self.tg)
        # TODO(ivc): pluggable resource/handler registration
        # Watch these three resource kinds on the Kubernetes API.
        for resource in ["pods", "services", "endpoints"]:
            self.watcher.add("%s/%s" % (constants.K8S_API_BASE, resource))
        # Register handlers; watched changes are processed by them.
        pipeline.register(h_vif.VIFHandler())
        pipeline.register(h_lbaas.LBaaSSpecHandler())
        pipeline.register(h_lbaas.LoadBalancerHandler())

    def start(self):
        """Start the service and begin watching the registered resources."""
        LOG.info("Service '%s' starting", self.__class__.__name__)
        health.ReadinessChecker()
        super(KuryrK8sService, self).start()
        self.watcher.start()
        LOG.info("Service '%s' started", self.__class__.__name__)

关于 watch
kuryr_kubernetes/watcher.py
def start(self):
    """Mark the watcher as running and watch every not-yet-watched path."""
    self._running = True
    # Only start watches for registered paths that are not already watched.
    for path in self._resources - set(self._watching):
        self._start_watch(path)
def _start_watch(self, path):
tg = self._thread_group
self._idle[path] = True
if tg:
self._watching[path] = tg.add_thread(self._watch, path)//thread_group添加观察线程
else:
self._watching[path] = None
self._watch(path)

回到VIFHandler
kuryr_kubernetes/controller/handlers/vif.py
def on_present(self, pod):
    """Ensure *pod* has an active VIF recorded in its annotation.

    Requests a port from the VIF pool when the pod has none yet, and
    activates a not-yet-active VIF; in both cases the VIF is written
    back into the pod annotation through the Kubernetes API.
    """
    vif = self._get_vif(pod)
    if not vif:
        # The observed pod has no VIF yet: request a port from the pool.
        project_id = self._drv_project.get_project(pod)
        security_groups = self._drv_sg.get_security_groups(pod, project_id)
        subnets = self._drv_subnets.get_subnets(pod, project_id)
        vif = self._drv_vif_pool.request_vif(pod, project_id, subnets,
                                             security_groups)
        try:
            # Write the VIF info back into the pod annotation.
            self._set_vif(pod, vif)
        except k_exc.K8sClientException as ex:
            LOG.debug("Failed to set annotation: %s", ex)
            # FIXME(ivc): improve granularity of K8sClient exceptions:
            # only resourceVersion conflict should be ignored
            # Annotation update failed: return the port to the pool.
            self._drv_vif_pool.release_vif(pod, vif, project_id,
                                           security_groups)
    elif not vif.active:
        # The pod's VIF exists but is not active yet: activate it and
        # update the annotation.
        self._drv_vif_pool.activate_vif(pod, vif)
        self._set_vif(pod, vif)

接下看如何从vif_pool申请到port
kuryr_kubernetes/controller/drivers/vif_pool.py
def request_vif(self, pod, project_id, subnets, security_groups):
    """Return a VIF for *pod*, taken from the pre-populated port pool.

    Re-raises KeyError when the pod has not been scheduled to a node
    yet, and ResourceNotReady when the pool is empty (repopulation is
    kicked off asynchronously in that case).
    """
    try:
        host_addr = self._get_host_addr(pod)
    except KeyError:
        LOG.warning("Pod has not been scheduled yet.")
        raise
    # Pools are keyed by (host, project, sorted security groups).
    pool_key = (host_addr, project_id, tuple(sorted(security_groups)))
    try:
        # Hand out a port that was created ahead of time.
        return self._get_port_from_pool(pool_key, pod, subnets)
    except exceptions.ResourceNotReady as ex:
        LOG.warning("Ports pool does not have available ports!")
        # Pool exhausted: repopulate it in a greenthread and report back.
        eventlet.spawn(self._populate_pool, pool_key, pod, subnets)
        raise ex

def _get_port_from_pool(self, pool_key, pod, subnets):
    """Pop one pre-created port for *pool_key* and return its VIF.

    Raises ResourceNotReady when the pool for this key is empty.
    """
    try:
        port_id = self._available_ports_pools[pool_key].pop()
    except IndexError:
        # The pool list exists but has no ports left.
        raise exceptions.ResourceNotReady(pod)
    if config.CONF.kubernetes.port_debug:
        # In debug mode, tag the Neutron port with the pod's identity.
        neutron = clients.get_neutron_client()
        neutron.update_port(
            port_id,
            {
                "port": {
                    'name': pod['metadata']['name'],
                    'device_id': pod['metadata']['uid']
                }
            })
    # Check if the pool needs to be populated: when fewer cached ports
    # remain than the configured minimum, repopulate in a greenthread.
    if (self._get_pool_size(pool_key) <
            oslo_cfg.CONF.vif_pool.ports_pool_min):
        eventlet.spawn(self._populate_pool, pool_key, pod, subnets)
    return self._existing_vifs[port_id]

具体ports_pool 如何申请端口
# Create a batch of Neutron ports for *pool_key* and cache their VIFs.
# NOTE(review): part of this method was elided by the source (the
# "..." below); ``num_ports`` is presumably computed there — confirm
# against the upstream kuryr-kubernetes tree.
def _populate_pool(self, pool_key, pod, subnets):
............
# Request num_ports new ports from the VIF driver in one call; the
# pool key carries (host, project_id, security_groups).
vifs = self._drv_vif.request_vifs(
pod=pod,
project_id=pool_key[1],
subnets=subnets,
security_groups=list(pool_key[2]),
num_ports=num_ports)
# Cache each VIF and mark its port id as available in this pool.
for vif in vifs:
self._existing_vifs[vif.id] = vif
self._available_ports_pools.setdefault(pool_key,
[]).append(vif.id)
具体vif 如何申请端口
kuryr_kubernetes/controller/drivers/neutron_vif.py
def request_vif(self, pod, project_id, subnets, security_groups):
    """Create one Neutron port for *pod* and return it as an os-vif VIF."""
    neutron = clients.get_neutron_client()
    # Build the port-create request body from the pod's details.
    rq = self._get_port_request(pod, project_id, subnets, security_groups)
    # Ask Neutron for the port.
    port = neutron.create_port(rq).get('port')
    vif_plugin = self._get_vif_plugin(port)
    # Translate the Neutron port into an os-vif VIF object.
    return ovu.neutron_to_osvif_vif(vif_plugin, port, subnets)

CNI入口在kuryr_kubernetes/cni/main.py 。CNI 和CNI daemon用同一个可执行程序,根据参数可以运行为daemon或者插件形式。
# Select the execution mode: with the CNI daemon enabled the plugin acts
# as a thin client of the daemon; otherwise it runs standalone and does
# the Kubernetes API watching itself.
if CONF.cni_daemon.daemon_enabled:
    runner = cni_api.CNIDaemonizedRunner()
else:
    runner = cni_api.CNIStandaloneRunner(K8sCNIPlugin())
以插件形式运行时,需要满足CNI协议参数要求,即参数是ADD,DEL,VERSION
kuryr_kubernetes/cni/api.py
def run(self, env, fin, fout):
    """Dispatch one CNI invocation according to CNI_COMMAND.

    Handles the ADD/DEL/VERSION commands required by the CNI spec.
    Returns 0 on success and 1 on failure, with the error serialized
    to *fout* as the CNI error contract requires.
    """
    try:
        # Prepare params according to calling Object
        params = self.prepare_env(env, fin)
        if env.get('CNI_COMMAND') == 'ADD':
            vif = self._add(params)
            self._write_dict(fout, vif)
        elif env.get('CNI_COMMAND') == 'DEL':
            self._delete(params)
        elif env.get('CNI_COMMAND') == 'VERSION':
            self._write_version(fout)
        else:
            raise k_exc.CNIError(_("unknown CNI_COMMAND: %s")
                                 % env['CNI_COMMAND'])
        return 0
    except Exception as ex:
        # LOG.exception
        # Report the failure on fout per the CNI error contract.
        self._write_exception(fout, str(ex))
        return 1

class K8sCNIPlugin(cni_api.CNIPlugin):
    """Standalone CNI plugin that watches the K8s API for the pod's VIF.

    ``add``/``delete`` block on a Watcher scoped to the single pod being
    wired; the registered handler calls ``_done`` to stop the watch.
    """

    def add(self, params):
        """Wire the pod up and return its VIF once it is active."""
        self._setup(params)
        # Register the add handler; it stops the watcher via _done once
        # the controller has annotated the pod with an active VIF.
        self._pipeline.register(h_cni.AddHandler(params, self._done))
        self._watcher.start()
        return self._vif

    def delete(self, params):
        """Tear the pod's networking down."""
        self._setup(params)
        self._pipeline.register(h_cni.DelHandler(params, self._done))
        self._watcher.start()

    def _done(self, vif):
        # Record the result and stop the (blocking) watch loop.
        self._vif = vif
        self._watcher.stop()

    def _setup(self, params):
        """Create the K8s client, pipeline and a watcher for this pod only."""
        clients.setup_kubernetes_client()
        self._pipeline = h_cni.CNIPipeline()
        self._watcher = k_watcher.Watcher(self._pipeline)
        # Watch the K8s API, restricted to this one pod via fieldSelector.
        self._watcher.add(
            "%(base)s/namespaces/%(namespace)s/pods"
            "?fieldSelector=metadata.name=%(pod)s" % {
                'base': k_const.K8S_API_BASE,
                'namespace': params.args.K8S_POD_NAMESPACE,
                'pod': params.args.K8S_POD_NAME})

kuryr_kubernetes/cni/handlers.py
class AddHandler(CNIHandlerBase):
    """CNI handler that plugs the pod's interface once a VIF appears."""

    def __init__(self, cni, on_done):
        LOG.debug("AddHandler called with CNI env: %r", cni)
        super(AddHandler, self).__init__(cni, on_done)
        self._vif = None

    def on_vif(self, pod, vif):
        if not self._vif:
            # First sighting: connect the pod's netns interface to the
            # Neutron port; work on a clone marked active for binding.
            self._vif = vif.obj_clone()
            self._vif.active = True
            b_base.connect(self._vif, self._get_inst(pod),
                           self._cni.CNI_IFNAME, self._cni.CNI_NETNS)
        if vif.active:
            # Controller reports the VIF active: signal completion.
            self._callback(vif)

kuryr_kubernetes/cni/binding/base.py
连接和断开连接
def connect(vif, instance_info, ifname, netns=None):
    """Bind *vif* into *netns* as *ifname* and configure L3 on it."""
    # Pick the binding driver matching the VIF type.
    driver = _get_binding_driver(vif)
    # os-vif performs the generic plug step.
    os_vif.plug(vif, instance_info)
    # Driver-specific wiring of the veth into the pod's netns.
    driver.connect(vif, ifname, netns)
    # Configure addresses, routes and gateway.
    _configure_l3(vif, ifname, netns)
def disconnect(vif, instance_info, ifname, netns=None):
    """Reverse of connect(): unbind the interface, then unplug the VIF."""
    driver = _get_binding_driver(vif)
    driver.disconnect(vif, ifname, netns)
    os_vif.unplug(vif, instance_info)

class VIFOpenVSwitchDriver(BaseBridgeDriver):
    """Bridge driver that attaches the veth to the OVS integration bridge."""

    def connect(self, vif, ifname, netns):
        super(VIFOpenVSwitchDriver, self).connect(vif, ifname, netns)
        # FIXME(irenab) use pod_id (neutron port device_id)
        instance_id = 'kuryr'
        # Add the host-side veth to the OVS bridge with the external-ids
        # the Neutron OVS agent expects.
        net_utils.create_ovs_vif_port(vif.bridge_name, vif.vif_name,
                                      vif.port_profile.interface_id,
                                      vif.address, instance_id)

具体添加veth到br-int。最后cmd形式调用ovs-vsctl
kuryr_kubernetes/linux_net_utils.py
def _ovs_vsctl(args, timeout=None):
    """Run ``ovs-vsctl`` with *args* as root, optionally with a timeout.

    Logs and re-raises any execution failure.
    """
    full_args = ['ovs-vsctl']
    if timeout is not None:
        full_args += ['--timeout=%s' % timeout]
    full_args += args
    try:
        return processutils.execute(*full_args, run_as_root=True)
    except Exception as e:
        LOG.error("Unable to execute %(cmd)s. Exception: %(exception)s",
                  {'cmd': full_args, 'exception': e})
        raise
def _create_ovs_vif_cmd(bridge, dev, iface_id, mac, instance_id)://生成命令行参数
cmd = ['--', '--if-exists', 'del-port', dev, '--',
'add-port', bridge, dev,
'--', 'set', 'Interface', dev,
'external-ids:iface-id=%s' % iface_id,
'external-ids:iface-status=active',
'external-ids:attached-mac=%s' % mac,
'external-ids:vm-uuid=%s' % instance_id]
return cmd
def create_ovs_vif_port(bridge, dev, iface_id, mac, instance_id):
    """Add *dev* to *bridge* with iface/mac/instance external-ids set."""
    _ovs_vsctl(_create_ovs_vif_cmd(bridge, dev, iface_id, mac, instance_id))
def delete_ovs_vif_port(bridge, dev):
    """Remove *dev* from *bridge* if it is attached."""
    _ovs_vsctl(['--', '--if-exists', 'del-port', bridge, dev])





猜你喜欢

转载自blog.csdn.net/liuliuzi_hz/article/details/79324638