Generating a Kubernetes Helm API Client and Making Remote Calls

1. What is Helm?

First, what is Helm? Quoting the project itself, it is "The Kubernetes Package Manager", that is, a package manager for Kubernetes.

2. Why use Helm? What are the benefits?

Anyone who has used Kubernetes knows that creating a ReplicationController, Service, or Pod requires a YAML configuration file. In a non-trivial environment you end up with a lot of YAML, and if you want to change a value that is referenced in many places, things quickly become tedious. Updating an existing deployment is even more troublesome. Helm takes care of these problems for you. Another advantage is that Helm already provides a large library of charts for open-source applications.

For readers who want to learn more, see the Kubernetes website (https://kubernetes.io) and the Helm GitHub repository (https://github.com/helm/helm).

3. Generating a Helm (Tiller) client SDK with the Maven gRPC plugin (in practice, a set of generated Java files)

a. Create a Maven project and configure the Maven gRPC plugin. The configuration looks like this:

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>chinacloud.com.cn</groupId>
  <artifactId>grpc</artifactId>
  <packaging>jar</packaging>
  <version>0.0.1-SNAPSHOT</version>
  <name>grpc Maven Webapp</name>
  <url>http://maven.apache.org</url>

  <properties>
    <grpc.version>1.0.3</grpc.version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>3.8.1</version>
      <scope>test</scope>
    </dependency>
    <!-- gRPC -->
    <dependency>
      <groupId>io.grpc</groupId>
      <artifactId>grpc-netty</artifactId>
      <version>${grpc.version}</version>
    </dependency>
    <dependency>
      <groupId>io.grpc</groupId>
      <artifactId>grpc-protobuf</artifactId>
      <version>${grpc.version}</version>
    </dependency>
    <dependency>
      <groupId>io.grpc</groupId>
      <artifactId>grpc-stub</artifactId>
      <version>${grpc.version}</version>
    </dependency>
  </dependencies>
  <build>
    <finalName>grpc</finalName>
    <extensions>
      <extension>
        <groupId>kr.motd.maven</groupId>
        <artifactId>os-maven-plugin</artifactId>
        <version>1.4.1.Final</version>
      </extension>
    </extensions>

    <plugins>
      <!--This plugin's configuration is used to store Eclipse m2e settings only. It has no influence on the Maven build
        itself. -->
      <plugin>
        <groupId>org.eclipse.m2e</groupId>
        <artifactId>lifecycle-mapping</artifactId>
        <version>1.0.0</version>
        <configuration>
          <lifecycleMappingMetadata>
            <pluginExecutions>
              <pluginExecution>
                <pluginExecutionFilter>
                  <groupId>
                    org.apache.maven.plugins
                  </groupId>
                  <artifactId>
                    maven-compiler-plugin
                  </artifactId>
                  <versionRange>
                    [2.5.1,)
                  </versionRange>
                  <goals>
                    <goal>compile</goal>
                  </goals>
                </pluginExecutionFilter>
                <action>
                  <ignore></ignore>
                </action>
              </pluginExecution>
            </pluginExecutions>
          </lifecycleMappingMetadata>
        </configuration>
      </plugin>
      <plugin>
        <groupId>org.xolstice.maven.plugins</groupId>
        <artifactId>protobuf-maven-plugin</artifactId>
        <version>0.5.0</version>
        <configuration>
          <protocArtifact>com.google.protobuf:protoc:3.0.2:exe:${os.detected.classifier}</protocArtifact>
          <pluginId>grpc-java</pluginId>
          <pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}</pluginArtifact>
        </configuration>
        <executions>
          <execution>
            <goals>
              <goal>compile</goal>
              <goal>compile-custom</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>
</project>

b. Take Tiller's hapi proto files from the Helm source repository and put them under src/main/proto; they are the input for the Java code generation.
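If the protos keep the directory layout they have in the Helm source tree (this sketch is based on Helm 2.x; check it against the version you copy), src/main/proto ends up looking roughly like this:

src/main/proto/
    hapi/
        chart/             (chart, config, metadata and template messages)
        release/           (release, info, status and hook messages)
        services/
            tiller.proto   (the ReleaseService called in step 4)
        version/
            version.proto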



c. Run the protobuf:compile goal; the generated Java files show up under the target folder. Copy them into src/main/java.
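With the pom.xml above, both generation goals are bound to the generate-sources phase, so either invocation works (protobuf:compile produces the message classes, protobuf:compile-custom produces the gRPC stubs such as ReleaseServiceGrpc):

mvn generate-sources

mvn protobuf:compile protobuf:compile-custom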


4. Take off: call Tiller's service methods from a main method. This assumes Kubernetes and Helm (Tiller) are already up and running and that Tiller's gRPC port is reachable. The main method looks like this:

 

import java.util.Iterator;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;

import io.grpc.Attributes;
import io.grpc.CallCredentials;
import io.grpc.ManagedChannel;
import io.grpc.Metadata;
import io.grpc.MethodDescriptor;
import io.grpc.netty.NegotiationType;
import io.grpc.netty.NettyChannelBuilder;

// Classes generated in step 3; the Java package mirrors the proto package,
// so adjust these imports if your generated code lands elsewhere.
import hapi.services.tiller.ReleaseServiceGrpc;
import hapi.services.tiller.Tiller;

// The class name is arbitrary; only the main method matters here.
public class TillerClientDemo {

    public static void main(String[] args) throws InterruptedException {
        // Plaintext channel to Tiller's gRPC port 44134 (replace the IP with your own).
        // ManagedChannel channel = ManagedChannelBuilder.forAddress("172.16.101.200", 44134).usePlaintext(true).build();
        ManagedChannel channel = NettyChannelBuilder
                .forAddress("172.16.80.151", 44134)
                .negotiationType(NegotiationType.PLAINTEXT)
                .build();

        // Tiller expects an x-helm-api-client header carrying the client version,
        // so attach it to every call through CallCredentials.
        ReleaseServiceGrpc.ReleaseServiceBlockingStub blockingStub = ReleaseServiceGrpc
                .newBlockingStub(channel)
                .withCallCredentials(new CallCredentials() {
                    @Override
                    public void applyRequestMetadata(MethodDescriptor<?, ?> method, Attributes attrs,
                            Executor appExecutor, MetadataApplier applier) {
                        Metadata metadata = new Metadata();
                        metadata.put(Metadata.Key.of("x-helm-api-client", Metadata.ASCII_STRING_MARSHALLER), "v2.2.0");
                        applier.apply(metadata);
                    }
                });

        // deployAChart(blockingStub);
        // updateDeploy(blockingStub);
        // undeploy(blockingStub);

        // Ask Tiller for its version.
        Tiller.GetVersionResponse response = blockingStub.getVersion(Tiller.GetVersionRequest.newBuilder().build());
        System.out.println(response.getVersion());

        // List all releases; this is a server-streaming RPC, returned as an iterator.
        Iterator<Tiller.ListReleasesResponse> listReleasesIterator =
                blockingStub.listReleases(Tiller.ListReleasesRequest.newBuilder().build());
        System.out.println("================start=================");
        while (listReleasesIterator.hasNext()) {
            System.out.println(listReleasesIterator.next());
            System.out.println("---------------------------------");
        }
        System.out.println("================end=================");

        // Status of a single release.
        Tiller.GetReleaseStatusResponse releaseStatus = blockingStub.getReleaseStatus(Tiller.GetReleaseStatusRequest
                .newBuilder()
                .setName("invited-catfish")
                .build());
        System.out.println("status:" + releaseStatus);

        System.out.println("-----------------");

        // Revision history of a release.
        Tiller.GetHistoryResponse history = blockingStub.getHistory(Tiller.GetHistoryRequest.newBuilder()
                .setName("invited-catfish")
                .setMax(10)
                .build());
        System.out.println(history);

        try {
            channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
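The deployAChart, updateDeploy, and undeploy calls commented out above are helper methods the post does not show. Purely as an illustration, a hypothetical undeploy helper could wrap Tiller's UninstallRelease RPC on the same blocking stub, roughly like this (the release name is just the one used in the examples above):

    // Hypothetical sketch: delete a release through Tiller's UninstallRelease RPC.
    private static void undeploy(ReleaseServiceGrpc.ReleaseServiceBlockingStub blockingStub) {
        Tiller.UninstallReleaseResponse uninstalled = blockingStub.uninstallRelease(
                Tiller.UninstallReleaseRequest.newBuilder()
                        .setName("invited-catfish") // release to delete
                        .build());
        System.out.println(uninstalled);
    }

Note that the channel above dials Tiller's gRPC port 44134 directly; if that port is only reachable from inside the cluster, forward it locally first (for example with kubectl port-forward against the tiller-deploy pod in kube-system) and connect to 127.0.0.1 instead.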

5. Sample output

releases {
  name: "full-meerkat"
  info {
    status {
      code: DEPLOYED
      notes: "1. Get the application URL by running these commands:\n  export POD_NAME=$(kubectl get pods --namespace default -l \"app=full-meerkat-test\" -o jsonpath=\"{.items[0].metadata.name}\")\n  echo \"Visit http://127.0.0.1:8080 to use your application\"\n  kubectl port-forward $POD_NAME 8080:80\n"
    }
    first_deployed {
      seconds: 1489569670
      nanos: 350911984
    }
    last_deployed {
      seconds: 1489569670
      nanos: 350911984
    }
    Description: "Install complete"
  }
  chart {
    metadata {
      name: "test"
      version: "0.1.0"
      description: "A Helm chart for Kubernetes"
      apiVersion: "v1"
    }
    templates {
      name: "templates/NOTES.txt"
      data: "1. Get the application URL by running these commands:\n{{- if contains \"NodePort\" .Values.service.type }}\n  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath=\"{.spec.ports[0].nodePort}\" services {{ template \"fullname\" . }})\n  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath=\"{.items[0].status.addresses[0].address}\")\n  echo http://$NODE_IP:$NODE_PORT/login\n{{- else if contains \"LoadBalancer\" .Values.service.type }}\n    NOTE: It may take a few minutes for the LoadBalancer IP to be available.\n          You can watch the status of by running \'kubectl get svc -w {{ template \"fullname\" . }}\'\n  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template \"fullname\" . }} -o jsonpath=\'{.status.loadBalancer.ingress[0].ip}\')\n  echo http://$SERVICE_IP:{{ .Values.service.externalPort }}\n{{- else if contains \"ClusterIP\"  .Values.service.type }}\n  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l \"app={{ template \"fullname\" . }}\" -o jsonpath=\"{.items[0].metadata.name}\")\n  echo \"Visit http://127.0.0.1:8080 to use your application\"\n  kubectl port-forward $POD_NAME 8080:{{ .Values.service.externalPort }}\n{{- end }}\n"
    }
    templates {
      name: "templates/_helpers.tpl"
      data: "{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n"
    }
    templates {
      name: "templates/configMap.yaml"
      data: "apiVersion: v1\r\nkind: ConfigMap\r\nmetadata:\r\n  name: {{ template \"fullname\" . }}-cfgmap\r\n  # annotations:\r\n  #    # This is what defines this resource as a hook. Without this line, the\r\n  #    # job is considered part of the release.\r\n  #  \"helm.sh/hook\": pre-install\r\ndata:\r\n  dessert: {{ .Values.dessert }}\r\n  test: {{ .Release.Revision }}-{{ .Release.IsUpgrade }}-{{ .Release.IsInstall }}\r\n"
    }
    templates {
      name: "templates/deployment.yaml"
      data: "apiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n  name: {{ template \"fullname\" . }}\n  labels:\n    chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n  replicas: {{ .Values.replicaCount }}\n  template:\n    metadata:\n      labels:\n        app: {{ template \"fullname\" . }}\n      annotations:\n        pod.beta.kubernetes.io/init-containers: \'[\n              {\n                  \"name\": \"remove-lost-found\",\n                  \"image\": \"busybox:1.25.0\",\n                  \"command\": [\"rm\", \"-fr\", \"/var/lib/mysql/lost+found\"],\n                  \"volumeMounts\": [\n                      {\n                          \"name\": \"data\",\n                          \"mountPath\": \"/var/lib/mysql\"\n                      }\n                  ],\n                  \"imagePullPolicy\": {{ .Values.imagePullPolicy | quote }}\n              }\n          ]\'\n    spec:\n      containers:\n      - name: {{ .Chart.Name }}\n        image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n        imagePullPolicy: {{ .Values.image.pullPolicy }}\n        ports:\n        - containerPort: {{ .Values.service.internalPort }}\n        livenessProbe:\n          httpGet:\n            path: /\n            port: {{ .Values.service.internalPort }}\n        readinessProbe:\n          httpGet:\n            path: /\n            port: {{ .Values.service.internalPort }}\n        resources:\n{{ toYaml .Values.resources | indent 12 }}\n"
    }
    templates {
      name: "templates/service.yaml"
      data: "apiVersion: v1\nkind: Service\nmetadata:\n  name: {{ template \"fullname\" . }}\n  labels:\n    chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\nspec:\n  type: {{ .Values.service.type }}\n  ports:\n  - port: {{ .Values.service.externalPort }}\n    targetPort: {{ .Values.service.internalPort }}\n    protocol: TCP\n    name: {{ .Values.service.name }}\n  selector:\n    app: {{ template \"fullname\" . }}\n"
    }
    values {
      raw: "# Default values for test.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\nreplicaCount: 1\nimage:\n  repository: 172.16.101.192/common/nginx\n  tag: 1.8.1\n  pullPolicy: IfNotPresent\nservice:\n  name: nginx\n  type: ClusterIP\n  externalPort: 80\n  internalPort: 80\nresources:\n  limits:\n    cpu: 200m\n    memory: 128Mi\n  requests:\n    cpu: 200m\n    memory: 128Mi\ndessert: cake\n\n"
    }
    files {
      type_url: ".helmignore"
      value: "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.bak\n*.tmp\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n"
    }
  }
  config {
    raw: "{}\n"
  }
  manifest: "\n---\n# Source: test/templates/configMap.yaml\napiVersion: v1\r\nkind: ConfigMap\r\nmetadata:\r\n  name: full-meerkat-test-cfgmap\r\n  # annotations:\r\n  #    # This is what defines this resource as a hook. Without this line, the\r\n  #    # job is considered part of the release.\r\n  #  \"helm.sh/hook\": pre-install\r\ndata:\r\n  dessert: cake\r\n  test: 1-false-true\r\n\n---\n# Source: test/templates/service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n  name: full-meerkat-test\n  labels:\n    chart: \"test-0.1.0\"\nspec:\n  type: ClusterIP\n  ports:\n  - port: 80\n    targetPort: 80\n    protocol: TCP\n    name: nginx\n  selector:\n    app: full-meerkat-test\n\n---\n# Source: test/templates/deployment.yaml\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n  name: full-meerkat-test\n  labels:\n    chart: \"test-0.1.0\"\nspec:\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: full-meerkat-test\n      annotations:\n        pod.beta.kubernetes.io/init-containers: \'[\n              {\n                  \"name\": \"remove-lost-found\",\n                  \"image\": \"busybox:1.25.0\",\n                  \"command\": [\"rm\", \"-fr\", \"/var/lib/mysql/lost+found\"],\n                  \"volumeMounts\": [\n                      {\n                          \"name\": \"data\",\n                          \"mountPath\": \"/var/lib/mysql\"\n                      }\n                  ],\n                  \"imagePullPolicy\": \n              }\n          ]\'\n    spec:\n      containers:\n      - name: test\n        image: \"172.16.101.192/common/nginx:1.8.1\"\n        imagePullPolicy: IfNotPresent\n        ports:\n        - containerPort: 80\n        livenessProbe:\n          httpGet:\n            path: /\n            port: 80\n        readinessProbe:\n          httpGet:\n            path: /\n            port: 80\n        resources:\n            limits:\n              cpu: 200m\n              memory: 128Mi\n            requests:\n              cpu: 200m\n              memory: 128Mi\n            \n"
  version: 1
  namespace: "default"
}

 


Reposted from www.linuxidc.com/Linux/2017-06/144786.htm