kafka

$ sudo apt update 
[sudo] password for kafka: 

kafka@kafkaubuntu2004:~$ sudo apt install default-jdk
Reading package lists... Done
Building dependency tree   


kafka@kafkaubuntu2004:~$ java --version
openjdk 11.0.21 2023-10-17
OpenJDK Runtime Environment (build 11.0.21+9-post-Ubuntu-0ubuntu120.04)
OpenJDK 64-Bit Server VM (build 11.0.21+9-post-Ubuntu-0ubuntu120.04, mixed mode, sharing)
kafka@kafkaubuntu2004:~$ mkdir ~/kafka && cd ~/kafka
kafka@kafkaubuntu2004:~/kafka$ tar -xvzf ~/Downloads/kafka.tgz --strip 1
kafka_2.12-3.4.1/LICENSE
kafka_2.12-3.4.1/NOTICE



kafka@kafkaubuntu2004:~/kafka$ vi ~/kafka/config/server.properties
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ pwd
/home/kafka/kafka
kafka@kafkaubuntu2004:~/kafka$ sudo vi /etc/systemd/system/zookeeper.service
kafka@kafkaubuntu2004:~/kafka$ sudo nano /etc/systemd/system/kafka.service
kafka@kafkaubuntu2004:~/kafka$ sudo vi /etc/systemd/system/kafka.service
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ sudo systemctl start kafka
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ sudo systemctl status kafka
● kafka.service
     Loaded: loaded (/etc/systemd/system/kafka.service; disabled; vendor preset: enabled)
     Active: active (running) since Wed 2023-12-13 00:04:33 UTC; 8s ago
   Main PID: 17310 (sh)
      Tasks: 72 (limit: 19173)
     Memory: 341.5M
     CGroup: /system.slice/kafka.service
             ├─17310 /bin/sh -c /home/kafka/kafka/bin/kafka-server-start.sh /home/kafka/kafka/config/server.>
             └─17311 java -Xmx1G -Xms1G -server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccup>

Dec 13 00:04:33 kafkaubuntu2004 systemd[1]: Started kafka.service.
...skipping...
● kafka.service
     Loaded: loaded (/etc/systemd/system/kafka.service; disabled; vendor preset: enabled)
     Active: active (running) since Wed 2023-12-13 00:04:33 UTC; 8s ago
   Main PID: 17310 (sh)
      Tasks: 72 (limit: 19173)
     Memory: 341.5M
     CGroup: /system.slice/kafka.service
             ├─17310 /bin/sh -c /home/kafka/kafka/bin/kafka-server-start.sh /home/kafka/kafka/config/server.>
             └─17311 java -Xmx1G -Xms1G -server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccup>

Dec 13 00:04:33 kafkaubuntu2004 systemd[1]: Started kafka.service.
~
~
~
~
~
~
~
~
~
~
~
~
~
~
~
~

kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ sudo systemctl enable zookeeper
Created symlink /etc/systemd/system/multi-user.target.wants/zookeeper.service → /etc/systemd/system/zookeeper.service.
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ sudo systemctl enable kafka
Created symlink /etc/systemd/system/multi-user.target.wants/kafka.service → /etc/systemd/system/kafka.service.
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ ~/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic TutorialTopic
Exception in thread "main" joptsimple.UnrecognizedOptionException: zookeeper is not a recognized option
        at joptsimple.OptionException.unrecognizedOption(OptionException.java:108)
        at joptsimple.OptionParser.handleLongOptionToken(OptionParser.java:510)
        at joptsimple.OptionParserState$2.handleArgument(OptionParserState.java:56)
        at joptsimple.OptionParser.parse(OptionParser.java:396)
        at kafka.admin.TopicCommand$TopicCommandOptions.<init>(TopicCommand.scala:567)
        at kafka.admin.TopicCommand$.main(TopicCommand.scala:47)
        at kafka.admin.TopicCommand.main(TopicCommand.scala)
kafka@kafkaubuntu2004:~/kafka$ cat ~/kafka/bin/kafka-topics.sh
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#    http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.TopicCommand "$@"
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ ~/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic TutorialTopic
Exception in thread "main" joptsimple.UnrecognizedOptionException: zookeeper is not a recognized option
        at joptsimple.OptionException.unrecognizedOption(OptionException.java:108)
        at joptsimple.OptionParser.handleLongOptionToken(OptionParser.java:510)
        at joptsimple.OptionParserState$2.handleArgument(OptionParserState.java:56)
        at joptsimple.OptionParser.parse(OptionParser.java:396)
        at kafka.admin.TopicCommand$TopicCommandOptions.<init>(TopicCommand.scala:567)
        at kafka.admin.TopicCommand$.main(TopicCommand.scala:47)
        at kafka.admin.TopicCommand.main(TopicCommand.scala)
kafka@kafkaubuntu2004:~/kafka$ ./kafka-topics.sh --create --topic test-topic --bootstrap-server localhost:9092 --replication-factor 1 --partitions 4
-bash: ./kafka-topics.sh: No such file or directory
kafka@kafkaubuntu2004:~/kafka$ ~/kafka/bin/kafka-topics.sh --create --topic test-topic --bootstrap-server localhost:9092 --replication-factor 1 --partitions 4
Created topic test-topic.
kafka@kafkaubuntu2004:~/kafka$ ~/kafka/bin/kafka-topics.sh --create --topic TutorialTopi^Cbootstrap-server localhost:9092 --replication-factor 1 --partitions 4
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ ~/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic Tut^C
kafka@kafkaubuntu2004:~/kafka$ ~/kafka/bin/kafka-topics.sh --create  --replication-factor 1 --partitions 1 --topic TutorialTopic
Exception in thread "main" java.lang.IllegalArgumentException: --bootstrap-server must be specified
        at kafka.admin.TopicCommand$TopicCommandOptions.checkArgs(TopicCommand.scala:619)
        at kafka.admin.TopicCommand$.main(TopicCommand.scala:48)
        at kafka.admin.TopicCommand.main(TopicCommand.scala)
kafka@kafkaubuntu2004:~/kafka$ ~/kafka/bin/kafka-topics.sh --create  --topic test-topic ^Creplication-factor 1 --partitions 1 --topic TutorialTopic
kafka@kafkaubuntu2004:~/kafka$ 
kafka@kafkaubuntu2004:~/kafka$ ./kafka-topics.sh --create --topic TutorialTopic --bootstrap-server localhost:9092 --replication-factor 1 --partitions 1
-bash: ./kafka-topics.sh: No such file or directory
kafka@kafkaubuntu2004:~/kafka$ ~/kafka/bin/kafka-topics.sh --create --topic TutorialTopic --bootstrap-server localhost:9092 --replication-factor 1 --partitions 1
Created topic TutorialTopic.
kafka@kafkaubuntu2004:~/kafka$ echo "Hello, World" | ~/kafka/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic TutorialTopic > /dev/null
kafka@kafkaubuntu2004:~/kafka$ ~/kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic TutorialTopic --from-beginning
Hello, World
Hello World from Sammy at DigitalOcean!







kafka@kafkaubuntu2004:~$ history
    1  mkdir ~/Downloads
    2  curl "https://downloads.apache.org/kafka/3.4.1/kafka_2.12-3.4.1.tgz" -o ~/Downloads/kafka.tgz
    3  sudo apt update 
    4  java --version
    5  sudo apt install default-jdk
    6  java --version
    7  mkdir ~/kafka && cd ~/kafka
    8  tar -xvzf ~/Downloads/kafka.tgz --strip 1
    9  vi ~/kafka/config/server.properties
   10  pwd
   11  sudo vi /etc/systemd/system/zookeeper.service
   12  sudo nano /etc/systemd/system/kafka.service
   13  sudo vi /etc/systemd/system/kafka.service
   14  sudo systemctl start kafka
   15  sudo systemctl status kafka
   16  sudo systemctl enable zookeeper
   17  sudo systemctl enable kafka
   18  ~/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic TutorialTopic
   19  cat ~/kafka/bin/kafka-topics.sh
   20  ~/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic TutorialTopic
   21  ./kafka-topics.sh --create --topic test-topic --bootstrap-server localhost:9092 --replication-factor 1 --partitions 4
   22  ~/kafka/bin/kafka-topics.sh --create --topic test-topic --bootstrap-server localhost:9092 --replication-factor 1 --partitions 4
   23  ~/kafka/bin/kafka-topics.sh --create  --replication-factor 1 --partitions 1 --topic TutorialTopic
   24  ./kafka-topics.sh --create --topic TutorialTopic --bootstrap-server localhost:9092 --replication-factor 1 --partitions 1
   25  ~/kafka/bin/kafka-topics.sh --create --topic TutorialTopic --bootstrap-server localhost:9092 --replication-factor 1 --partitions 1
   26  echo "Hello, World" | ~/kafka/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic TutorialTopic > /dev/null
   27  ~/kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic TutorialTopic --from-beginning
   28  history

kafka@kafkaubuntu2004:~$ kafkat

kafkat 0.3.0: Simplified command-line administration for Kafka brokers
usage: kafkat [command] [options]

Here's a list of supported commands:

  brokers                                                             Print available brokers from Zookeeper.
  clean-indexes                                                       Delete untruncated Kafka log indexes from the filesystem.
  cluster-restart help                                                Determine the server restart sequence for kafka
  controller                                                          Print the current controller.
  drain <broker id> [--topic <t>] [--brokers <ids>]                   Reassign partitions from a specific broker to destination brokers.
  elect-leaders [topic]                                               Begin election of the preferred leaders.
  partitions [topic]                                                  Print partitions by topic.
  partitions [topic] --under-replicated                               Print partitions by topic (only under-replicated).
  partitions [topic] --unavailable                                    Print partitions by topic (only unavailable).
  reassign [topics] [--brokers <ids>] [--replicas <n>]                Begin reassignment of partitions.
  resign-rewrite <broker id>                                          Forcibly rewrite leaderships to exclude a broker.
  resign-rewrite <broker id> --force                                  Same as above but proceed if there are no available ISRs.
  set-replication-factor [topic] [--newrf <n>] [--brokers id[,id]]    Set the replication factor of
  shutdown <broker id>                                                Gracefully remove leaderships from a broker (requires JMX).
  topics                                                              Print all topics.
  verify-replicas  [--topics] [--broker <id>] [--print-details] [--print-summary]Check if all partitions in a topic have same number of replicas.


kafka@kafkaubuntu2004:~$ kafkat topics


TutorialTopic
test-topic
__consumer_offsets


kafka@kafkaubuntu2004:~$ kafkat brokers

Broker          Socket
0               kafkaubuntu2004.us-west1-a.c.timebase-ts.internal:9092
kafka@kafkaubuntu2004:~$ 

kafka@kafkaubuntu2004:~$ kafkat controller

The current controller is '0' (kafkaubuntu2004.us-west1-a.c.timebase-ts.internal:9092).

kafka@kafkaubuntu2004:~$ kafkat partitions TutorialTopic

Topic           Partition       Leader          Replicas                                                ISRs
TutorialTopic   0               0               [0]                                                     [0]

kafka@kafkaubuntu2004:~$ kafkat partitions test-topic


Topic           Partition       Leader          Replicas                                                ISRs
test-topic      0               0               [0]                                                     [0]
test-topic      1               0               [0]                                                     [0]
test-topic      2               0               [0]                                                     [0]
test-topic      3               0               [0]                                                     [0]

kafka@kafkaubuntu2004:~$ 










Apache Kafka Server on Ubuntu 20.04

Task 1. Set up Kafka


In the Cloud Console, open the Navigation menu and click Marketplace.

Locate the Apache Kafka® deployment by searching for Apache Kafka.

Click on Apache Kafka Server on Ubuntu Server 20.04. It should look like this:










While you're waiting for deployment, you can check out this quick start, which shows how to run the WordCount demo application that is included in Kafka.

Here's the gist of the code, converted to use Java 8 lambda expressions so that it is easier to read (taken from the variant WordCountLambdaExample):

Start the Kafka environment


In the SSH window, you will run the following commands to start all services in the correct order.


Run the following command to start the ZooKeeper service:

cd /opt/kafka/

sudo bin/zookeeper-server-start.sh config/zookeeper.properties

Open another SSH session

Start the Kafka broker service

Run the following command to first change your current path to the Kafka installation directory and start the Kafka broker service:


cd /opt/kafka/
sudo bin/kafka-server-start.sh config/server.properties

Once all services have successfully launched, you will have a basic Kafka environment running and ready to use.

Note: The Kafka application is now configured to use the connector. 

Open another SSH session.

Task 2. Prepare the topics and the input data

You will now send some input data to a Kafka topic, which will be subsequently processed by a Kafka Streams application.

First change your current path to the Kafka installation directory:

cd /opt/kafka/

Now you'll need to create the input topic streams-plaintext-input.


In the same SSH window, execute the following command:


sudo bin/kafka-topics.sh --create \
    --bootstrap-server localhost:9092 \
    --replication-factor 1 \
    --partitions 1 \
    --topic streams-plaintext-input

Next, create the output topic streams-wordcount-output:


sudo bin/kafka-topics.sh --create \
    --bootstrap-server localhost:9092 \
    --replication-factor 1 \
    --partitions 1 \
    --topic streams-wordcount-output

Task 3. Process the input data with Kafka streams


Now that you have generated some input data, you can run your first Kafka Streams based Java application.

You will run the WordCount demo application, which is included in Kafka. It implements the WordCount algorithm, which computes a word occurrence histogram from an input text.

However, unlike other WordCount examples you might have seen before that operate on finite, bounded data, the WordCount demo application behaves slightly differently because it is designed to operate on an infinite, unbounded stream of input data.

Similar to the bounded variant, it is a stateful algorithm that tracks and updates the counts of words. However, since it must assume potentially unbounded input data, it will periodically output its current state and results while continuing to process more data because it cannot know when it has processed "all" the input data.

This is a typical difference between the class of algorithms that operate on unbounded streams of data and, say, batch processing algorithms such as Hadoop MapReduce. It will be easier to understand this difference once you inspect the actual output data later on.

Kafka's WordCount demo application is bundled with the Apache Kafka distribution, which means you can run it without further ado, i.e. you do not need to compile any Java sources and so on.

Now, execute the following command to run the WordCount demo application. You can safely ignore any warn log messages:


ketan_patel@kafka-ubuntu-1-vm:~$ cd /opt/kafka
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ sudo bin/kafka-topics.sh --create \
>     --bootstrap-server localhost:9092 \
>     --replication-factor 1 \
>     --partitions 1 \
>     --topic streams-plaintext-input
Created topic streams-plaintext-input.
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ sudo bin/kafka-topics.sh --create \
>     --bootstrap-server localhost:9092 \
>     --replication-factor 1 \
>     --partitions 1 \
>     --topic streams-wordcount-output
Created topic streams-wordcount-output.
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ 
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ 
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ 
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ echo -e "all streams lead to kafka\nhello kafka streams\njoin kafka summit" > /tmp/file-input.txt
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ 
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ tail /tmp/file-input.txt 
all streams lead to kafka
hello kafka streams
join kafka summit
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ 
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ 
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ cat /tmp/file-input.txt | sudo bin/kafka-console-producer.sh --bootstrap-server localhost:9092 --topic streams-plaintext-input
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ 
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ 
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ 
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ 
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ sudo bin/kafka-run-class.sh org.apache.kafka.streams.examples.wordcount.WordCountDemo
[2023-12-12 21:06:06,959] WARN Using an OS temp directory in the state.dir property can cause failures with writing the checkpoint file due to the fact that this directory can be cleared by the OS. Resolved state.dir: [/tmp/kafka-streams] (org.apache.kafka.streams.processor.internals.StateDirectory)
[2023-12-12 21:06:07,287] WARN Error while loading kafka-streams-version.properties (org.apache.kafka.streams.internals.metrics.ClientMetrics)
java.lang.NullPointerException: inStream parameter is null
        at java.base/java.util.Objects.requireNonNull(Objects.java:246)
        at java.base/java.util.Properties.load(Properties.java:406)
        at org.apache.kafka.streams.internals.metrics.ClientMetrics.<clinit>(ClientMetrics.java:53)
        at org.apache.kafka.streams.KafkaStreams.<init>(KafkaStreams.java:894)
        at org.apache.kafka.streams.KafkaStreams.<init>(KafkaStreams.java:856)
        at org.apache.kafka.streams.KafkaStreams.<init>(KafkaStreams.java:826)
        at org.apache.kafka.streams.KafkaStreams.<init>(KafkaStreams.java:738)
        at org.apache.kafka.streams.examples.wordcount.WordCountDemo.main(WordCountDemo.java:92)
[2023-12-12 21:06:07,774] WARN [Consumer clientId=streams-wordcount-49801b8a-a532-40e8-85c1-9dd57f442a40-StreamThread-1-consumer, groupId=streams-wordcount] Error while fetching metadata with correlation id 2 : {streams-wordcount-KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition=UNKNOWN_TOPIC_OR_PARTITION} (org.apache.kafka.clients.NetworkClient)
[2023-12-12 21:06:07,882] WARN [Consumer clientId=streams-wordcount-49801b8a-a532-40e8-85c1-9dd57f442a40-StreamThread-1-consumer, groupId=streams-wordcount] Error while fetching metadata with correlation id 4 : {streams-wordcount-KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition=UNKNOWN_TOPIC_OR_PARTITION} (org.apache.kafka.clients.NetworkClient)
[2023-12-12 21:06:07,985] WARN [Consumer clientId=streams-wordcount-49801b8a-a532-40e8-85c1-9dd57f442a40-StreamThread-1-consumer, groupId=streams-wordcount] Error while fetching metadata with correlation id 7 : {streams-wordcount-KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition=UNKNOWN_TOPIC_OR_PARTITION} (org.apache.kafka.clients.NetworkClient)
[2023-12-12 21:06:08,088] WARN [Consumer clientId=streams-wordcount-49801b8a-a532-40e8-85c1-9dd57f442a40-StreamThread-1-consumer, groupId=streams-wordcount] Error while fetching metadata with correlation id 10 : {streams-wordcount-KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition=UNKNOWN_TOPIC_OR_PARTITION} (org.apache.kafka.clients.NetworkClient)
[2023-12-12 21:06:08,191] WARN [Consumer clientId=streams-wordcount-49801b8a-a532-40e8-85c1-9dd57f442a40-StreamThread-1-consumer, groupId=streams-wordcount] Error while fetching metadata with correlation id 13 : {streams-wordcount-KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition=UNKNOWN_TOPIC_OR_PARTITION} (org.apache.kafka.clients.NetworkClient)
[2023-12-12 21:06:08,293] WARN [Consumer clientId=streams-wordcount-49801b8a-a532-40e8-85c1-9dd57f442a40-StreamThread-1-consumer, groupId=streams-wordcount] Error while fetching metadata with correlation id 16 : {streams-wordcount-KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition=UNKNOWN_TOPIC_OR_PARTITION} (org.apache.kafka.clients.NetworkClient)
[2023-12-12 21:06:08,396] WARN [Consumer clientId=streams-wordcount-49801b8a-a532-40e8-85c1-9dd57f442a40-StreamThread-1-consumer, groupId=streams-wordcount] Error while fetching metadata with correlation id 18 : {streams-wordcount-KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition=UNKNOWN_TOPIC_OR_PARTITION} (org.apache.kafka.clients.NetworkClient)
[2023-12-12 21:06:08,497] WARN [Consumer clientId=streams-wordcount-49801b8a-a532-40e8-85c1-9dd57f442a40-StreamThread-1-consumer, groupId=streams-wordcount] Error while fetching metadata with correlation id 20 : {streams-wordcount-KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition=UNKNOWN_TOPIC_OR_PARTITION} (org.apache.kafka.clients.NetworkClient)
[2023-12-12 21:06:08,600] WARN [Consumer clientId=streams-wordcount-49801b8a-a532-40e8-85c1-9dd57f442a40-StreamThread-1-consumer, groupId=streams-wordcount] Error while fetching metadata with correlation id 25 : {streams-wordcount-KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition=UNKNOWN_TOPIC_OR_PARTITION} (org.apache.kafka.clients.NetworkClient)


^Cketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ 


Task 4. Inspect the output data


You can now inspect the output of the WordCount demo application by reading from its output topic streams-wordcount-output:



ANOTHER SSH WINDOW:

ketan_patel@kafka-ubuntu-1-vm:~$ cd /opt/kafka
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ sudo bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
>     --topic streams-wordcount-output \
>     --from-beginning \
>     --formatter kafka.tools.DefaultMessageFormatter \
>     --property print.key=true \
>     --property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
>     --property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer
all     1
streams 1
lead    1
to      1
kafka   1
hello   1
kafka   2
streams 2
join    1
kafka   3
summit  1



^C^C
Processed a total of 11 messages
ketan_patel@kafka-ubuntu-1-vm:/opt/kafka$ 


Migrate Existing Prometheus Monitoring Workloads to Google Cloud

Overview
In this lab, you will explore how to use Managed Service for Prometheus in a self-deployed data collection mode. You can also utilize managed data collection.

With self-deployed data collection, you manage your Prometheus installation as usual. The only difference from upstream Prometheus is that you run the Managed Service for Prometheus drop-in replacement binary instead of the upstream Prometheus binary.

You can find more information on the considerations when choosing managed vs. self-managed data collection at the following documentation link: Data collection with Managed Service for Prometheus.


Deploy the Managed Service for Prometheus
Create a self managed data collection for scraping metrics
Understand considerations to make when using managed vs. self-managed data collections
Utilize Grafana to query Prometheus metrics data




ketan_patel@cloudshell:~ (new-user-learning)$ kubectl -n gmp-test apply -f https://raw.githubusercontent.com/GoogleCloudPlatform/prometheus-engine/v0.4.3-gke.0/examples/example-app.yaml

deployment.apps/prom-example created

ketan_patel@cloudshell:~ (new-user-learning)$ kubectl -n gmp-test apply -f https://raw.githubusercontent.com/GoogleCloudPlatform/prometheus-engine/v0.4.3-gke.0/examples/prometheus.yaml

clusterrole.rbac.authorization.k8s.io/gmp-test:prometheus-test created
clusterrolebinding.rbac.authorization.k8s.io/gmp-test:prometheus-test created
service/prometheus-test created
statefulset.apps/prometheus-test created
configmap/prometheus-test created

ketan_patel@cloudshell:~ (new-user-learning)$ kubectl -n gmp-test get pod

NAME                              READY   STATUS    RESTARTS      AGE
helloworld-gke-5f574446d7-97gx7   1/1     Running   0             80m
prom-example-7987cfb88f-6wnq2     1/1     Running   0             54s
prom-example-7987cfb88f-bv5f5     1/1     Running   0             54s
prom-example-7987cfb88f-thsqv     1/1     Running   0             54s
prometheus-test-0                 2/2     Running   1 (11s ago)   20s

ketan_patel@cloudshell:~ (new-user-learning)$ export PROJECT_ID=$(gcloud config get-value project)

Your active configuration is: [cloudshell-533]

ketan_patel@cloudshell:~ (new-user-learning)$ curl https://raw.githubusercontent.com/GoogleCloudPlatform/prometheus-engine/v0.4.3-gke.0/examples/frontend.yaml |

sed "s/\$PROJECT_ID/$PROJECT_ID/" | kubectl apply -n gmp-test -f -
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  1514  100  1514    0     0   8096      0 --:--:-- --:--:-- --:--:--  8096
deployment.apps/frontend created
service/frontend created

ketan_patel@cloudshell:~ (new-user-learning)$ kubectl -n gmp-test port-forward svc/frontend 9090

Forwarding from 127.0.0.1:9090 -> 9090
Handling connection for 9090
Handling connection for 9090






ketan_patel@cloudshell:~/kube-prometheus (new-user-learning)$ kubectl -n gmp-test apply -f https://raw.githubusercontent.com/GoogleCloudPlatform/prometheus-engine/v0.4.3-gke.0/examples/grafana.yaml
deployment.apps/grafana created
service/grafana created


ketan_patel@cloudshell:~/kube-prometheus (new-user-learning)$ kubectl -n gmp-test port-forward svc/grafana 3001:3000
error: unable to forward port because pod is not running. Current status=Pending


ketan_patel@cloudshell:~/kube-prometheus (new-user-learning)$ kubectl get pods -n gmp-test
NAME                              READY   STATUS    RESTARTS        AGE
frontend-694bd6ff76-2wcr5         1/1     Running   0               8m17s
frontend-694bd6ff76-qp4bs         1/1     Running   0               8m17s
grafana-9fdc4b86b-22wwr           1/1     Running   0               31s
helloworld-gke-5f574446d7-97gx7   1/1     Running   0               90m
prom-example-7987cfb88f-6wnq2     1/1     Running   0               10m
prom-example-7987cfb88f-bv5f5     1/1     Running   0               10m
prom-example-7987cfb88f-thsqv     1/1     Running   0               10m
prometheus-test-0                 2/2     Running   1 (9m42s ago)   9m51s
 
ketan_patel@cloudshell:~/kube-prometheus (new-user-learning)$ kubectl -n gmp-test port-forward svc/grafana 3001:3000
Forwarding from 127.0.0.1:3001 -> 3000
Handling connection for 3001
Handling connection for 3001






Gcloud commands


ketan_patel@cloudshell:~ (new-user-learning)$ gcloud config list compute/region
[compute]
region (unset)

Your active configuration is: [cloudshell-22327]
ketan_patel@cloudshell:~ (new-user-learning)$ gcloud config list compute/zone
[compute]
zone (unset)

Your active configuration is: [cloudshell-22327]

ketan_patel@cloudshell:~ (new-user-learning)$ gcloud config set compute/region us-west1
Updated property [compute/region].


ketan_patel@cloudshell:~ (new-user-learning)$ gcloud config set compute/zone us-west1-b
Updated property [compute/zone].

ketan_patel@cloudshell:~ (new-user-learning)$ gcloud config list
[accessibility]
screen_reader = True
[component_manager]
disable_update_check = True
[compute]
gce_metadata_read_timeout_sec = 30
region = us-west1
zone = us-west1-b
[core]
account = ketan.patel@supernal.aero
disable_usage_reporting = True
project = new-user-learning
[metrics]
environment = devshell

Your active configuration is: [cloudshell-22327]
ketan_patel@cloudshell:~ (new-user-learning)$ 

GCP - Workshop notes

 




Using Prometheus for Monitoring on Google Cloud: Qwik Start

Overview

Set up a Google Kubernetes Engine cluster, then deploy the Managed Service for Prometheus to ingest metrics from a simple application.

Managed Service for Prometheus is Google Cloud's fully managed storage and query service for Prometheus metrics. This service is built on top of Monarch, the same globally scalable data store as Cloud Monitoring.

A thin fork of Prometheus replaces existing Prometheus deployments and sends data to the managed service with no user intervention. This data can then be queried by using PromQL through the Prometheus Query API supported by the managed service and by using the existing Cloud Monitoring query mechanisms.


Objectives
Deploy the Managed Service for Prometheus to a GKE cluster
Deploy a Python application to monitor
Create a Cloud Monitoring dashboard to view metrics collected



student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ gcloud auth list
Credentialed Accounts

ACTIVE: *
ACCOUNT: student-04-61f5f7cf52ff@qwiklabs.net

To set the active account, run:



student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ gcloud config list project
[core]
project = qwiklabs-gcp-00-a051edc2be39

Your active configuration is: [cloudshell-8839]



Run the following command to deploy a standard GKE cluster, which will prompt you to authorize and enable the GKE API:


student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ gcloud beta container clusters create gmp-cluster --num-nodes=1 --zone us-east1-b --enable-managed-prometheus

Default change: VPC-native is the default mode during cluster creation for versions greater than 1.21.0-gke.1500. To create advanced routes based clusters, please pass the `--no-enable-ip-alias` flag
Default change: During creation of nodepools or autoscaling configuration changes for cluster versions greater than 1.24.1-gke.800 a default location policy is applied. For Spot and PVM it defaults to ANY, and for all other VM kinds a BALANCED policy is used. To change the default values use the `--location-policy` flag. Note: Your Pod address range (`--cluster-ipv4-cidr`) can accommodate at most 1008 node(s).
Creating cluster gmp-cluster in us-east1-b... Cluster is being health-checked (master is healthy)...done.                       
Created [https://container.googleapis.com/v1beta1/projects/qwiklabs-gcp-00-a051edc2be39/zones/us-east1-b/clusters/gmp-cluster].
To inspect the contents of your cluster, go to: https://console.cloud.google.com/kubernetes/workload_/gcloud/us-east1-b/gmp-cluster?project=qwiklabs-gcp-00-a051edc2be39
kubeconfig entry generated for gmp-cluster.
NAME: gmp-cluster
LOCATION: us-east1-b
MASTER_VERSION: 1.27.3-gke.100
MASTER_IP: 35.196.141.45
MACHINE_TYPE: e2-medium
NODE_VERSION: 1.27.3-gke.100
NUM_NODES: 1
STATUS: RUNNING
student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ 

Run the following command to authenticate to the cluster:

 student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ gcloud container clusters get-credentials gmp-cluster --zone us-east1-b
Fetching cluster endpoint and auth data.
kubeconfig entry generated for gmp-cluster.

Task 2. Deploy the Prometheus service

Run the following command to create a namespace to do the work in:



student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ kubectl create ns gmp-test
namespace/gmp-test created
student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ kubectl get ns
NAME              STATUS   AGE
default           Active   5m36s
gmp-public        Active   4m31s
gmp-system        Active   4m31s
gmp-test          Active   8s
kube-node-lease   Active   5m36s
kube-public       Active   5m36s
kube-system       Active   5m37s
student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$


Task 3. Deploy the application

Deploy a simple application which emits metrics at the /metrics endpoint:

student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ kubectl -n gmp-test apply -f https://raw.githubusercontent.com/kyleabenson/flask_telemetry/master/gmp_prom_setup/flask_deployment.yaml

deployment.apps/helloworld-gke created

--------------------- ----------------------------------------
# This file configures the hello-world app which serves public web traffic.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: helloworld-gke
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hello
  template:
    metadata:
      labels:
        app: hello
    spec:
      containers:
      - name: hello-app
        image: gcr.io/ops-demo-330920/flask_telemetry:61a2a7aabc7077ef474eb24f4b69faeab47deed9
        # This app listens on port 4000 for web traffic by default.
        ports:
        - containerPort: 4000
          name: flaskport
        env:
          - name: PORT
            value: "4000"

----------------------------- ----------------------------------------


student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ kubectl -n gmp-test apply -f https://raw.githubusercontent.com/kyleabenson/flask_telemetry/master/gmp_prom_setup/flask_service.yaml

service/hello created

--------------------- --------------------------------------
# The hello service provides a load-balancing proxy over the hello-app
# pods. By specifying the type as a 'LoadBalancer', Kubernetes Engine will
# create an external HTTP load balancer.
apiVersion: v1
kind: Service
metadata:
  name: hello
spec:
  type: LoadBalancer
  selector:
    app: hello
  ports:
  - port: 80
    targetPort: 4000
------------------------- ---------------------------------------------

Verify that this simple Python Flask app is serving metrics with the following command:

student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ url=$(kubectl get services -n gmp-test -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}')


student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ echo $url
34.148.86.126

student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ curl $url/metrics

# HELP flask_exporter_info Multiprocess metric
# TYPE flask_exporter_info gauge
flask_exporter_info{version="0.18.5"} 1.0


Tell Prometheus where to begin scraping the metrics from by applying the PodMonitoring file:

student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ kubectl -n gmp-test apply -f https://raw.githubusercontent.com/kyleabenson/flask_telemetry/master/gmp_prom_setup/prom_deploy.yaml

podmonitoring.monitoring.googleapis.com/prom-example created


Before finishing up here, generate some load on the application with a really simple interaction with the app:


student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ timeout 120 bash -c -- 'while true; do curl $(kubectl get services -n gmp-test -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}'); sleep $((RANDOM % 4)) ; done'

{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 17:35:19 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 17:35:20 GMT"}

<TRUNCATED...>

{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 17:37:14 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 17:37:17 GMT"}


Task 4. Observing the app via metrics

In this last section, quickly use gcloud to deploy a custom monitoring dashboard that shows the metrics from this application in a line chart.

Be sure to copy the entirety of this code block:



student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ gcloud monitoring dashboards create --config='''
{
  "category": "CUSTOM",
  "displayName": "Prometheus Dashboard Example",
  "mosaicLayout": {
    "columns": 12,
    "tiles": [
      {
        "height": 4,
        "widget": {
          "title": "prometheus/flask_http_request_total/counter [MEAN]",
          "xyChart": {
            "chartOptions": {
              "mode": "COLOR"
            },
            "dataSets": [
              {
                "minAlignmentPeriod": "60s",
                "plotType": "LINE",
                "targetAxis": "Y1",
                "timeSeriesQuery": {
                  "apiSource": "DEFAULT_CLOUD",
                  "timeSeriesFilter": {
                    "aggregation": {
                      "alignmentPeriod": "60s",
                      "crossSeriesReducer": "REDUCE_NONE",
                      "perSeriesAligner": "ALIGN_RATE"
                    },
                    "filter": "metric.type=\"prometheus.googleapis.com/flask_http_request_total/counter\" resource.type=\"prometheus_target\"",
                    "secondaryAggregation": {
                      "alignmentPeriod": "60s",
                      "crossSeriesReducer": "REDUCE_MEAN",
                      "groupByFields": [
                        "metric.label.\"status\""
                      ],
                      "perSeriesAligner": "ALIGN_MEAN"
                    }
                  }
                }
              }
            ],
            "thresholds": [],
            "timeshiftDuration": "0s",
            "yAxis": {
              "label": "y1Axis",
              "scale": "LINEAR"
            }
          }
        },
        "width": 6,
        "xPos": 0,
        "yPos": 0
      }
    ]
  }
}
'''

Created [a2cc4385-f866-4095-b946-361a9f7fb883].
student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ 









student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ kubectl get deployment -A

NAMESPACE     NAME                            READY   UP-TO-DATE   AVAILABLE   AGE
gmp-system    gmp-operator                    1/1     1            1           46m
gmp-system    rule-evaluator                  1/1     1            1           46m
gmp-test      helloworld-gke                  1/1     1            1           40m
kube-system   event-exporter-gke              1/1     1            1           46m
kube-system   konnectivity-agent              1/1     1            1           46m
kube-system   konnectivity-agent-autoscaler   1/1     1            1           46m
kube-system   kube-dns                        1/1     1            1           46m
kube-system   kube-dns-autoscaler             1/1     1            1           46m
kube-system   l7-default-backend              1/1     1            1           46m
kube-system   metrics-server-v0.5.2           1/1     1            1           46m

student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ kubectl get nodes 

NAME                                         STATUS   ROLES    AGE   VERSION
gke-gmp-cluster-default-pool-59dbcd3c-ldx9   Ready    <none>   45m   v1.27.3-gke.100

student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ kubectl get pods -A

NAMESPACE     NAME                                                    READY   STATUS    RESTARTS      AGE
gmp-system    alertmanager-0                                          2/2     Running   0             46m
gmp-system    collector-95mp5                                         2/2     Running   0             45m
gmp-system    gmp-operator-5f89fc5d7c-9zhkx                           1/1     Running   0             46m
gmp-system    rule-evaluator-f578d69c7-jpxjq                          2/2     Running   1 (45m ago)   45m
gmp-test      helloworld-gke-5f574446d7-rqhc8                         1/1     Running   0             41m
kube-system   event-exporter-gke-7bf6c99dcb-5rhwn                     2/2     Running   0             46m
kube-system   fluentbit-gke-zjp76                                     2/2     Running   0             45m
kube-system   gke-metrics-agent-8hfcd                                 2/2     Running   0             45m
kube-system   konnectivity-agent-5fc7ff9689-zrmj9                     1/1     Running   0             46m
kube-system   konnectivity-agent-autoscaler-5d9dbcc6d8-tt7h2          1/1     Running   0             46m
kube-system   kube-dns-5bfd847c64-wb726                               4/4     Running   0             46m
kube-system   kube-dns-autoscaler-84b8db4dc7-c42rz                    1/1     Running   0             46m
kube-system   kube-proxy-gke-gmp-cluster-default-pool-59dbcd3c-ldx9   1/1     Running   0             44m
kube-system   l7-default-backend-d86c96845-9h7p9                      1/1     Running   0             46m
kube-system   metrics-server-v0.5.2-6bf74b5d5f-tppp8                  2/2     Running   0             45m
kube-system   pdcsi-node-nb5dd                                        2/2     Running   0             45m

student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ kubectl get services -A

NAMESPACE     NAME                   TYPE           CLUSTER-IP    EXTERNAL-IP     PORT(S)            AGE
default       kubernetes             ClusterIP      10.0.0.1      <none>          443/TCP            47m
gmp-system    alertmanager           ClusterIP      None          <none>          9093/TCP           46m
gmp-system    gmp-operator           ClusterIP      10.0.13.221   <none>          8443/TCP,443/TCP   46m
gmp-test      hello                  LoadBalancer   10.0.8.51     34.148.86.126   80:30976/TCP       40m
kube-system   default-http-backend   NodePort       10.0.5.244    <none>          80:30154/TCP       46m
kube-system   kube-dns               ClusterIP      10.0.0.10     <none>          53/UDP,53/TCP      47m
kube-system   metrics-server         ClusterIP      10.0.3.242    <none>          443/TCP            46m
student_04_61f5f7cf52ff@cloudshell:~ (qwiklabs-gcp-00-a051edc2be39)$ 







===================== ===================== =====================

 
ketan_patel@cloudshell:~ (new-user-learning)$ gcloud beta container clusters create gmp-cluster --num-nodes=1  --enable-managed-prometheus
Default change: VPC-native is the default mode during cluster creation for versions greater than 1.21.0-gke.1500. To create advanced routes based clusters, please pass the `--no-enable-ip-alias` flag
Default change: During creation of nodepools or autoscaling configuration changes for cluster versions greater than 1.24.1-gke.800 a default location policy is applied. For Spot and PVM it defaults to ANY, and for all other VM kinds a BALANCED policy is used. To change the default values use the `--location-policy` flag.
Note: Your Pod address range (`--cluster-ipv4-cidr`) can accommodate at most 1008 node(s).
Creating cluster gmp-cluster in us-west1-b... Cluster is being health-checked (master is healthy)...done.                                                                                     
Created [https://container.googleapis.com/v1beta1/projects/new-user-learning/zones/us-west1-b/clusters/gmp-cluster].
To inspect the contents of your cluster, go to: https://console.cloud.google.com/kubernetes/workload_/gcloud/us-west1-b/gmp-cluster?project=new-user-learning
kubeconfig entry generated for gmp-cluster.
NAME: gmp-cluster
LOCATION: us-west1-b
MASTER_VERSION: 1.27.3-gke.100
MASTER_IP:  X.X.X.X
MACHINE_TYPE: e2-medium
NODE_VERSION: 1.27.3-gke.100
NUM_NODES: 1
STATUS: RUNNING





ketan_patel@cloudshell:~ (new-user-learning)$ gcloud container clusters get-credentials gmp-cluster
Fetching cluster endpoint and auth data.
kubeconfig entry generated for gmp-cluster.


ketan_patel@cloudshell:~ (new-user-learning)$ kubectl config current-context
gke_new-user-learning_us-west1-b_gmp-cluster

ketan_patel@cloudshell:~ (new-user-learning)$ kubectl get deployment
No resources found in default namespace.
ketan_patel@cloudshell:~ (new-user-learning)$ kubectl get deployment -A
NAMESPACE     NAME                            READY   UP-TO-DATE   AVAILABLE   AGE
gmp-system    gmp-operator                    1/1     1            1           29m
gmp-system    rule-evaluator                  1/1     1            1           29m
kube-system   event-exporter-gke              1/1     1            1           30m
kube-system   konnectivity-agent              1/1     1            1           29m
kube-system   konnectivity-agent-autoscaler   1/1     1            1           29m
kube-system   kube-dns                        1/1     1            1           30m
kube-system   kube-dns-autoscaler             1/1     1            1           30m
kube-system   l7-default-backend              1/1     1            1           29m
kube-system   metrics-server-v0.5.2           1/1     1            1           29m

ketan_patel@cloudshell:~ (new-user-learning)$ kubectl get nodes
NAME                                         STATUS   ROLES    AGE   VERSION
gke-gmp-cluster-default-pool-4defca36-jkbh   Ready    <none>   29m   v1.27.3-gke.100


ketan_patel@cloudshell:~ (new-user-learning)$ kubectl get services -A
NAMESPACE     NAME                   TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)            AGE
default       kubernetes             ClusterIP   10.0.0.1     <none>        443/TCP            30m
gmp-system    alertmanager           ClusterIP   None         <none>        9093/TCP           29m
gmp-system    gmp-operator           ClusterIP   10.0.9.243   <none>        8443/TCP,443/TCP   29m
kube-system   default-http-backend   NodePort    10.0.6.173   <none>        80:31366/TCP       30m
kube-system   kube-dns               ClusterIP   10.0.0.10    <none>        53/UDP,53/TCP      30m
kube-system   metrics-server         ClusterIP   10.0.10.0    <none>        443/TCP            29m

ketan_patel@cloudshell:~ (new-user-learning)$ kubectl get pods -A
NAMESPACE     NAME                                                    READY   STATUS    RESTARTS      AGE
gmp-system    alertmanager-0                                          2/2     Running   0             29m
gmp-system    collector-gr6zl                                         2/2     Running   0             28m
gmp-system    gmp-operator-6888d59866-8982x                           1/1     Running   0             30m
gmp-system    rule-evaluator-77976dd4d9-qsqmx                         2/2     Running   2 (28m ago)   28m
kube-system   event-exporter-gke-7bf6c99dcb-cksqf                     2/2     Running   0             30m
kube-system   fluentbit-gke-bmcr6                                     2/2     Running   0             29m
kube-system   gke-metrics-agent-2cwx7                                 2/2     Running   0             29m
kube-system   konnectivity-agent-758789cc74-xwk4w                     1/1     Running   0             30m
kube-system   konnectivity-agent-autoscaler-5d9dbcc6d8-9qltt          1/1     Running   0             30m
kube-system   kube-dns-5bfd847c64-jjr4j                               4/4     Running   0             30m
kube-system   kube-dns-autoscaler-84b8db4dc7-wp45d                    1/1     Running   0             30m
kube-system   kube-proxy-gke-gmp-cluster-default-pool-4defca36-jkbh   1/1     Running   0             28m
kube-system   l7-default-backend-d86c96845-rgqwf                      1/1     Running   0             30m
kube-system   metrics-server-v0.5.2-6bf74b5d5f-wpp9p                  2/2     Running   0             28m
kube-system   pdcsi-node-h95cv                                        2/2     Running   0             29m

ketan_patel@cloudshell:~ (new-user-learning)$ kubectl create ns gmp-test
namespace/gmp-test created

ketan_patel@cloudshell:~ (new-user-learning)$ kubectl get ns
NAME              STATUS   AGE
default           Active   31m
gmp-public        Active   30m
gmp-system        Active   30m
gmp-test          Active   24s
kube-node-lease   Active   31m
kube-public       Active   31m
kube-system       Active   31m



ketan_patel@cloudshell:~ (new-user-learning)$ kubectl -n gmp-test apply -f https://raw.githubusercontent.com/kyleabenson/flask_telemetry/master/gmp_prom_setup/flask_deployment.yaml
deployment.apps/helloworld-gke created






ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ kubectl -n gmp-test apply -f https://raw.githubusercontent.com/kyleabenson/flask_telemetry/master/gmp_prom_setup/flask_service.yaml
service/hello created









ketan_patel@cloudshell:~ (new-user-learning)$ kubectl get pods -n gmp-test
NAME                              READY   STATUS    RESTARTS   AGE
helloworld-gke-5f574446d7-97gx7   1/1     Running   0          21s
ketan_patel@cloudshell:~ (new-user-learning)$ kubectl get services -n gmp-test
NAME    TYPE           CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
hello   LoadBalancer   10.0.7.206   <pending>     80:30322/TCP   18s
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ kubectl get services -n gmp-test
NAME    TYPE           CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
hello   LoadBalancer   10.0.7.206   <pending>     80:30322/TCP   31s
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ url=$(kubectl get services -n gmp-test -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}')
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ echo $url

ketan_patel@cloudshell:~ (new-user-learning)$ echo $url

ketan_patel@cloudshell:~ (new-user-learning)$ url=$(kubectl get services -n gmp-test -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}')
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ echo $url
35.199.168.92
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ curl $url/metrics
# HELP flask_exporter_info Multiprocess metric
# TYPE flask_exporter_info gauge
flask_exporter_info{version="0.18.5"} 1.0
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ kubectl -n gmp-test apply -f https://raw.githubusercontent.com/kyleabenson/flask_telemetry/master/gmp_prom_setup/prom_deploy.yaml
podmonitoring.monitoring.googleapis.com/prom-example created
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ timeout 120 bash -c -- 'while true; do curl $(kubectl get services -n gmp-test -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}'); sleep $((RANDOM % 4)) ; done'
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:20 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:23 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:24 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:26 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:27 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:29 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:31 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:34 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:35 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:38 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:41 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:42 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:43 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:46 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:48 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:50 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:50 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:54 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:54 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:56 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:57 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:59 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:28:59 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:02 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:05 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:05 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:08 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:09 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:12 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:15 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:18 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:21 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:22 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:25 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:28 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:29 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:31 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:31 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:33 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:33 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:36 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:36 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:38 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:40 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:41 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:43 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:46 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:47 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:49 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:52 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:54 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:55 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:57 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:57 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:29:58 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:01 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:01 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:02 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:03 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:04 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:06 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:07 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:10 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:10 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:12 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:15 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:15 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:16 GMT"}
{"message":"Hello World!","severity":"info","timestamp":"Thu, 26 Oct 2023 19:30:19 GMT"}
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$  
ketan_patel@cloudshell:~ (new-user-learning)$ 
ketan_patel@cloudshell:~ (new-user-learning)$  
ketan_patel@cloudshell:~ (new-user-learning)$ gcloud monitoring dashboards create --config='''
{
  "category": "CUSTOM",
  "displayName": "Prometheus Dashboard Example",
  "mosaicLayout": {
    "columns": 12,
    "tiles": [
      {
        "height": 4,
        "widget": {
          "title": "prometheus/flask_http_request_total/counter [MEAN]",
          "xyChart": {
            "chartOptions": {
              "mode": "COLOR"
            },
            "dataSets": [
              {
                "minAlignmentPeriod": "60s",
                "plotType": "LINE",
                "targetAxis": "Y1",
                "timeSeriesQuery": {
                  "apiSource": "DEFAULT_CLOUD",
                  "timeSeriesFilter": {
                    "aggregation": {
                      "alignmentPeriod": "60s",
                      "crossSeriesReducer": "REDUCE_NONE",
                      "perSeriesAligner": "ALIGN_RATE"
                    },
                    "filter": "metric.type=\"prometheus.googleapis.com/flask_http_request_total/counter\" resource.type=\"prometheus_target\"",
                    "secondaryAggregation": {
                      "alignmentPeriod": "60s",
                      "crossSeriesReducer": "REDUCE_MEAN",
                      "groupByFields": [
                        "metric.label.\"status\""
                      ],
                      "perSeriesAligner": "ALIGN_MEAN"
                    }
                  }
                }
              }
            ],
            "thresholds": [],
            "timeshiftDuration": "0s",
            "yAxis": {
              "label": "y1Axis",
              "scale": "LINEAR"
            }
          }
        },
        "width": 6,
        "xPos": 0,
        "yPos": 0
      }
    ]
  }
}
'''
Created [7ac033b1-4bd1-4116-b236-397d55fb783c].
ketan_patel@cloudshell:~ (new-user-learning)$ 





523  gcloud beta container clusters create gmp-cluster --num-nodes=1  --enable-managed-prometheus
  524  gcloud container clusters get-credentials gmp-cluster
  525  kubectl config current-context
  526  kubectl get deployment
  527  kubectl get deployment -A
  528  kubectl get nodes
  529  kubectl get services -A
  530  kubectl get pods -A
  531  kubectl create ns gmp-test
  532  kubectl get ns
  533  kubectl -n gmp-test apply -f https://raw.githubusercontent.com/kyleabenson/flask_telemetry/master/gmp_prom_setup/flask_deployment.yaml
  534  kubectl -n gmp-test apply -f https://raw.githubusercontent.com/kyleabenson/flask_telemetry/master/gmp_prom_setup/flask_service.yaml
  535  kubectl get pods -n gmp-test
  536  kubectl get services -n gmp-test
  537  url=$(kubectl get services -n gmp-test -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}')
  538  echo $url
  539  url=$(kubectl get services -n gmp-test -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}')
  540  echo $url
  541  curl $url/metrics
  542  kubectl -n gmp-test apply -f https://raw.githubusercontent.com/kyleabenson/flask_telemetry/master/gmp_prom_setup/prom_deploy.yaml
  543  timeout 120 bash -c -- 'while true; do curl $(kubectl get services -n gmp-test -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}'); sleep $((RANDOM % 4)) ; done'
  544  gcloud monitoring dashboards create --config='''
{
  "category": "CUSTOM",
  "displayName": "Prometheus Dashboard Example",
  "mosaicLayout": {
    "columns": 12,
    "tiles": [
      {
        "height": 4,
        "widget": {
          "title": "prometheus/flask_http_request_total/counter [MEAN]",
          "xyChart": {
            "chartOptions": {
              "mode": "COLOR"
            },
            "dataSets": [
              {
                "minAlignmentPeriod": "60s",
                "plotType": "LINE",
                "targetAxis": "Y1",
                "timeSeriesQuery": {
                  "apiSource": "DEFAULT_CLOUD",
                  "timeSeriesFilter": {
                    "aggregation": {
                      "alignmentPeriod": "60s",
                      "crossSeriesReducer": "REDUCE_NONE",
                      "perSeriesAligner": "ALIGN_RATE"
                    },
                    "filter": "metric.type=\"prometheus.googleapis.com/flask_http_request_total/counter\" resource.type=\"prometheus_target\"",
                    "secondaryAggregation": {
                      "alignmentPeriod": "60s",
                      "crossSeriesReducer": "REDUCE_MEAN",
                      "groupByFields": [
                        "metric.label.\"status\""
                      ],
                      "perSeriesAligner": "ALIGN_MEAN"
                    }
                  }
                }
              }
            ],
            "thresholds": [],
            "timeshiftDuration": "0s",
            "yAxis": {
              "label": "y1Axis",
              "scale": "LINEAR"
            }
          }
        },
        "width": 6,
        "xPos": 0,
        "yPos": 0
      }
    ]
  }
}
'''
  545  history
ketan_patel@cloudshell:~ (new-user-learning)$ 




ketan_patel@cloudshell:~ (new-user-learning)$ kubectl get pods -n gmp-test
NAME                              READY   STATUS    RESTARTS   AGE
helloworld-gke-5f574446d7-97gx7   1/1     Running   0          68m
ketan_patel@cloudshell:~ (new-user-learning)$ kubectl describe pod -n gmp-test
Name:             helloworld-gke-5f574446d7-97gx7
Namespace:        gmp-test
Priority:         0
Service Account:  default
Node:             gke-gmp-cluster-default-pool-4defca36-jkbh/10.138.0.32
Start Time:       Thu, 26 Oct 2023 19:26:29 +0000
Labels:           app=hello
                  pod-template-hash=5f574446d7
Annotations:      <none>
Status:           Running
IP:               10.124.0.16
IPs:
  IP:           10.124.0.16
Controlled By:  ReplicaSet/helloworld-gke-5f574446d7
Containers:
  hello-app:
    Container ID:   containerd://292d9ebcbda1b1ad6438ba5e28b56a4dc22c63ed9442c3b34bd88314866c5e02
    Image:          gcr.io/ops-demo-330920/flask_telemetry:61a2a7aabc7077ef474eb24f4b69faeab47deed9
    Image ID:       gcr.io/ops-demo-330920/flask_telemetry@sha256:ff5bc984a8aecc0a5d3d32b2ad5ea81ff16574dc71351f23fb79c45489a29f8c
    Port:           4000/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Thu, 26 Oct 2023 19:26:47 +0000
    Ready:          True
    Restart Count:  0
    Environment:
      PORT:  4000
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-c7snr (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  kube-api-access-c7snr:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:                      <none>
ketan_patel@cloudshell:~ (new-user-learning)$ 

AppEngine - Python

student_04_347b5286260a@cloudshell:~/python-docs-samples/appengine/standard_python3/hello_world (qwiklabs-gcp-00-88834e0beca1)$ sudo apt upda...