../../images/logo.svg

NGINX reverse proxy

Use nginx as reverse proxy in front of multiple clusters

$ dnf install -y nginx nginx-mod-stream.x86_64

Add the line `include /etc/nginx/passthrough.conf;` to nginx.conf

passthrough.conf

stream {

    # SNI-based routing: ssl_preread extracts the requested server name from
    # the TLS ClientHello (without terminating TLS) and this map picks the
    # internal loopback port that forwards to the matching cluster endpoint.
    map $ssl_preread_server_name $internalport {
        hostnames;
        *.apps.sno1.domain     9441;   # sno1 ingress (wildcard app routes)
        *.apps.sno2.domain     9442;   # sno2 ingress
        api.sno1.domain        6441;   # sno1 Kubernetes API
        api.sno2.domain        6442;   # sno2 Kubernetes API
    }


    upstream sno2_api {
        server 192.168.0.109:6443 max_fails=3 fail_timeout=10s;
    }
    upstream sno2_ingress {
        server 192.168.0.109:443 max_fails=3 fail_timeout=10s;
    }
    upstream sno1_api {
        server 192.168.0.110:6443 max_fails=3 fail_timeout=10s;
    }
    upstream sno1_ingress {
        # FIX: ingress traffic must target port 443 (was 6443, the API port),
        # consistent with sno2_ingress above.
        server 192.168.0.110:443 max_fails=3 fail_timeout=10s;
    }

    log_format basic '$remote_addr [$time_local] '
                     '$protocol $status $bytes_sent $bytes_received '
                     '$session_time "$upstream_addr" '
                     '"$upstream_bytes_sent" "$upstream_bytes_received" "$upstream_connect_time"';

    access_log /var/log/nginx/access.log basic;
    error_log  /var/log/nginx/error.log;

    # Public entry point for ingress (HTTPS) traffic: dispatch by SNI to the
    # internal passthrough port selected in the map above.
    server {
        listen                  443;
        ssl_preread             on;
        proxy_connect_timeout   20s;  # max time to connect to the proxied server
        proxy_timeout           30s;  # max time between successive reads or writes
        proxy_pass              127.0.0.1:$internalport;
    }
    # Public entry point for Kubernetes API traffic: same SNI dispatch.
    server {
        listen                  6443;
        ssl_preread             on;
        proxy_connect_timeout   20s;  # max time to connect to the proxied server
        proxy_timeout           30s;  # max time between successive reads or writes
        proxy_pass              127.0.0.1:$internalport;
    }
    # Internal passthrough ports, one per cluster endpoint.
    server {
        listen 9441;
        proxy_pass sno1_ingress;
        proxy_next_upstream on;
    }
    server {
        listen 9442;
        proxy_pass sno2_ingress;
        proxy_next_upstream on;
    }
    server {
        listen 6441;
        proxy_pass sno1_api;
        proxy_next_upstream on;
    }
    server {
        listen 6442;
        proxy_pass sno2_api;
        proxy_next_upstream on;
    }
}

ODF installation

Install Openshift Data Foundation from Operator Hub

Create a StorageSystem using “Connect an external storage platform” of Red Hat Ceph Storage type

Download ceph-external-cluster-details-exporter.py script and run it on your ceph admin node

$ python3 ceph-external-cluster-details-exporter.py --rbd-data-pool-name testrbd --cephfs-data-pool-name cephfs.testfs.data --rgw-endpoint 10.0.0.n:80 --cephfs-filesystem-name testfs

Sample output :

[{"name": "rook-ceph-mon-endpoints", "kind": "ConfigMap", "data": {"data": "ceph1=10.0.0.n:6789", "maxMonId": "0", "mapping": "{}"}}, {"name": "rook-ceph-mon", "kind": "Secret", "data": {"admin-secret": "admin-secret", "fsid": "5dabcb8e-ad19-11ed-a179-005056af8aeb", "mon-secret": "mon-secret"}}, {"name": "rook-ceph-operator-creds", "kind": "Secret", "data": {"userID": "client.healthchecker", "userKey": "********************"}}, {"name": "rook-csi-rbd-node", "kind": "Secret", "data": {"userID": "csi-rbd-node", "userKey": "********"}}, {"name": "ceph-rbd", "kind": "StorageClass", "data": {"pool": "testrbd"}}, {"name": "monitoring-endpoint", "kind": "CephCluster", "data": {"MonitoringEndpoint": "10.0.0.n", "MonitoringPort": "9283"}}, {"name": "rook-ceph-dashboard-link", "kind": "Secret", "data": {"userID": "ceph-dashboard-link", "userKey": "https://10.0.0.n:8443/"}}, {"name": "rook-csi-rbd-provisioner", "kind": "Secret", "data": {"userID": "csi-rbd-provisioner", "userKey": "************"}}, {"name": "rook-csi-cephfs-provisioner", "kind": "Secret", "data": {"adminID": "csi-cephfs-provisioner", "adminKey": "***********"}}, {"name": "rook-csi-cephfs-node", "kind": "Secret", "data": {"adminID": "csi-cephfs-node", "adminKey": "*************"}}, {"name": "cephfs", "kind": "StorageClass", "data": {"fsName": "testfs", "pool": "cephfs.testfs.data"}}, {"name": "ceph-rgw", "kind": "StorageClass", "data": {"endpoint": "10.0.0.n:80", "poolPrefix": "default"}}, {"name": "rgw-admin-ops-user", "kind": "Secret", "data": {"accessKey": "************************", "secretKey": "**********************"}}]

Save the json file and import it in the StorageSystem wizard

The RBD pool must be a replicated pool, because erasure-coded RBD pools are not supported in ODF.

Network Observability

Red Hat offers cluster administrators the Network Observability Operator to observe the network traffic for OpenShift Container Platform clusters. The Network Observability uses the eBPF technology to create network flows. The network flows are then enriched with OpenShift Container Platform information and stored in Loki. You can view and analyze the stored network flows information in the OpenShift Container Platform console for further insight and troubleshooting.

Network Observability Operator in OpenShift Container Platform

Cluster versions

Current

$ oc get clusterversion -o json|jq ".items[0].spec"

{
  "channel": "candidate-4.12",
  "clusterID": "1ad501e2-5e60-45a5-9890-35d56bc06a4d",
  "desiredUpdate": {
    "force": false,
    "image": "quay.io/openshift-release-dev/ocp-release@sha256:31c7741fc7bb73ff752ba43f5acf014b8fadd69196fc522241302de918066cb1",
    "version": "4.12.2"
  }
}

History

$ oc get clusterversion -o json|jq ".items[0].status.history"


[
  {
    "completionTime": "2023-02-09T10:32:35Z",
    "image": "quay.io/openshift-release-dev/ocp-release@sha256:31c7741fc7bb73ff752ba43f5acf014b8fadd69196fc522241302de918066cb1",
    "startedTime": "2023-02-09T09:05:12Z",
    "state": "Completed",
    "verified": true,
    "version": "4.12.2"
  },
  {
    "completionTime": "2023-01-18T19:23:07Z",
    "image": "quay.io/openshift-release-dev/ocp-release@sha256:4c5a7e26d707780be6466ddc9591865beb2e3baa5556432d23e8d57966a2dd18",
    "startedTime": "2023-01-18T18:42:01Z",
    "state": "Completed",
    "verified": false,
    "version": "4.12.0"
  }
]

Load balancer config

Default haproxy config

  • Inter 1s (The “inter” parameter sets the interval, in milliseconds, between two consecutive health checks — here 1 second.)

  • Fall 2 (The “fall” parameter states that a server will be considered as dead after 2 consecutive unsuccessful health checks.)

  • Rise 3 (The “rise” parameter states that a server will be considered as operational after 3 consecutive successful health checks.)

  • HttpCheck GET /readyz HTTP/1.0

# HAProxy TCP load balancer in front of the OpenShift control plane (API port 6443).
global
  # Admin socket for runtime inspection; expose-fd listeners enables seamless reloads.
  stats socket /var/lib/haproxy/run/haproxy.sock  mode 600 level admin expose-fd listeners
defaults
  maxconn 20000
  # TCP mode: connections are passed through, TLS is not terminated here.
  mode    tcp
  log     /var/run/haproxy/haproxy-log.sock local0
  option  dontlognull
  retries 3
  timeout http-request 30s
  timeout queue        1m
  timeout connect      10s
  # Long client/server/tunnel timeouts (24h) keep long-lived API watch
  # connections from being cut by the load balancer.
  timeout client       86400s
  timeout server       86400s
  timeout tunnel       86400s
# Main entry point: listens on 9445 (IPv4+IPv6) and forwards to the masters backend.
# NOTE(review): 9445 is not the standard 6443 API port — presumably something
# upstream (DNAT or another proxy) forwards API traffic here; confirm.
frontend  main
  bind :::9445 v4v6
  default_backend masters
# Lightweight health endpoint: GET /haproxy_ready on 9444 reports proxy liveness.
listen health_check_http_url
  bind :::9444 v4v6
  mode http
  monitor-uri /haproxy_ready
  option dontlognull
# Statistics page, bound to localhost only, at /haproxy_stats.
listen stats
  bind localhost:29445
  mode http
  stats enable
  stats hide-version
  stats uri /haproxy_stats
  stats refresh 30s
  stats auth Username:Password
# Control-plane pool: health check is an HTTPS GET /readyz against each
# kube-apiserver (check-ssl, certificate verification disabled via verify none).
# inter 1s / fall 2 / rise 3: probe every second, mark down after 2 failures,
# back up after 3 successes.
backend masters
   option  httpchk GET /readyz HTTP/1.0
   option  log-health-checks
   balance roundrobin
   server master-0 10.10.0.209:6443 weight 1 verify none check check-ssl inter 1s fall 2 rise 3
   server master-2 10.10.0.228:6443 weight 1 verify none check check-ssl inter 1s fall 2 rise 3
   server master-1 10.10.0.250:6443 weight 1 verify none check check-ssl inter 1s fall 2 rise 3