ceph/ceph-ansible

Cannot install Ceph Dashboard without rgws

uncelvel opened this issue · 4 comments

Bug Report

What happened:
The Ceph Dashboard installation fails when the inventory has no [rgws] group.

What you expected to happen:
The Ceph Dashboard installs even when no [rgws] group is defined in the inventory.

How to reproduce it (minimal and precise):
Enable the dashboard (dashboard_enabled: True), leave the [rgws] group out of the inventory, and run the playbook; the ceph-facts role included by ceph-dashboard fails. A minimal inventory sketch is shown below.
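
For reference, a minimal inventory matching this report might look like the following. The host IPs come from the play recap further down; the group membership is illustrative, and the only point that matters is that no [rgws] group is present.

[mons]
172.16.1.64
172.16.1.65

[mgrs]
172.16.1.64
172.16.1.65

[osds]
172.16.1.64
172.16.1.65
172.16.1.66

# [monitoring] may be named [grafana-server] on older ceph-ansible branches
[monitoring]
172.16.1.64

# no [rgws] section defined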

Share your group_vars files, inventory and full ceph-ansible log

---
dummy:


###########
# GENERAL #
###########
cluster: hn-lab
configure_firewall: True



############
# PACKAGES #
############

debian_package_dependencies: []
ntp_daemon_type: chronyd

upgrade_ceph_packages: False

###########
# INSTALL #
###########

ceph_repository_type: cdn
ceph_origin: repository
ceph_repository: community
ceph_stable_release: pacific
monitor_interface: ens4
mgmt_network: 172.16.0.0/16
public_network: 192.168.4.0/24
cluster_network: 192.168.5.0/24


######################
# CEPH CONFIGURATION #
######################

fsid: "{{ cluster_uuid.stdout }}"
generate_fsid: true
cephx: true


######################
# CEPH CONFIGURATION #
######################

ceph_conf_overrides:
  global:
    osd_pool_default_pg_num: 256
    osd_pool_default_size: 2
    osd_pool_default_min_size: 1

    # --> Allow pool deletion -- NOT RECOMMENDED
    mon_allow_pool_delete: true

    rbd_cache: true

    # Automatic CRUSH update on OSD start (set to false to manage the CRUSH map / OSD tree manually)
    osd_crush_update_on_start: true

    # Backfilling and recovery
    osd_max_backfills: 1
    osd_recovery_max_active: 1
    osd_recovery_max_single_start: 1
    osd_recovery_op_priority: 1

    # Osd recovery threads: 1
    osd_backfill_scan_max: 16
    osd_backfill_scan_min: 4
    mon_osd_backfillfull_ratio: 0.95

    # Scrubbing
    osd_max_scrubs: 1
    osd_scrub_during_recovery: false
    # osd scrub begin hour: 22 
    # osd scrub end hour: 4

    # Max PG / OSD
    mon_max_pg_per_osd: 500


# ceph_conf_overrides:
#  global:
#  mutex_perf_counter : True
#  throttler_perf_counter : False
#  auth_cluster_required: none
#  auth_service_required: none
#  auth_client_required: none
#  auth supported: none
#  osd objectstore: bluestore
#  cephx require signatures: False
#  cephx sign messages: False
#  mon_allow_pool_delete: True
#  mon_max_pg_per_osd: 800
#  mon pg warn max per osd: 800
#  ms_crc_header: True
#  ms_crc_data: False
#  ms type: async
#  perf: True
#  rocksdb_perf: True
#  osd_pool_default_size: 2
#  debug asok: 0/0
#  debug auth: 0/0
#  debug bluefs: 0/0
#  debug bluestore: 0/0
#  debug buffer: 0/0
#  debug client: 0/0
#  debug context: 0/0
#  debug crush: 0/0
#  debug filer: 0/0
#  debug filestore: 0/0
#  debug finisher: 0/0
#  debug hadoop: 0/0
#  debug heartbeatmap: 0/0
#  debug journal: 0/0
#  debug journaler: 0/0
#  debug lockdep: 0/0
#  debug log: 0
#  debug mon: 0/0
#  debug monc: 0/0
#  debug ms: 0/0
#  debug objclass: 0/0
#  debug objectcacher: 0/0
#  debug objecter: 0/0
#  debug optracker: 0/0
#  debug osd: 0/0
#  debug paxos: 0/0
#  debug perfcounter: 0/0
#  debug rados: 0/0
#  debug rbd: 0/0
#  debug rgw: 0/0
#  debug rocksdb: 0/0
#  debug throttle: 0/0
#  debug timer: 0/0
#  debug tp: 0/0
#  debug zs: 0/0
#  mon:
#  mon_max_pool_pg_num: 166496
#  mon_osd_max_split_count: 10000
#  client:
#  rbd_cache: false
#  rbd_cache_writethrough_until_flush: false
#  osd:
#  osd_min_pg_log_entries: 10
#  osd_max_pg_log_entries: 10
#  osd_pg_log_dups_tracked: 10
#  osd_pg_log_trim_min: 10
#  bluestore_block_db_size: 15360000000
#  bluestore_block_wal_size: 15360000000
#  bluestore_csum_type: none
#  bluestore_cache_kv_max: 200G
#  bluestore_cache_kv_ratio: 0.2
#  bluestore_cache_meta_ratio: 0.8
#  bluestore_cache_size_ssd: 18G
#  bluestore_extent_map_shard_min_size: 50
#  bluestore_extent_map_shard_max_size: 200
#  bluestore_extent_map_shard_target_size: 100
#  disable_transparent_hugepage: true
#  journal_queue_max_ops : 8092
#  journal_queue_max_bytes : 1048576000
#  ms_dispatch_throttle_bytes : 1048576000
#  objecter_inflight_ops : 10240
#  objecter_inflight_op_bytes : 1048576000
#  journal_max_write_entries : 5000
#  journal_max_write_bytes : 1048576000
#  osd_enable_op_tracker: false
#  osd_op_num_threads_per_shard: 2

#############
# OS TUNING #
#############

os_tuning_params:
 - { name: kernel.pid_max, value: 4194303 }
 - { name: fs.file-max, value: 26234859 }
 - { name: vm.zone_reclaim_mode, value: 0 }
 - { name: vm.swappiness, value: 1 }
 - { name: vm.min_free_kbytes, value: 1000000 }
 - { name: net.core.rmem_max, value: 268435456 }
 - { name: net.core.wmem_max, value: 268435456 }
 - { name: net.ipv4.tcp_rmem, value: "4096 87380 134217728" }
 - { name: net.ipv4.tcp_wmem, value: "4096 65536 134217728" }
# ceph_tcmalloc_max_total_thread_cache: 134217728


#############
# OPENSTACK #
#############

openstack_config: true

openstack_glance_pool:
  name: "images"
#   rule_name: "my_replicated_rule"
  application: "rbd"
#   pg_autoscale_mode: true
#   pg_num: 16
#   pgp_num: 16
#   target_size_ratio: 0.2
openstack_cinder_pool:
  name: "volumes"
  application: "rbd"
openstack_nova_pool:
  name: "vms"
  application: "rbd"

openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
  - "{{ openstack_nova_pool }}"

openstack_keys:
  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}"}, mode: "0600" }

#############
# DASHBOARD #
#############
dashboard_enabled: True
dashboard_protocol: https
dashboard_port: 8443
dashboard_network: "{{ public_network }}"
dashboard_admin_user: admin
dashboard_admin_user_ro: false
dashboard_admin_password: Infras_2022
#dashboard_crt: ''
#dashboard_key: ''
#dashboard_certificate_cn: ceph-dashboard
#dashboard_tls_external: false
#dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
#dashboard_frontend_vip: ''
#dashboard_disabled_features: []

#prometheus_frontend_vip: ''
#alertmanager_frontend_vip: ''

node_exporter_container_image: "docker.io/prom/node-exporter:v0.17.0"
node_exporter_port: 9100

grafana_admin_user: admin
grafana_admin_password: Infras_2022
grafana_container_image: "docker.io/grafana/grafana:6.7.4"
grafana_container_cpu_period: 100000
grafana_container_cpu_cores: 1
grafana_container_memory: 1
grafana_datasource: Dashboard
grafana_dashboards_path: "/etc/grafana/dashboards/ceph-dashboard"
grafana_dashboard_version: pacific 
grafana_dashboard_files:
  - ceph-cluster.json
  - cephfs-overview.json
  - host-details.json
  - hosts-overview.json
  - osd-device-details.json
  - osds-overview.json
  - pool-detail.json
  - pool-overview.json
  - rbd-details.json
  - rbd-overview.json
grafana_plugins:
 - vonage-status-panel
 - grafana-piechart-panel
#grafana_allow_embedding: True
#grafana_port: 3000
grafana_network: "{{ public_network }}"
#grafana_conf_overrides: {}

prometheus_container_image: "docker.io/prom/prometheus:v2.7.2"
prometheus_container_cpu_period: 100000
prometheus_container_cpu_cores: 1
prometheus_container_memory: 1
prometheus_data_dir: /var/lib/prometheus
prometheus_conf_dir: /etc/prometheus
#prometheus_conf_overrides: {}
prometheus_storage_tsdb_retention_time: 15d

alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2"
alertmanager_container_cpu_period: 100000
alertmanager_container_cpu_cores: 1
alertmanager_container_memory: 1
alertmanager_data_dir: /var/lib/alertmanager
alertmanager_conf_dir: /etc/alertmanager
alertmanager_conf_overrides: {}
#alertmanager_dashboard_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not dashboard_crt and not dashboard_key else false }}"

Ansible log

************************************************************************************************************************************************************
2022-04-19 14:31:54,502 p=2709263 u=root n=ansible | Tuesday 19 April 2022  14:31:54 +0700 (0:00:00.058)       0:04:57.228 ********* 
2022-04-19 14:31:54,569 p=2709263 u=root n=ansible | ok: [172.16.1.64 -> 172.16.1.64] => (item=172.16.1.64)
2022-04-19 14:31:54,590 p=2709263 u=root n=ansible | ok: [172.16.1.64 -> 172.16.1.65] => (item=172.16.1.65)
2022-04-19 14:31:54,592 p=2709263 u=root n=ansible | ok: [172.16.1.65 -> 172.16.1.64] => (item=172.16.1.64)
2022-04-19 14:31:54,618 p=2709263 u=root n=ansible | ok: [172.16.1.65 -> 172.16.1.65] => (item=172.16.1.65)
2022-04-19 14:31:54,631 p=2709263 u=root n=ansible | TASK [ceph-dashboard : get current mgr backend - ipv6] ************************************************************************************************************************************************************
2022-04-19 14:31:54,631 p=2709263 u=root n=ansible | Tuesday 19 April 2022  14:31:54 +0700 (0:00:00.129)       0:04:57.358 ********* 
2022-04-19 14:31:54,688 p=2709263 u=root n=ansible | skipping: [172.16.1.64] => (item=172.16.1.64) 
2022-04-19 14:31:54,706 p=2709263 u=root n=ansible | skipping: [172.16.1.64] => (item=172.16.1.65) 
2022-04-19 14:31:54,712 p=2709263 u=root n=ansible | skipping: [172.16.1.65] => (item=172.16.1.64) 
2022-04-19 14:31:54,727 p=2709263 u=root n=ansible | skipping: [172.16.1.65] => (item=172.16.1.65) 
2022-04-19 14:31:54,737 p=2709263 u=root n=ansible | TASK [include_role : ceph-facts] **********************************************************************************************************************************************************************************
2022-04-19 14:31:54,738 p=2709263 u=root n=ansible | Tuesday 19 April 2022  14:31:54 +0700 (0:00:00.105)       0:04:57.464 ********* 
2022-04-19 14:31:54,767 p=2709263 u=root n=ansible | fatal: [172.16.1.64]: FAILED! => 
  msg: '''dict object'' has no attribute ''rgws'''
2022-04-19 14:31:54,778 p=2709263 u=root n=ansible | fatal: [172.16.1.65]: FAILED! => 
  msg: '''dict object'' has no attribute ''rgws'''
2022-04-19 14:31:54,780 p=2709263 u=root n=ansible | PLAY RECAP ********************************************************************************************************************************************************************************************************
2022-04-19 14:31:54,780 p=2709263 u=root n=ansible | 172.16.1.64                : ok=400  changed=24   unreachable=0    failed=1    skipped=586  rescued=0    ignored=0   
2022-04-19 14:31:54,781 p=2709263 u=root n=ansible | 172.16.1.65                : ok=278  changed=12   unreachable=0    failed=1    skipped=511  rescued=0    ignored=0   
2022-04-19 14:31:54,781 p=2709263 u=root n=ansible | 172.16.1.66                : ok=199  changed=11   unreachable=0    failed=0    skipped=392  rescued=0    ignored=0   
2022-04-19 14:31:54,781 p=2709263 u=root n=ansible | INSTALLER STATUS **************************************************************************************************************************************************************************************************
2022-04-19 14:31:54,784 p=2709263 u=root n=ansible | Install Ceph Monitor           : Complete (0:00:29)
2022-04-19 14:31:54,784 p=2709263 u=root n=ansible | Install Ceph Manager           : Complete (0:00:24)
2022-04-19 14:31:54,785 p=2709263 u=root n=ansible | Install Ceph OSD               : Complete (0:00:46)
2022-04-19 14:31:54,785 p=2709263 u=root n=ansible | Install Ceph Dashboard         : In Progress (0:00:09)
2022-04-19 14:31:54,785 p=2709263 u=root n=ansible | 	This phase can be restarted by running: roles/ceph-dashboard/tasks/main.yml
2022-04-19 14:31:54,786 p=2709263 u=root n=ansible | Install Ceph Grafana           : Complete (0:00:55)
2022-04-19 14:31:54,786 p=2709263 u=root n=ansible | Install Ceph Node Exporter     : Complete (0:00:24)
2022-04-19 14:31:54,786 p=2709263 u=root n=ansible | Tuesday 19 April 2022  14:31:54 +0700 (0:00:00.048)       0:04:57.513 ********* 
2022-04-19 14:31:54,787 p=2709263 u=root n=ansible | =============================================================================== 
2022-04-19 14:31:54,792 p=2709263 u=root n=ansible | ceph-infra : update cache for Debian based OSs ------------------------------------------------------------------------------------------------------------------------------------------------------------ 42.39s
2022-04-19 14:31:54,792 p=2709263 u=root n=ansible | check for python ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 14.90s
2022-04-19 14:31:54,792 p=2709263 u=root n=ansible | ceph-grafana : wait for grafana to start ------------------------------------------------------------------------------------------------------------------------------------------------------------------ 12.36s
2022-04-19 14:31:54,792 p=2709263 u=root n=ansible | ceph-grafana : make sure grafana is down ------------------------------------------------------------------------------------------------------------------------------------------------------------------ 10.04s
2022-04-19 14:31:54,793 p=2709263 u=root n=ansible | ceph-grafana : download ceph grafana dashboards ------------------------------------------------------------------------------------------------------------------------------------------------------------ 9.68s
2022-04-19 14:31:54,793 p=2709263 u=root n=ansible | gather and delegate facts ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 6.50s
2022-04-19 14:31:54,793 p=2709263 u=root n=ansible | ceph-infra : disable time sync using timesyncd if we are not using it -------------------------------------------------------------------------------------------------------------------------------------- 5.33s
2022-04-19 14:31:54,793 p=2709263 u=root n=ansible | ceph-config : look up for ceph-volume rejected devices ----------------------------------------------------------------------------------------------------------------------------------------------------- 4.28s
2022-04-19 14:31:54,793 p=2709263 u=root n=ansible | ceph-osd : create openstack pool(s) ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 3.96s
2022-04-19 14:31:54,794 p=2709263 u=root n=ansible | ceph-osd : apply operating system tuning ------------------------------------------------------------------------------------------------------------------------------------------------------------------- 3.55s
2022-04-19 14:31:54,794 p=2709263 u=root n=ansible | ceph-validate : get devices information -------------------------------------------------------------------------------------------------------------------------------------------------------------------- 3.47s
2022-04-19 14:31:54,794 p=2709263 u=root n=ansible | ceph-config : look up for ceph-volume rejected devices ----------------------------------------------------------------------------------------------------------------------------------------------------- 3.33s
2022-04-19 14:31:54,794 p=2709263 u=root n=ansible | ceph-common : install dependencies for apt modules --------------------------------------------------------------------------------------------------------------------------------------------------------- 3.30s
2022-04-19 14:31:54,794 p=2709263 u=root n=ansible | ceph-container-engine : allow apt to use a repository over https (debian) ---------------------------------------------------------------------------------------------------------------------------------- 2.98s
2022-04-19 14:31:54,794 p=2709263 u=root n=ansible | ceph-infra : enable chronyd -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2.98s
2022-04-19 14:31:54,795 p=2709263 u=root n=ansible | ceph-container-engine : install container packages --------------------------------------------------------------------------------------------------------------------------------------------------------- 2.85s
2022-04-19 14:31:54,795 p=2709263 u=root n=ansible | ceph-common : configure debian ceph community repository stable key ---------------------------------------------------------------------------------------------------------------------------------------- 2.71s
2022-04-19 14:31:54,795 p=2709263 u=root n=ansible | ceph-osd : copy ceph key(s) if needed ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2.65s
2022-04-19 14:31:54,795 p=2709263 u=root n=ansible | ceph-mon : fetch ceph initial keys ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2.35s
2022-04-19 14:31:54,795 p=2709263 u=root n=ansible | ceph-config : look up for ceph-volume rejected devices ----------------------------------------------------------------------------------------------------------------------------------------------------- 2.00s
root@cephadmin:~/ceph-ansible# 

Environment:

  • OS (e.g. from /etc/os-release): 20.04.4
  • Kernel (e.g. uname -a): 5.4.0-107
  • Ansible version (e.g. ansible-playbook --version): 2.9.27
  • ceph-ansible version (e.g. git head or tag or stable branch):
  • Ceph version (e.g. ceph -v): 16.2.7

PR for the fix #7157

That's not my issue.
My issue is:

  • I deploy only RBD (no RGW and no MDS), as you can see in my playbook settings
  • But the dashboard installation still requires the [rgws] group
guits commented

I think what you are looking for is #7155

You saved my day. Thank you!
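
For anyone landing here before picking up the fix: the message 'dict object' has no attribute 'rgws' is the generic Ansible/Jinja2 error you get when something indexes groups['rgws'] while the inventory defines no such group. The sketch below is not the actual ceph-facts code, only an illustration of that pattern and of the usual guard; declaring an empty [rgws] section in the inventory also typically sidesteps the error, since groups['rgws'] then resolves to an empty list.

# Sketch only -- illustrates the failure mode, not ceph-ansible source.
- name: fails when the inventory has no [rgws] group
  debug:
    msg: "{{ groups['rgws'] | length }}"

- name: tolerates a missing [rgws] group
  debug:
    msg: "{{ groups.get('rgws', []) | length }}"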