;
; Sample ceph ceph.conf file.
;
; This file defines cluster membership, the various locations
; that Ceph stores data, and any other runtime options.

; If a 'host' is defined for a daemon, the start/stop script will
; verify that it matches the hostname (or else ignore it). If it is
; not defined, it is assumed that the daemon is intended to start on
; the current host (e.g., in a setup with a startup.conf on each
; node).

; global
[global]
        pid file = /var/run/ceph/$name.pid

        ; some minimal logging (just message traffic) to aid debugging
        debug ms = 1

; monitor
[mon]
        mon data = /data/mon$id

[mon0]
        host = alpha
        mon addr = 192.168.0.10:6789

[mon1]
        host = beta
        mon addr = 192.168.0.11:6789

[mon2]
        host = gamma
        mon addr = 192.168.0.12:6789

; mds
[mds]

[mds.alpha]
        host = alpha

; osd
[osd]
        sudo = true
        osd data = /data/osd$id

[osd0]
        host = alpha
        ; if 'btrfs devs' is not specified, you're responsible for
        ; setting up the 'osd data' dir. if it is not btrfs, things
        ; will behave up until you try to recover from a crash (which
        ; is usually fine for basic testing).
        btrfs devs = /dev/sdx

[osd1]
        host = alpha
        btrfs devs = /dev/sdy

[osd2]
        host = beta
        btrfs devs = /dev/sdx

[osd3]
        host = beta
        btrfs devs = /dev/sdy

; access control
[group everyone]
        ; you probably want to limit this to a small subnet or a
        ; list of hosts. clients are fully trusted.
        addr = 0.0.0.0/0

[mount /]
        allow = %everyone
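
; The $name and $id metavariables used above are expanded per daemon
; instance, so a single line in a daemon-type section gives each
; instance its own path. A minimal sketch applying the same idea to
; logging; the 'log file' option and path below are an assumed
; illustration, not part of the original sample:
;
; [global]
;         log file = /var/log/ceph/$name.log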