MongoDB
MongoDB sharded cluster is configured but does not work properly
I have configured a sharded cluster on AWS with a replica set made up of two servers, one config server, and one front-end (mongos) server, running MongoDB 3.4. I mostly followed this tutorial:
The configuration seems correct. However, when I insert 1,000,000 documents into the clients collection, the data is never split into chunks.
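For reference, the sharding setup and the test insert were roughly along these lines (a sketch; the namespace and shard key match the status output below, the insert loop itself is only illustrative):

// run against mongos
sh.enableSharding("tests")
sh.shardCollection("tests.clients", { num: 1 })   // ranged shard key on "num"

// illustrative bulk insert of 1,000,000 small test documents
use tests
var bulk = db.clients.initializeUnorderedBulkOp();
for (var i = 0; i < 1000000; i++) {
    bulk.insert({ num: i, name: "client" + i });
}
bulk.execute();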
Can you help?
mongos> db.printShardingStatus({verbose:true})
--- Sharding Status ---
  sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("59fb2f9173d9611f98c95eb3")
  }
  shards:
    {  "_id" : "rs1",  "host" : "rs1/172.31.12.34:27018,172.31.13.80:27018",  "state" : 1 }
  active mongoses:
    {  "_id" : "ip-172-31-25-254:27017",  "ping" : ISODate("2017-11-02T23:04:26.780Z"),  "up" : NumberLong(14602),  "waiting" : true,  "mongoVersion" : "3.4.10" }
  autosplit:
    Currently enabled: yes
  balancer:
    Currently enabled:  yes
    Currently running:  no
    NaN
    Failed balancer rounds in last 5 attempts:  5
    Last reported error:  could not find host matching read preference { mode: "primary" } for set rs2
    Time of Reported error:  Thu Nov 02 2017 18:56:13 GMT+0000 (UTC)
    Migration Results for the last 24 hours:
      No recent migrations
  databases:
    {  "_id" : "tests",  "primary" : "rs1",  "partitioned" : true }
      tests.clients
        shard key: { "num" : 1 }
        unique: false
        balancing: true
        chunks:
          rs1  1
        { "num" : { "$minKey" : 1 } } -->> { "num" : { "$maxKey" : 1 } } on : rs1 Timestamp(1, 0)

mongos> db.clients.stats()
{
  "sharded" : true,
  "capped" : false,
  "ns" : "tests.clients",
  "count" : 1000000,
  "size" : 35000000,
  "storageSize" : 12296192,
  "totalIndexSize" : 9867264,
  "indexSizes" : { "_id_" : 9867264 },
  "avgObjSize" : 35,
  "nindexes" : 1,
  "nchunks" : 1,
  "shards" : {
    "rs1" : {
      "ns" : "tests.clients",
      "size" : 35000000,
      "count" : 1000000,
      "avgObjSize" : 35,
      "storageSize" : 12296192,
      "capped" : false,
      "wiredTiger" : {
        "metadata" : { "formatVersion" : 1 },
        "creationString" : "access_pattern_hint=none,allocation_size=4KB,app_metadata=(formatVersion=1),block_allocation=best,block_compressor=snappy,cache_resident=false,checksum=on,colgroups=,collator=,columns=,dictionary=0,encryption=(keyid=,name=),exclusive=false,extractor=,format=btree,huffman_key=,huffman_value=,ignore_in_memory_cache_size=false,immutable=false,internal_item_max=0,internal_key_max=0,internal_key_truncate=true,internal_page_max=4KB,key_format=q,key_gap=10,leaf_item_max=0,leaf_key_max=0,leaf_page_max=32KB,leaf_value_max=64MB,log=(enabled=true),lsm=(auto_throttle=true,bloom=true,bloom_bit_count=16,bloom_config=,bloom_hash_count=8,bloom_oldest=false,chunk_count_limit=0,chunk_max=5GB,chunk_size=10MB,merge_max=15,merge_min=0),memory_page_max=10m,os_cache_dirty_max=0,os_cache_max=0,prefix_compression=false,prefix_compression_min=4,source=,split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,type=file,value_format=u",
        "type" : "file",
        "uri" : "statistics:table:collection-11-1418402185877323060",
        "LSM" : { "bloom filter false positives" : 0, "bloom filter hits" : 0, "bloom filter misses" : 0, "bloom filter pages evicted from cache" : 0, "bloom filter pages read into cache" : 0, "bloom filters in the LSM tree" : 0, "chunks in the LSM tree" : 0, "highest merge generation in the LSM tree" : 0, "queries that could have benefited from a Bloom filter that did not exist" : 0, "sleep for LSM checkpoint throttle" : 0, "sleep for LSM merge throttle" : 0, "total size of bloom filters" : 0 },
        "block-manager" : { "allocations requiring file extension" : 1454, "blocks allocated" : 1622, "blocks freed" : 109, "checkpoint size" : 12238848, "file allocation unit size" : 4096, "file bytes available for reuse" : 40960, "file magic number" : 120897, "file major version number" : 1, "file size in bytes" : 12296192, "minor version number" : 0 },
        "btree" : { "btree checkpoint generation" : 379, "column-store fixed-size leaf pages" : 0, "column-store internal pages" : 0, "column-store variable-size RLE encoded values" : 0, "column-store variable-size deleted values" : 0, "column-store variable-size leaf pages" : 0, "fixed-record size" : 0, "maximum internal page key size" : 368, "maximum internal page size" : 4096, "maximum leaf page key size" : 2867, "maximum leaf page size" : 32768, "maximum leaf page value size" : 67108864, "maximum tree depth" : 3, "number of key/value pairs" : 0, "overflow pages" : 0, "pages rewritten by compaction" : 0, "row-store internal pages" : 0, "row-store leaf pages" : 0 },
        "cache" : { "bytes currently in the cache" : 10945398, "bytes read into cache" : 0, "bytes written from cache" : 43275208, "checkpoint blocked page eviction" : 0, "data source pages selected for eviction unable to be evicted" : 0, "hazard pointer blocked page eviction" : 0, "in-memory page passed criteria to be split" : 18, "in-memory page splits" : 9, "internal pages evicted" : 0, "internal pages split during eviction" : 0, "leaf pages split during eviction" : 29, "modified pages evicted" : 29, "overflow pages read into cache" : 0, "overflow values cached in memory" : 0, "page split during eviction deepened the tree" : 0, "page written requiring lookaside records" : 0, "pages read into cache" : 0, "pages read into cache requiring lookaside entries" : 0, "pages requested from the cache" : 1000028, "pages written from cache" : 1577, "pages written requiring in-memory restoration" : 2, "tracked dirty bytes in the cache" : 0, "unmodified pages evicted" : 1195 },
        "cache_walk" : { "Average difference between current eviction generation when the page was last considered" : 0, "Average on-disk page image size seen" : 0, "Clean pages currently in cache" : 0, "Current eviction generation" : 0, "Dirty pages currently in cache" : 0, "Entries in the root page" : 0, "Internal pages currently in cache" : 0, "Leaf pages currently in cache" : 0, "Maximum difference between current eviction generation when the page was last considered" : 0, "Maximum page size seen" : 0, "Minimum on-disk page image size seen" : 0, "On-disk page image sizes smaller than a single allocation unit" : 0, "Pages created in memory and never written" : 0, "Pages currently queued for eviction" : 0, "Pages that could not be queued for eviction" : 0, "Refs skipped during cache traversal" : 0, "Size of the root page" : 0, "Total number of pages currently in cache" : 0 },
        "compression" : { "compressed pages read" : 0, "compressed pages written" : 1526, "page written failed to compress" : 0, "page written was too small to compress" : 51, "raw compression call failed, additional data available" : 0, "raw compression call failed, no additional data available" : 0, "raw compression call succeeded" : 0 },
        "cursor" : { "bulk-loaded cursor-insert calls" : 0, "create calls" : 5, "cursor-insert key and value bytes inserted" : 38917635, "cursor-remove key bytes removed" : 0, "cursor-update value bytes updated" : 0, "insert calls" : 1000000, "next calls" : 1, "prev calls" : 1, "remove calls" : 0, "reset calls" : 1000002, "restarted searches" : 0, "search calls" : 0, "search near calls" : 0, "truncate calls" : 0, "update calls" : 0 },
        "reconciliation" : { "dictionary matches" : 0, "fast-path pages deleted" : 0, "internal page key bytes discarded using suffix compression" : 4198, "internal page multi-block writes" : 18, "internal-page overflow keys" : 0, "leaf page key bytes discarded using prefix compression" : 0, "leaf page multi-block writes" : 52, "leaf-page overflow keys" : 0, "maximum blocks required for a page" : 54, "overflow values written" : 0, "page checksum matches" : 644, "page reconciliation calls" : 93, "page reconciliation calls for eviction" : 29, "pages deleted" : 0 },
        "session" : { "object compaction" : 0, "open cursor count" : 1 },
        "transaction" : { "update conflicts" : 0 }
      },
As your sh.status() output shows:
shards: { "_id" : "rs1", "host" : "rs1/172.31.12.34:27018,172.31.13.80:27018", "state" : 1 }
there is only one active shard in your cluster, so there is nothing for the balancer to balance across.
The error message says:
could not find host matching read preference { mode: "primary" } for set rs2
So check your configuration: is rs2 actually working (does it have a primary), has it been added to the cluster, and does it show up in sh.status()?
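As a concrete sketch (replace the placeholder host with your actual rs2 member), the check and the addShard call would look roughly like this:

// on a member of rs2: confirm the replica set has a PRIMARY
rs.status().members.forEach(function (m) { print(m.name, m.stateStr); })

// on mongos: add rs2 as a shard, then confirm it appears in the shard list
sh.addShard("rs2/<rs2-host>:27018")
sh.status()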
I added the second shard as you suggested. However, nothing changed. I dropped all existing documents and inserted 1,000,000 new ones.
mongos> sh.status()
--- Sharding Status ---
  sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("59fb2f9173d9611f98c95eb3")
  }
  shards:
    {  "_id" : "rs1",  "host" : "rs1/172.31.12.34:27018,172.31.13.80:27018",  "state" : 1 }
    {  "_id" : "rs2",  "host" : "rs2/ip-172-31-0-181:27018",  "state" : 1 }
  active mongoses:
    "3.4.10" : 1
  autosplit:
    Currently enabled: yes
  balancer:
    Currently enabled:  yes
    Currently running:  no
    NaN
    Failed balancer rounds in last 5 attempts:  5
    Last reported error:  could not find host matching read preference { mode: "primary" } for set rs2
    Time of Reported error:  Thu Nov 02 2017 18:56:13 GMT+0000 (UTC)
    Migration Results for the last 24 hours:
      No recent migrations
  databases:
    {  "_id" : "tests",  "primary" : "rs1",  "partitioned" : true }
      tests.clients
        shard key: { "num" : 1 }
        unique: false
        balancing: true
        chunks:
          rs1  1
        { "num" : { "$minKey" : 1 } } -->> { "num" : { "$maxKey" : 1 } } on : rs1 Timestamp(1, 0)
      tests.vquest_metadata
        shard key: { "_id" : 1 }
        unique: false
        balancing: true
        chunks:
          rs1  1
        { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : rs1 Timestamp(1, 0)

mongos>
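The balancer section still reports the earlier rs2 read-preference error, so the next thing to check is whether the rs2 host string registered in the config metadata is actually reachable from the mongos host (and from rs1, which has to push chunks to it). A sketch of those checks, run on mongos:

// inspect the registered shards and the chunk metadata for the collection
use config
db.shards.find().pretty()
db.chunks.find({ ns: "tests.clients" }).pretty()

// from the mongos (and rs1) machine, confirm the rs2 member answers, e.g. with:
//   mongo --host ip-172-31-0-181 --port 27018 --eval "db.isMaster()"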