
I am running MongoDB for data storage. The cluster has 3 shards, each shard a 3-server replica set, plus 2 mongos routers and 3 config servers; every server has 1 TB of storage. Two of the three shards are now at about 90% capacity, so I added a new shard, but MongoDB does not move any chunks from the old shards to the new one, even though the sharding status on mongos shows the balancer is running.
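For reference, this is roughly how the new shard was added and how I check the balancer from the mongos shell (the rs3 host string is taken from the status output below; these are the standard 3.4 shell helpers):

mongos> sh.addShard("rs3/10.5.36.152:27017,10.5.36.156:27017,10.5.36.164:27017")
mongos> sh.getBalancerState()    // true  -> the balancer is enabled
mongos> sh.isBalancerRunning()   // true  -> a balancer round is in progress

The full db.printShardingStatus() output follows.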

mongos> db.printShardingStatus()
--- Sharding Status ---
  sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("59c0ef31619ac70cb8ac5f5c")
}
  shards:
    { "_id" : "rs0", "host" : "rs0/10.5.36.88:27017,10.5.36.92:27017,10.5.36.93:27017", "state" : 1, "maxSize" : 990000 }
    { "_id" : "rs1", "host" : "rs1/10.5.36.101:27017,10.5.36.103:27017,10.5.36.97:27017", "state" : 1, "maxSize" : 990000 }
    { "_id" : "rs2", "host" : "rs2/10.5.36.100:27017,10.5.36.117:27017,10.5.36.126:27017", "state" : 1, "maxSize" : 990000 }
    { "_id" : "rs3", "host" : "rs3/10.5.36.152:27017,10.5.36.156:27017,10.5.36.164:27017", "state" : 1, "maxSize" : 990000 }
  active mongoses:
    "3.4.9" : 1
  autosplit:
    Currently enabled: yes
  balancer:
    Currently enabled: yes
    Currently running: yes
        Balancer lock taken at Wed Sep 20 2017 09:21:43 GMT+0700 by ConfigServer:Balancer
    Collections with active migrations:
        fbgroups.comments started at Wed Nov 22 2017 22:36:15 GMT+0700
    Failed balancer rounds in last 5 attempts: 0
    Migration Results for the last 24 hours:
        No recent migrations
  databases:
    { "_id" : "fbpages", "primary" : "rs0", "partitioned" : true }
        fbpages.comments
            shard key: { "CommentFbId" : 1 }
            unique: true
            balancing: true
            chunks:
                rs0    6263
                rs1    6652
                rs2    6175
            too many chunks to print, use verbose if you want to force print
        fbpages.links
            shard key: { "PageFbId" : 1 }
            unique: true
            balancing: true
            chunks:
                rs0    23
                rs1    23
                rs2    23
            too many chunks to print, use verbose if you want to force print
        fbpages.posts
            shard key: { "PostFbId" : 1 }
            unique: true
            balancing: true
            chunks:
                rs0    11931
                rs1    11847
                rs2    5043
            too many chunks to print, use verbose if you want to force print
    { "_id" : "fbgroups", "primary" : "rs0", "partitioned" : true }
        fbgroups.comments
            shard key: { "CommentFbId" : 1 }
            unique: true
            balancing: true
            chunks:
                rs0    6451
                rs1    6451
                rs2    4742
            too many chunks to print, use verbose if you want to force print
        fbgroups.links
            shard key: { "GroupId" : 1 }
            unique: true
            balancing: true
            chunks:
                rs0    3
                rs1    3
                rs2    3
            { "GroupId" : { "$minKey" : 1 } } -->> { "GroupId" : "1391082767860588" } on : rs2 Timestamp(7, 0)
            { "GroupId" : "1391082767860588" } -->> { "GroupId" : "1564129037230139" } on : rs0 Timestamp(7, 1)
            { "GroupId" : "1564129037230139" } -->> { "GroupId" : "172020656162023" } on : rs0 Timestamp(4, 0)
            { "GroupId" : "172020656162023" } -->> { "GroupId" : "244621675585655" } on : rs0 Timestamp(5, 0)
            { "GroupId" : "244621675585655" } -->> { "GroupId" : "375231932588613" } on : rs2 Timestamp(6, 0)
            { "GroupId" : "375231932588613" } -->> { "GroupId" : "506856652708047" } on : rs2 Timestamp(8, 0)
            { "GroupId" : "506856652708047" } -->> { "GroupId" : "67046218160" } on : rs1 Timestamp(8, 1)
            { "GroupId" : "67046218160" } -->> { "GroupId" : "878610618830881" } on : rs1 Timestamp(1, 7)
            { "GroupId" : "878610618830881" } -->> { "GroupId" : { "$maxKey" : 1 } } on : rs1 Timestamp(1, 8)
        fbgroups.postdata
            shard key: { "_id" : 1 }
            unique: false
            balancing: true
            chunks:
                rs0    91
                rs1    482
                rs2    91
            too many chunks to print, use verbose if you want to force print
        fbgroups.posts
            shard key: { "PostFbId" : 1 }
            unique: true
            balancing: true
            chunks:
                rs0    26015
                rs1    26092
                rs2    6526
            too many chunks to print, use verbose if you want to force print
    { "_id" : "test", "primary" : "rs1", "partitioned" : true }
    { "_id" : "intership", "primary" : "rs1", "partitioned" : false }
    { "_id" : "fbhashtags", "primary" : "rs2", "partitioned" : true }
        fbhashtags.postdata
            shard key: { "_id" : 1 }
            unique: false
            balancing: true
            chunks:
                rs0    2
                rs1    2
                rs2    2
            { "_id" : { "$minKey" : 1 } } -->> { "_id" : ObjectId("58f122ff7fe5fb4520b4185a") } on : rs0 Timestamp(2, 0)
            { "_id" : ObjectId("58f122ff7fe5fb4520b4185a") } -->> { "_id" : ObjectId("58fac0537fe5fb051d0749de") } on : rs1 Timestamp(3, 0)
            { "_id" : ObjectId("58fac0537fe5fb051d0749de") } -->> { "_id" : ObjectId("5906119e7fe5fb2c7d9d41e9") } on : rs0 Timestamp(4, 0)
            { "_id" : ObjectId("5906119e7fe5fb2c7d9d41e9") } -->> { "_id" : ObjectId("591012257fe5fb70dc9e49bf") } on : rs1 Timestamp(5, 0)
            { "_id" : ObjectId("591012257fe5fb70dc9e49bf") } -->> { "_id" : ObjectId("5918b5d77fe5fb2feb06338a") } on : rs2 Timestamp(5, 1)
            { "_id" : ObjectId("5918b5d77fe5fb2feb06338a") } -->> { "_id" : { "$maxKey" : 1 } } on : rs2 Timestamp(1, 5)
        fbhashtags.posts
            shard key: { "_id" : 1 }
            unique: false
            balancing: true
            chunks:
                rs2    1
            { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : rs2 Timestamp(1, 0)
    { "_id" : "fbprofiles", "primary" : "rs2", "partitioned" : true }
        fbprofiles.fbcomments
            shard key: { "commentFbId" : 1 }
            unique: true
            balancing: true
            chunks:
                rs0    18
                rs1    18
                rs2    19
            too many chunks to print, use verbose if you want to force print
        fbprofiles.fbposts
            shard key: { "postFbId" : 1 }
            unique: true
            balancing: true
            chunks:
                rs0    7
                rs1    7
                rs2    3144
            too many chunks to print, use verbose if you want to force print
        fbprofiles.fbprofiles
            shard key: { "baseUrl" : 1 }
            unique: true
            balancing: true
            chunks:
                rs0    2
                rs1    2
                rs2    141
            too many chunks to print, use verbose if you want to force print
    { "_id" : "testnewfb", "primary" : "rs2", "partitioned" : false }
    { "_id" : "news_images", "primary" : "rs2", "partitioned" : false }
    { "_id" : "social_index", "primary" : "rs2", "partitioned" : false }
    { "_id" : "twitter", "primary" : "rs2", "partitioned" : true }
    { "_id" : "techmeme", "primary" : "rs2", "partitioned" : false }
Why doesn't MongoDB move data to the new shard (rs3)?
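One thing I notice in the output above: the balancer lock was taken back in September, the active migration on fbgroups.comments started in November, and yet the last 24 hours show no migrations, which looks like a stuck migration. This is how I inspected it from mongos (a sketch; the changelog field values and what currentOp reports can vary between versions):

mongos> use config
mongos> db.locks.find({ _id : "balancer" }).pretty()                           // who currently holds the balancer lock
mongos> db.changelog.find({ what : /moveChunk/ }).sort({ time : -1 }).limit(5) // most recent migration steps
mongos> db.currentOp(true)                                                     // scan for a long-running moveChunk / migrateThread operation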


Answer


In my case I found the problem: one of the three Mongo config servers was not set up correctly in /etc/hosts on all hosts. I fixed /etc/hosts everywhere, ran stepDown() on the primary config server, and after that the cluster started working correctly again. It cost a lot of time. What bothers me is that the cluster could not recover its previous state on its own after a server crashed due to a power failure.
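Roughly the sequence, for anyone who hits the same thing (a sketch; "configrs" is a placeholder for the actual config replica set name, and the key point is that every machine must resolve the config server hostnames identically before stepping down):

// on the config server replica set: check which hostname each member advertises
configrs:PRIMARY> rs.conf().members.forEach(function (m) { print(m._id + "  " + m.host) })

// after /etc/hosts is consistent on every machine, step down the config primary
// so a fresh election picks up the corrected name resolution
configrs:PRIMARY> rs.stepDown()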
