The data import script is as follows:
import time
import sys
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
reload(sys)
sys.setdefaultencoding('utf-8')
def set_mapping(es, index_name="content_engine", doc_type_name="en"):
    # Create the index and register the mapping for the "en" doc type
    my_mapping = {
        "en": {
            "properties": {
                "a": {
                    "type": "string"
                },
                "b": {
                    "type": "string"
                }
            }
        }
    }
    create_index = es.indices.create(index=index_name, body=my_mapping)
    mapping_index = es.indices.put_mapping(index=index_name, doc_type=doc_type_name, body=my_mapping)
    if create_index["acknowledged"] != True or mapping_index["acknowledged"] != True:
        print "Index creation failed..."

def set_data(es, input_file, index_name="content_engine", doc_type_name="en"):
    # Read "a----b" lines from input_file and bulk-index them in batches of 500,000
    i = 0
    count = 0
    ACTIONS = []
    for line in open(input_file):
        fields = line.replace("\r\n", "").replace("\n", "").split("----")
        if len(fields) == 2:
            a, b = fields
        else:
            continue
        action = {
            "_index": index_name,
            "_type": doc_type_name,
            "_source": {
                "a": a,
                "b": b,
            }
        }
        i += 1
        ACTIONS.append(action)
        if i == 500000:
            success, _ = bulk(es, ACTIONS, index=index_name, raise_on_error=True)
            count += success
            i = 0
            ACTIONS = []
    # Flush whatever is left over after the loop
    success, _ = bulk(es, ACTIONS, index=index_name, raise_on_error=True)
    count += success
    print("insert %s lines" % count)

if __name__ == '__main__':
    es = Elasticsearch(hosts=["127.0.0.1:9200"], timeout=5000)
    set_mapping(es)
    set_data(es, sys.argv[1])
The data is about 5 GB, and the machine is a VM with 24 GB of RAM. At first there was no sign of a memory leak: the Python process held steady at around 1 GB. But after roughly 16 million rows had been inserted, memory started climbing and eventually reached 22 GB, which triggered the OOM killer and the kernel killed the Python process. It almost made me question my life choices. How do you all go about locating memory leaks in Python?
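For reference, one way to narrow down this kind of growth is to take allocation snapshots at intervals and diff them. On Python 3 the standard-library tracemalloc module can do this (the script above is Python 2, where objgraph or heapy fill a similar role). A minimal sketch under those assumptions; load_rows() and handle() are placeholders here, not anything from the actual script:

import tracemalloc

def load_rows():
    # Placeholder data source, standing in for reading the real input file
    for n in range(2000000):
        yield "a%d----b%d" % (n, n)

def handle(row):
    # Placeholder for building/sending the bulk actions
    return row.split("----")

tracemalloc.start(25)                       # keep up to 25 frames of traceback per allocation
baseline = tracemalloc.take_snapshot()

for n, row in enumerate(load_rows(), 1):
    handle(row)
    if n % 500000 == 0:                     # same interval the import script flushes at
        snapshot = tracemalloc.take_snapshot()
        # show the source lines whose allocated memory grew the most since the last snapshot
        for stat in snapshot.compare_to(baseline, "lineno")[:10]:
            print(stat)
        baseline = snapshot

Separately, if I remember right, elasticsearch.helpers.bulk also accepts a generator of actions together with a chunk_size argument, so the script would not need to hold 500,000 action dicts in a Python list at once; whether that has anything to do with the growth seen here, I can't say.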