Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

prepare-wmt14en2de.sh 3.8 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
  1. #!/bin/bash
  2. # Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh
  3. echo 'Cloning Moses github repository (for tokenization scripts)...'
  4. git clone https://github.com/moses-smt/mosesdecoder.git
  5. echo 'Cloning Subword NMT repository (for BPE pre-processing)...'
  6. git clone https://github.com/rsennrich/subword-nmt.git
  7. SCRIPTS=mosesdecoder/scripts
  8. TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl
  9. CLEAN=$SCRIPTS/training/clean-corpus-n.perl
  10. NORM_PUNC=$SCRIPTS/tokenizer/normalize-punctuation.perl
  11. REM_NON_PRINT_CHAR=$SCRIPTS/tokenizer/remove-non-printing-char.perl
  12. BPEROOT=subword-nmt
  13. BPE_TOKENS=40000
  14. URLS=(
  15. "http://statmt.org/wmt13/training-parallel-europarl-v7.tgz"
  16. "http://statmt.org/wmt13/training-parallel-commoncrawl.tgz"
  17. "http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz"
  18. "http://data.statmt.org/wmt17/translation-task/dev.tgz"
  19. "http://statmt.org/wmt14/test-full.tgz"
  20. )
  21. FILES=(
  22. "training-parallel-europarl-v7.tgz"
  23. "training-parallel-commoncrawl.tgz"
  24. "training-parallel-nc-v12.tgz"
  25. "dev.tgz"
  26. "test-full.tgz"
  27. )
  28. CORPORA=(
  29. "training/europarl-v7.de-en"
  30. "commoncrawl.de-en"
  31. "training/news-commentary-v12.de-en"
  32. )
  33. # This will make the dataset compatible to the one used in "Convolutional Sequence to Sequence Learning"
  34. # https://arxiv.org/abs/1705.03122
  35. if [ "$1" == "--icml17" ]; then
  36. URLS[2]="http://statmt.org/wmt14/training-parallel-nc-v9.tgz"
  37. FILES[2]="training-parallel-nc-v9.tgz"
  38. CORPORA[2]="training/news-commentary-v9.de-en"
  39. fi
  40. if [ ! -d "$SCRIPTS" ]; then
  41. echo "Please set SCRIPTS variable correctly to point to Moses scripts."
  42. exit
  43. fi
  44. src=en
  45. tgt=de
  46. lang=en-de
  47. prep=wmt14_en_de
  48. tmp=$prep/tmp
  49. orig=orig
  50. dev=dev/newstest2013
  51. mkdir -p $orig $tmp $prep
  52. cd $orig
  53. for ((i=0;i<${#URLS[@]};++i)); do
  54. file=${FILES[i]}
  55. if [ -f $file ]; then
  56. echo "$file already exists, skipping download"
  57. else
  58. url=${URLS[i]}
  59. wget "$url"
  60. if [ -f $file ]; then
  61. echo "$url successfully downloaded."
  62. else
  63. echo "$url not successfully downloaded."
  64. exit -1
  65. fi
  66. if [ ${file: -4} == ".tgz" ]; then
  67. tar zxvf $file
  68. elif [ ${file: -4} == ".tar" ]; then
  69. tar xvf $file
  70. fi
  71. fi
  72. done
  73. cd ..
  74. echo "pre-processing train data..."
  75. for l in $src $tgt; do
  76. rm $tmp/train.tags.$lang.tok.$l
  77. for f in "${CORPORA[@]}"; do
  78. cat $orig/$f.$l | \
  79. perl $NORM_PUNC $l | \
  80. perl $REM_NON_PRINT_CHAR | \
  81. perl $TOKENIZER -threads 8 -a -l $l >> $tmp/train.tags.$lang.tok.$l
  82. done
  83. done
  84. echo "pre-processing test data..."
  85. for l in $src $tgt; do
  86. if [ "$l" == "$src" ]; then
  87. t="src"
  88. else
  89. t="ref"
  90. fi
  91. grep '<seg id' $orig/test-full/newstest2014-deen-$t.$l.sgm | \
  92. sed -e 's/<seg id="[0-9]*">\s*//g' | \
  93. sed -e 's/\s*<\/seg>\s*//g' | \
  94. sed -e "s/\’/\'/g" | \
  95. perl $TOKENIZER -threads 8 -a -l $l > $tmp/test.$l
  96. echo ""
  97. done
  98. echo "splitting train and valid..."
  99. for l in $src $tgt; do
  100. awk '{if (NR%100 == 0) print $0; }' $tmp/train.tags.$lang.tok.$l > $tmp/valid.$l
  101. awk '{if (NR%100 != 0) print $0; }' $tmp/train.tags.$lang.tok.$l > $tmp/train.$l
  102. done
  103. TRAIN=$tmp/train.de-en
  104. BPE_CODE=$prep/code
  105. rm -f $TRAIN
  106. for l in $src $tgt; do
  107. cat $tmp/train.$l >> $TRAIN
  108. done
  109. echo "learn_bpe.py on ${TRAIN}..."
  110. python $BPEROOT/learn_bpe.py -s $BPE_TOKENS < $TRAIN > $BPE_CODE
  111. for L in $src $tgt; do
  112. for f in train.$L valid.$L test.$L; do
  113. echo "apply_bpe.py to ${f}..."
  114. python $BPEROOT/apply_bpe.py -c $BPE_CODE < $tmp/$f > $tmp/bpe.$f
  115. done
  116. done
  117. perl $CLEAN -ratio 1.5 $tmp/bpe.train $src $tgt $prep/train 1 250
  118. perl $CLEAN -ratio 1.5 $tmp/bpe.valid $src $tgt $prep/valid 1 250
  119. for L in $src $tgt; do
  120. cp $tmp/bpe.test.$L $prep/test.$L
  121. done
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...