Text to be summarized (scrollable select) |
Amplifier input scale (select) |
Selected conversation (scrollable)
#!/usr/bin/env bash
# Summarization pipeline: preprocess the input conversation, package the
# data files, then run beam-search decoding once per scaling factor.
#
# Usage: ./conversational.sh '<space-separated list of scale factors>'
#   e.g. ./conversational.sh '2 3 4 5 6 7 8 9 10 11 12'
set -euo pipefail
set -x

# Preprocess: embed the query/input text with word vectors and build
# topic/word features for the batch.
python code/pre_batch.py \
  -glove ../GoogleNews-vectors-negative300.bin \
  -query ./questions/query.txt \
  -preparse input_fed.txt \
  -features 1000 \
  -topics 100 \
  -words 15 \
  -word_tf_input 65
# Add "-verbose" to the command above for debugging output.
# (Previously this flag sat commented out after a trailing backslash,
# which spliced the comment into the continued command line.)

# Tokenize the stories and package them into the binary format the
# summarizer expects.
python code/make_datafiles.py \
  -stories ./input_fed.txt \
  -tokenized ./data_file/tokenized \
  -package ./data_file/finished_files

SUMMARIZATION_DIR="summarization_dir"
mkdir -p "${SUMMARIZATION_DIR}"
DECODED_DIR="./pretrained_model/decode_test_400maxenc_4beam_35mindec_100maxdec_ckpt-238410"

# $1 is a space-separated list of scale factors; split it into a real
# array instead of relying on unquoted word-splitting of a scalar.
read -r -a SCALE_ARR <<< "${1:?usage: $0 '<space-separated scale factors>'}"

for SCALE in "${SCALE_ARR[@]}"; do
  echo "${SCALE}"
  SUMMARY="./${SUMMARIZATION_DIR}/summarization_beam_search_${SCALE}.txt"
  SUMMARY_LOG="./${SUMMARIZATION_DIR}/log_summarization_beam_search_${SCALE}.txt"
  # Remove stale decode output from the previous scale; the :? guard
  # aborts rather than run "rm -rf" on an empty path.
  rm -rf -- "${DECODED_DIR:?}"
  # --data_path is left unquoted on purpose: the glob is expanded here,
  # matching the original behavior.
  python code/run_summarization.py \
    --mode=decode \
    --data_path=./data_file/finished_files/chunked/test_* \
    --vocab_path=./data_file/finished_files/vocab \
    --log_root=. \
    --exp_name=pretrained_model \
    --max_enc_steps=400 \
    --max_dec_steps=100 \
    --coverage=1 \
    --single_pass=1 \
    --scale="${SCALE}" \
    > "${SUMMARY_LOG}"
  # Collect the decoded beam-search output into one summary file.
  python code/parse_decoding.py \
    -decode "${DECODED_DIR}/decoded/" \
    -summary "${SUMMARY}"
  sort "${SUMMARY}" > "./${SUMMARIZATION_DIR}/summary_sort_${SCALE}.csv"
done
set +x
$ virtualenv env
$ source env/bin/activate
$ pip install -r requirements.txt
$ chmod +x ./conversational.sh
$ ./conversational.sh '2 3 4 5 6 7 8 9 10 11 12'
# Usage: ./conversational.sh '<list of scaling factors to be evaluated>'
Department of Computer Science, Columbia University
Department of Computer Science, Columbia University
Department of Computer Science, Columbia University