Python utils.merge_dicts() Examples
The following are 5 code examples of utils.merge_dicts(), drawn from open-source projects. The original source file and license are noted above each example.
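In each of these projects, utils.merge_dicts() combines a base dictionary of request parameters or features with a dictionary of overrides and returns the merged result. The exact helper differs from project to project, so the following is only a minimal sketch of the two-argument form used in Examples #1, #2, #4 and #5; the assumption that keys in the second dict take precedence and that the inputs are left unmodified is inferred from the call sites, not from any single project's source.

    def merge_dicts(base, overrides):
        """Return a new dict containing base updated with overrides.

        Minimal sketch only: each project's real utils.merge_dicts may
        differ (deep merging, variadic arguments, etc.).
        """
        merged = dict(base)       # shallow copy, so the base dict is not mutated
        merged.update(overrides)  # keys in overrides win on conflict
        return merged

    # Typical usage, mirroring the pykiteconnect tests below:
    # updated_params = merge_dicts(params, {"order_type": "MARKET", "tag": "mytag"})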
Example #1
Source File: test_connect_write.py From pykiteconnect with MIT License
def test_place_order_tag(kiteconnect):
    """Send custom tag and get it in orders."""
    tag = "mytag"
    updated_params = utils.merge_dicts(params, {
        "product": kiteconnect.PRODUCT_MIS,
        "variety": kiteconnect.VARIETY_REGULAR,
        "order_type": kiteconnect.ORDER_TYPE_MARKET,
        "tag": tag
    })

    order_id = kiteconnect.place_order(**updated_params)
    order_info = kiteconnect.order_history(order_id=order_id)
    assert order_info[0]["tag"] == tag

    try:
        cleanup_orders(kiteconnect, order_id)
    except Exception as e:
        warnings.warn(UserWarning("Error while cleaning up orders: {}".format(e)))
Example #2
Source File: test_connect_write.py From pykiteconnect with MIT License
def setup_order_modify_cancel(kiteconnect, variety):
    symbol = params["exchange"] + ":" + params["tradingsymbol"]
    ltp = kiteconnect.ltp(symbol)
    updated_params = utils.merge_dicts(params, {
        "product": kiteconnect.PRODUCT_MIS,
        "variety": variety,
        "order_type": kiteconnect.ORDER_TYPE_LIMIT
    })

    diff = ltp[symbol]["last_price"] * 0.01
    updated_params["price"] = ltp[symbol]["last_price"] - (diff - (diff % 1))

    order_id = kiteconnect.place_order(**updated_params)

    # delay order fetch so order is not in received state
    time.sleep(0.5)
    order = kiteconnect.order_history(order_id)
    status = order[-1]["status"].upper()

    if not is_pending_order(status):
        warnings.warn(UserWarning("Order is not open with status: {}".format(status)))
        return

    return (updated_params, order_id, order)
Example #3
Source File: eval_tvqa_plus.py From TVQAplus with MIT License
def main_eval():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--gt_path", type=str, default="data/tvqa_plus_val.json",
                        help="ground-truth json file path")
    parser.add_argument("--pred_path", type=str,
                        help="input prediction json file path, the same format as the results "
                             "returned by load_tvqa_plus_annotation func")
    parser.add_argument("--word2idx_path", type=str, default="data/word2idx.json",
                        help="word2idx json file path, provided with the evaluation code")
    parser.add_argument("--output_path", type=str, help="path to store the calculated metrics")
    parser.add_argument("--no_preproc_pred", action="store_true")
    args = parser.parse_args()

    # Display settings
    print('------------ Options -------------')
    for k, v in sorted(vars(args).items()):
        print('%s: %s' % (str(k), str(v)))
    print('-------------- End ----------------')

    groundtruth = load_tvqa_plus_annotation(args.gt_path)
    if args.no_preproc_pred:
        prediction = load_json(args.pred_path)
    else:
        prediction = load_predictions(args.pred_path, args.gt_path, args.word2idx_path)
    word2idx = load_json(args.word2idx_path)

    bbox_metrics = compute_att_metrics_using_maskrcnn_voc(prediction["bbox"], groundtruth["bbox"], word2idx)
    temporal_metrics = compute_temporal_metrics(prediction["ts_answer"], groundtruth["ts_answer"])
    all_metrics = merge_dicts([bbox_metrics, temporal_metrics])
    print("QA Acc. {}\nGrd. mAP {}\nTemp. mIoU {}\nASA {}"
          .format(all_metrics["qa_acc"], all_metrics["overall_map"],
                  all_metrics["miou"], all_metrics["ans_span_joint_acc@.5"]))

    if args.output_path:
        save_json_pretty(all_metrics, args.output_path)
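Note that TVQAplus calls merge_dicts with a single list argument rather than two dicts, so its helper presumably folds a sequence of dictionaries into one. A minimal sketch of that list-based form, inferred only from the call merge_dicts([bbox_metrics, temporal_metrics]) above and not from the project's actual source, could look like this:

    def merge_dicts(list_of_dicts):
        """Merge a list of dicts into a single dict.

        Later dicts override earlier ones on key conflicts; sketch based
        on the call site above, not on the project's actual helper.
        """
        merged = {}
        for d in list_of_dicts:
            merged.update(d)
        return merged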
Example #4
Source File: ml_models.py From Sarcasm-Detection with MIT License
def ml_model(train_tokens, train_pos, y_train, test_tokens, test_pos, y_test):
    print("Processing TRAIN SET features...\n")
    start = time.time()
    train_pragmatic, train_lexical, train_pos, train_sent, train_topic, train_sim = \
        extract_features.get_feature_set(train_tokens, train_pos, pragmatic=pragmatic, lexical=lexical,
                                         ngram_list=ngram_list, pos_grams=pos_grams,
                                         pos_ngram_list=pos_ngram_list, sentiment=sentiment,
                                         topic=topic, similarity=similarity, word2vec_map=word2vec_map)
    end = time.time()
    print("Completion time of extracting train features: %.3f s = %.3f min"
          % ((end - start), (end - start) / 60.0))

    print("Processing TEST SET features...\n")
    start = time.time()
    test_pragmatic, test_lexical, test_pos, test_sent, test_topic, test_sim = \
        extract_features.get_feature_set(test_tokens, test_pos, pragmatic=pragmatic, lexical=lexical,
                                         ngram_list=ngram_list, pos_grams=pos_grams,
                                         pos_ngram_list=pos_ngram_list, sentiment=sentiment,
                                         topic=topic, similarity=similarity, word2vec_map=word2vec_map)
    end = time.time()
    print("Completion time of extracting test features: %.3f s = %.3f min"
          % ((end - start), (end - start) / 60.0))

    # Get all features together
    all_train_features = [train_pragmatic, train_lexical, train_pos, train_sent, train_topic, train_sim]
    all_test_features = [test_pragmatic, test_lexical, test_pos, test_sent, test_topic, test_sim]

    # Choose your feature options: you can run on all possible combinations of feature sets
    sets_of_features = 6
    feature_options = list(itertools.product([False, True], repeat=sets_of_features))
    feature_options = feature_options[1:]  # skip over the option in which all entries are false

    # OR select just the features that you want.
    # From left to right, set to True if you want the feature to be active:
    # [Pragmatic, Lexical-grams, POS-grams, Sentiment, LDA topics, Similarity]
    # feature_options = [[True, True, True, True, True, True]]

    for option in feature_options:
        train_features = [{} for _ in range(len(train_tokens))]
        test_features = [{} for _ in range(len(test_tokens))]
        utils.print_features(option, ['Pragmatic', 'Lexical-grams', 'POS-grams',
                                      'Sentiment', 'LDA topics', 'Similarity'])

        # Make a feature selection based on the current feature_option choice
        for i, o in enumerate(option):
            if o:
                for j, example in enumerate(all_train_features[i]):
                    train_features[j] = utils.merge_dicts(train_features[j], example)
                for j, example in enumerate(all_test_features[i]):
                    test_features[j] = utils.merge_dicts(test_features[j], example)

        # Vectorize and scale the features
        x_train, x_test = utils.extract_features_from_dict(train_features, test_features)
        x_train_scaled = preprocessing.scale(x_train, axis=0)
        x_test_scaled = preprocessing.scale(x_test, axis=0)
        print("Shape of the x train set (%d, %d)" % (len(x_train_scaled), len(x_train_scaled[0])))
        print("Shape of the x test set (%d, %d)" % (len(x_test_scaled), len(x_test_scaled[0])))

        # Run the model on the selection of features made
        start = time.time()
        utils.run_supervised_learning_models(x_train_scaled, y_train, x_test_scaled, y_test)
        end = time.time()
        print("Completion time of the Linear SVM model: %.3f s = %.3f min"
              % ((end - start), (end - start) / 60.0))
Example #5
Source File: test_connect_write.py From pykiteconnect with MIT License
def setup_order_place(kiteconnect, variety, product, order_type, diff_constant=0.01, price_diff=1,
                      bo_price_diff=1, price=None, validity=None, disclosed_quantity=None,
                      trigger_price=None, squareoff=None, stoploss=None, trailing_stoploss=None,
                      tag="itest"):
    """Place an order with custom fields enabled.

    Prices are calculated from live ltp and offset based on `price_diff` and `diff_constant`.
    All BO specific fields prices are diffed by `bo_price_diff`.
    """
    updated_params = utils.merge_dicts(params, {
        "product": product,
        "variety": variety,
        "order_type": order_type
    })

    # NOT WORKING CURRENTLY
    # Raises exception since no price set
    # with pytest.raises(ex.InputException):
    #     kiteconnect.place_order(**updated_params)

    if price or trigger_price:
        symbol = params["exchange"] + ":" + params["tradingsymbol"]
        ltp = kiteconnect.ltp(symbol)

        # Subtract last price with diff_constant %
        diff = ltp[symbol]["last_price"] * diff_constant
        round_off_decimal = diff % price_diff if price_diff > 0 else 0
        base_price = ltp[symbol]["last_price"] - (diff - round_off_decimal)

        if price and trigger_price:
            updated_params["price"] = base_price
            updated_params["trigger_price"] = base_price - price_diff
        elif price:
            updated_params["price"] = base_price
        elif trigger_price:
            updated_params["trigger_price"] = base_price

    if stoploss:
        updated_params["stoploss"] = bo_price_diff
    if squareoff:
        updated_params["squareoff"] = bo_price_diff
    if trailing_stoploss:
        updated_params["trailing_stoploss"] = bo_price_diff

    order_id = kiteconnect.place_order(**updated_params)

    # delay order fetch so order is not in received state
    time.sleep(0.5)
    order = kiteconnect.order_history(order_id)
    return (updated_params, order_id, order)