initBasti / Amazon2PlentySync (public) (License: GPLv3) (since 2019-01-27) (hash sha1)
Transfer your data from your Amazon Flatfile spreadsheet over to the Plentymarkets system. A how-to is included in the README.
List of commits:
Subject | Hash | Author | Date (UTC)
current version Feb 2019 | 00b24836dd378f21942ed323c2b66f928b9fb4c4 | Sebastian Fricke | 2019-02-25 09:00:00
Changes to fit to the new flatfile format | 91cd339571f607e88f6e922f1a47630c4c8d62a7 | Sebastian Fricke | 2019-02-08 13:28:02
Small removal of redundant code | b271de0b1be1d83be088b00a35b5618af088b58a | Sebastian Fricke | 2019-01-30 18:08:15
General improvements and property upload | bb48084db4359210eb892a04f1322f6fda822bef | Sebastian Fricke | 2019-01-30 17:43:32
Fixed scripts according to dataformat changes + readme | dec28d9e6ff5c5c903d5ca01a969e661d43b66c6 | Sebastian Fricke | 2019-01-29 21:08:04
Working Checkboxes and file import | 25378c68a6220c1c6570642920e6150a50415153 | Sebastian Fricke | 2019-01-29 21:03:23
Added checkboxes, descriptions, import and runbutton | 2021f0960e70c8c229ec08488165dc01b998a6e0 | Sebastian Fricke | 2019-01-27 22:19:18
Added market connection, cosmetics in product import | c9a771d5e7a3a80adc650e773c568e00dd8e2aea | Sebastian Fricke | 2019-01-23 15:01:47
Amazon Data Upload | 33dbd0ed6945c01d8917ceae3cf3964f051a2288 | Sebastian Fricke | 2019-01-22 14:43:39
Readme started, amazon sku upload, vari upload, images | f43a9e83598c3e4623bcb08667e2b4e649b2cdea | Sebastian Fricke | 2019-01-22 10:44:40
Amazon SKU Upload | 8586da2ae91d49c81a0d9b6ff220c8a1b1b011a6 | Sebastian Fricke | 2019-01-16 18:36:54
Inital Commit with current working version of the CLI Tool and the work in progress of the GUI. | 207fef4277f7c169aa79eb39ec1aaaab258b888c | Sebastian Fricke | 2019-01-16 09:47:43
Initial commit | ba965ee75fe09437fb08da5edd25b20e39e17eff | Sebastian Fricke | 2019-01-16 09:42:30
Commit 00b24836dd378f21942ed323c2b66f928b9fb4c4 - current version Feb 2019
Author: Sebastian Fricke
Author date (UTC): 2019-02-25 09:00
Committer name: Sebastian Fricke
Committer date (UTC): 2019-02-25 09:00
Parent(s): 91cd339571f607e88f6e922f1a47630c4c8d62a7
Signing key:
Tree: e0d32484dea53d6328462db591b0c1404f16d292
File | Lines added | Lines deleted
main.py | 5 | 0
packages/amazon_data_upload.py | 23 | 5
packages/item_upload.py | 33 | 46
packages/variation_upload.py | 2 | 15
todo.md | 4 | 0
File main.py added (mode: 100644) (index 0000000..666750b)
+from packages.UploadGUI import UploadGUI
+
+app = UploadGUI(None)
+app.title("Amazon to Plentymarkets")
+app.mainloop()
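
Given the main.py shown above, the GUI can presumably be launched from the repository root with a plain interpreter call (assuming Python 3 with Tkinter installed):

    python3 main.py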
File packages/amazon_data_upload.py changed (mode: 100644) (index 1c6e169..3e4bd0d)
@@ ... @@ def amazonSkuUpload(flatfile, export):

 def amazonDataUpload(flatfile, export):

-    column_names = ['ItemAmazonProductType', 'ItemProductType', 'bullet_point1',
+    column_names = ['ItemAmazonProductType', 'ItemAmazonFBA', 'bullet_point1',
                     'bullet_point2', 'bullet_point3', 'bullet_point4',
                     'bullet_point5', 'fit_type',
                     'lifestyle', 'batteries_required',

@@ ... @@ def amazonDataUpload(flatfile, export):
                     'supplier_declared_dg_hz_regulation2',
                     'supplier_declared_dg_hz_regulation3',
                     'supplier_declared_dg_hz_regulation4',
-                    'supplier_declared_dg_hz_regulation5', 'ItemID']
+                    'supplier_declared_dg_hz_regulation5', 'ItemID',
+                    'ItemShippingWithAmazonFBA']

     Data = SortedDict()

     with open(flatfile, mode='r') as item:
         reader = csv.DictReader(item, delimiter=";")

+        type_id = {
+            'accessory':28,
+            'shirt':13,
+            'pants':15,
+            'dress':18,
+            'outerwear':21,
+            'bags':27
+        }
+
+        product_type = ''
+
         for row in reader:
             if(row['parent_child'] == 'parent'):
-                values = [row['feed_product_type'], row['feed_product_type'],
+
+                if(row['feed_product_type'].lower() in [*type_id]):
+                    for key in type_id:
+                        if(row['feed_product_type'].lower() == key):
+                            product_type = type_id[key]
+
+                values = [product_type, '1',
                           row['bullet_point1'], row['bullet_point2'],
                           row['bullet_point3'], row['bullet_point4'],
                           row['bullet_point5'], row['fit_type'],

@@ ... @@ def amazonDataUpload(flatfile, export):
                           row['supplier_declared_dg_hz_regulation3'],
                           row['supplier_declared_dg_hz_regulation4'],
                           row['supplier_declared_dg_hz_regulation5'],
-                          '']
+                          '','1']
                 Data[row['item_sku']] = SortedDict(zip(column_names, values))

     with open(export, mode='r') as item:

@@ ... @@ def asinUpload(export, stock):

         for row in reader:
             if row['VariationID']:
-                values = [ '', '1', '', row['VariationID'] ]
+                values = [ '', '1', '0', row['VariationID'] ]

                 Data[row['VariationNumber']] = dict(zip(column_names, values))
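
Side note on the hunk above: the added lookup walks every key of type_id even though a dictionary supports direct access. A minimal sketch of an equivalent, more idiomatic form (illustrative only, not part of the commit):

    type_id = {
        'accessory': 28,
        'shirt': 13,
        'pants': 15,
        'dress': 18,
        'outerwear': 21,
        'bags': 27
    }

    def lookup_product_type(row):
        # dict.get returns the mapped ID, or '' when the
        # feed_product_type is not one of the known categories.
        return type_id.get(row['feed_product_type'].lower(), '')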
File packages/item_upload.py changed (mode: 100644) (index 98f0978..7130bff)
@@ ... @@ def itemUpload(flatfile, intern):

     with open(flatfile, mode='r') as item:
         reader = csv.DictReader(item, delimiter=";")
-
-
-        relationship = ['parent_child', 'Variantenbestandteil']
         for row in reader:
-            try:
-                if(row[relationship[0]]):
-                    relationcolum = relationship[0]
-            except KeyError:
-                if(row[relationship[1]]):
-                    relationcolum = relationship[1]
-            except KeyError as err:
-                print(err)
-                print("There seems to be a new Flatfile, please check column for parent\n",
-                      " & child relationship for the headername and enter it within the\n",
-                      " first with open(flatfile....)")
-                exit(1)
-            # transform the text format to integer in order to adjust the
-            # height, width, length numbers from centimeter to milimeter
-
-            if(row[relationcolum]):
+            # transform the text format to integer in order to adjust the
+            # height, width, length numbers from centimeter to milimeter
+
+            if(row['parent_child'] == 'parent'):
                 try:
                     if(row['package_height'] and
                        row['package_length'] and

@@ ... @@ def itemUpload(flatfile, intern):
             # combine the keyword columns into a single one
             # after that check the size of the keywords
             # because the maximum for amazon is 250byte
-            if(row['generic_keywords1']):
-                keywords = ''
-                try:
-                    keywords = str(row['generic_keywords1'] + '' +
-                                   row['generic_keywords2'] + '' +
-                                   row['generic_keywords3'] + '' +
-                                   row['generic_keywords4'] + '' +
-                                   row['generic_keywords5'])
-                except Exception as err:
-                    print(err)
-                    print("The combination of the keywords failed!")
-            else if(row['generic_keywords']):
-                keywords = 'generic_keywords'
+            # if('generic_keywords1' in headers):
+            #     if(row['generic_keywords1']):
+            #         keywords = ''
+            #         try:
+            #             keywords = str(row['generic_keywords1'] + '' +
+            #                            row['generic_keywords2'] + '' +
+            #                            row['generic_keywords3'] + '' +
+            #                            row['generic_keywords4'] + '' +
+            #                            row['generic_keywords5'])
+            #         except Exception as err:
+            #             print(err)
+            #             print("The combination of the keywords failed!")
+            if(row['generic_keywords']):
+                keywords = row[ 'generic_keywords' ]

             try:
                 values = ['', row['item_sku'], row['package_length'] * 10,

@@ ... @@ def itemPropertyUpload(flatfile, export):

                 material[row['item_sku']] = 4
                 value[row['item_sku']] = "Baumwolle"
-            if(re.search(r'(hemp|hanf)',
-                         row['outer_material_type'].lower())):
+            if(re.search(r'(hemp|hanf)',
+                         row['outer_material_type'].lower())):

-                material[row['item_sku']] = 5
-                value[row['item_sku']] = "Hanf"
-            if(re.search(r'(viskose|viscose)',
-                         row['outer_material_type'].lower())):
+                material[row['item_sku']] = 5
+                value[row['item_sku']] = "Hanf"
+            if(re.search(r'(viskose|viscose)',
+                         row['outer_material_type'].lower())):

-                material[row['item_sku']] = 6
+                material[row['item_sku']] = 6
                 value[row['item_sku']] = "Viskose"

     with open(export, mode='r') as item:

@@ ... @@ def itemPropertyUpload(flatfile, export):

             Data[row['VariationNumber'] + '1'] = dict(zip(column_names,
                                                           values))
-            values = [material[row['VariationNumber']],
-                      row['ItemID'],
-                      row['VariationName'],
-                      'de',
-                      value[row['VariationNumber']]]
-
-            Data[row['VariationNumber'] + '2'] = dict(zip(column_names,
+            if(row['VariationNumber'] in [*material]):
+                values = [material[row['VariationNumber']],
+                          row['ItemID'],
+                          row['VariationName'],
+                          'de',
+                          value[row['VariationNumber']]]
+
+                Data[row['VariationNumber'] + '2'] = dict(zip(column_names,
                                                               values))
     variation_upload.writeCSV(Data, "property", column_names)
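
The comment in the keyword hunk cites Amazon's 250-byte ceiling, but the committed code does not enforce it yet. A minimal sketch of such a check (clip_keywords is a hypothetical helper, not part of the repository):

    def clip_keywords(keywords, limit=250):
        # Amazon's limit is counted in bytes, so measure the encoded length.
        encoded = keywords.encode('utf-8')
        if len(encoded) <= limit:
            return keywords
        # Cut at the byte limit and drop any partially truncated character.
        return encoded[:limit].decode('utf-8', errors='ignore')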
File packages/variation_upload.py changed (mode: 100644) (index c48af03..58aa1d6)
@@ ... @@ def variationUpload(flatfile, intern_number):

     with open(flatfile, mode='r') as item:
         reader = DictReader(item, delimiter=";")
-
-        relationship = ['parent_child', 'Variantenbestandteil']
         for row in reader:
-            try:
-                if(row[relationship[0]]):
-                    relationcolum = relationship[0]
-            except KeyError:
-                if(row[relationship[1]]):
-                    relationcolum = relationship[1]
-            except KeyError as err:
-                print(err)
-                print("There seems to be a new Flatfile, please check column for parent\n",
-                      " & child relationship for the headername and enter it within the\n",
-                      " first with open(flatfile....")
-            if(row[relationcolum] == 'parent'):
+            if(row['parent_child'] == 'parent'):
                 item_name = row['item_name']
-            if(row[relationcolum] == 'child'):
+            if(row['parent_child'] == 'child'):
                 try:
                     if(row['package_height'] and
                        row['package_length'] and
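
The fallback removed above probed every row for 'parent_child' or 'Variantenbestandteil'; inspecting the header once would reach the same goal more cheaply. A sketch under that assumption (detect_relation_column is a hypothetical helper, not part of the commit):

    from csv import DictReader

    def detect_relation_column(path,
                               candidates=('parent_child', 'Variantenbestandteil')):
        # Check the CSV header once instead of catching KeyError per row.
        with open(path, mode='r') as item:
            reader = DictReader(item, delimiter=";")
            for name in candidates:
                if name in (reader.fieldnames or []):
                    return name
        raise KeyError("no parent/child relationship column found")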
File todo.md added (mode: 100644) (index 0000000..627b921)
+<b>Todo</b>
+Create the fitting header list for the item__upload.py file which contains the
+head columns of the csv.
+The variable is allready in use at the generic keyword scrap.
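
On the todo above: the headers variable referenced by the commented-out keyword block could presumably be filled straight from the reader, for example (illustrative sketch, read_headers is not in the repository):

    import csv

    def read_headers(flatfile):
        # Column names of the flatfile, for checks such as
        # "if('generic_keywords1' in headers)".
        with open(flatfile, mode='r') as item:
            return csv.DictReader(item, delimiter=";").fieldnames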
Hints:
Before first commit, do not forget to setup your git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/initBasti/Amazon2PlentySync

Clone this repository using ssh (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/initBasti/Amazon2PlentySync

Clone this repository using git:
git clone git://git.rocketgit.com/user/initBasti/Amazon2PlentySync

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main