Compare commits

...

10 Commits

Author SHA1 Message Date
320fcb343b Moved data exploration cells to after the regressions. 2025-04-22 00:18:55 -07:00
a6bc83fa8b Started performing linear or logarithmic regressions.
Split trials for different languages into their own cells.
2025-04-21 23:22:00 -07:00
f073019538 Created function to generate chart of salary over years of exp. 2025-04-20 22:00:57 -07:00
08eb095bf6 Added chart for years of experience and earnings.
Can select developers by programming language.
Colorize dots by country, employment status.
2025-04-20 20:32:27 -07:00
2d91e205a2 Came up with better names for axes (language comparison). 2025-04-20 07:47:29 -07:00
71d1efa292 Changed get_differences to result in a ratio. 2025-04-20 06:07:55 -07:00
c6096cfe6c Added new scatter plot.
Shows the difference between usage and desire to use a lang (=y)
over the usage of language (=x).
2025-04-19 20:03:19 -07:00
ea4ee3f493 Check against people who weren't paying attention. 2025-04-19 15:41:31 -07:00
0c5cc2259d Made all graphs to have tight bounding boxes. 2025-04-19 14:58:09 -07:00
cbd575697f Squashed commit of the following:
commit e1691bb85b611c84ae9e4315523de1b79837ef2b
Author: scuti <scuti@tutamail.com>
Date:   Sat Apr 19 14:00:28 2025 -0700

    Created graph for job title and compensation

commit 50e00a42686f7135508ca08d1354a36012e839d7
Author: scuti <scuti@tutamail.com>
Date:   Sat Apr 19 06:38:16 2025 -0700

    Got visualization idea for annual compensation
2025-04-19 14:10:44 -07:00
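
The regression commits above (a6bc83fa8b, f073019538) fit annual compensation against log-transformed years of professional experience. A rough standalone sketch of that approach, using synthetic data and a plain natural-log transform rather than the survey dataframe and log_base_a helper that appear in the diff below:

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import root_mean_squared_error
from sklearn.model_selection import train_test_split

# synthetic stand-ins for YearsCodePro and ConvertedCompYearly (illustration only)
rng = np.random.default_rng(0)
years = rng.uniform(0.5, 40, size=300)
salary = 60000 + 25000 * np.log(years + 1) + rng.normal(0, 10000, size=300)

# shift by 1 before the log, mirroring the x_shift=1 used in the notebook
X = np.log(years + 1).reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, salary, test_size=0.2, random_state=999)

model = LinearRegression().fit(X_train, y_train)
y_pred = model.predict(X_test)
print("coefficient =", model.coef_, "intercept =", model.intercept_)
print("rmse =", root_mean_squared_error(y_test, y_pred))

Since a linear model absorbs any constant scale factor, changing the log base (the change_base experiments in the diff) only rescales the x values the line is plotted against; the predictions and RMSE are unchanged.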

@@ -30,14 +30,276 @@
"print(so_df.keys())\n",
"so_df.describe()\n",
"\n",
"# print(so_df[:3])"
"# check for people who aren't paying attention\n",
"count_not_apple = (so_df[\"Check\"] != \"Apples\").sum()\n",
"print(count_not_apple)\n",
"print(so_df.shape)\n",
"assert(count_not_apple == 0)\n",
"# print(so_df[:3])\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0e9b0c49-eac6-45e1-83f1-92813e734ef5",
"metadata": {},
"outputs": [],
"source": [
"# draw count plot of developers based on age\n",
"\n",
"def visualize_devs(df, lang, key=\"Age\",):\n",
" plt.figure()\n",
" plt.xticks(rotation=45)\n",
" # from:\n",
" # print(df[key].unique())\n",
" order = ['Under 18 years old', '18-24 years old', \\\n",
" '25-34 years old','35-44 years old',\\\n",
" '45-54 years old', '55-64 years old', \\\n",
" '65 years or older', 'Prefer not to say']\n",
" sb.countplot(x=key, data=df, order=order)\n",
" title=\"Ages of %s Programmers\" % lang\n",
" plt.title(title)\n",
" filename= \"images/%s-of-%s-programmers.png\" % (key, lang)\n",
" plt.savefig(filename, bbox_inches=\"tight\")\n",
"\n",
"def get_lang_devs(df, lang):\n",
" col = \"LanguageHaveWorkedWith\"\n",
" # will not work for single character languages (C, R)\n",
" # will mangle Java and JavaScript, Python and MicroPython\n",
" return df[ df[col].str.contains(lang, na=False) ] \n",
"\n",
"def get_c_devs(df, lang=\"C\"):\n",
" key = \"LanguageHaveWorkedWith\"\n",
" cdevs = []\n",
" for index, dev in df.iterrows():\n",
" try:\n",
" # split string into list\n",
" langs_used = dev[key].split(';')\n",
" if lang in langs_used:\n",
" cdevs.append(dev)\n",
" except AttributeError:\n",
"# print(dev[key])\n",
" pass\n",
" return pd.DataFrame(cdevs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b8212c27-6c76-4c8f-ba66-bbf1b5835c99",
"metadata": {},
"outputs": [],
"source": [
"\n",
"from sklearn.linear_model import LinearRegression, LogisticRegression\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.metrics import root_mean_squared_error\n",
"from sklearn.model_selection import StratifiedShuffleSplit\n",
"import traceback\n",
"import numpy as np\n",
"\n",
"# still haven't come up with a name\n",
"class Foo:\n",
" def __init__(self, dataset, language, jobs=None, n_rich_outliers=0, n_poor_outliers=0, country=\"United States of America\"):\n",
" self.devs = None\n",
" self.canvas = None\n",
" self.language = language\n",
" self.country = country\n",
" # focus on people who have given ...\n",
" key = \"ConvertedCompYearly\"\n",
" key2 = \"YearsCodePro\"\n",
" df = dataset.dropna(subset=[key, key2])\n",
" self.key = key\n",
" self.key2 = key2\n",
"\n",
" criteria = {\"MainBranch\":\"I am a developer by profession\"}\n",
"\n",
" #print(df[\"Country\"].unique)\n",
" if country:\n",
" criteria[\"Country\"] = country\n",
" for k in criteria:\n",
" df = df[df[k] == criteria[k] ] \n",
"\n",
" # chatgpt tells me about filtering with multiple strings\n",
" if jobs:\n",
" df = df[df.isin(jobs).any(axis=1)]\n",
"\n",
" devs = None\n",
" if len(language) > 1:\n",
" devs = get_lang_devs(df, language)\n",
" else:\n",
" devs = get_c_devs(df, lang=language)\n",
" replacement_dict = {\n",
" 'Less than 1 year': '0.5',\n",
" 'More than 50 years': '51',\n",
" }\n",
"\n",
" # https://stackoverflow.com/questions/47443134/update-column-in-pandas-dataframe-without-warning\n",
" pd.options.mode.chained_assignment = None # default='warn'\n",
" new_column = devs[key2].replace(replacement_dict)\n",
" devs[key2] = pd.to_numeric(new_column, errors='coerce')\n",
" pd.options.mode.chained_assignment = 'warn' # default='warn'\n",
" # print( devs[key2].unique() )\n",
" \n",
" indices = devs[key].nlargest(n_rich_outliers).index\n",
" devs = devs.drop(indices)\n",
" indices = devs[key].nsmallest(n_poor_outliers).index\n",
" self.devs = devs.drop(indices)\n",
" del devs, new_column, criteria\n",
" \n",
" def visualize(self, n_lowest=0, hue=\"Country\"): \n",
" self.canvas = plt.figure()\n",
" key = self.key\n",
" key2 = self.key2\n",
"\n",
" if n_lowest > 0:\n",
" # chatgpt draws my line\n",
" # Calculate the lowest nth point (for example, the 5th lowest value)\n",
" # iloc[-1] gets the last element from the n smallest\n",
" lowest_nth = self.devs[key].nsmallest(n_lowest).iloc[-1] \n",
" # Draw a horizontal line at the lowest nth point\n",
" # label=f'Lowest {n_poorest}th Point: {lowest_nth_value:.2f}'\n",
" plt.axhline(y=lowest_nth, color='purple', linestyle='--', label=\"y=%0.2f\" % lowest_nth )\n",
"\n",
" sb.scatterplot(data=self.devs, x=key2, y=key, hue=hue)\n",
" plt.legend(loc='lower center', bbox_to_anchor=(1.5,0)) \n",
" title = \"Annual Salary of %s Developers Over Years of Experience\" % self.language\\\n",
" + \"\\nsample size=%i\" % len (self.devs)\\\n",
" + \"\\ncountry=%s\" % self.country\n",
" plt.title(title)\n",
"\n",
" def run_regression(self, split=train_test_split, \n",
" x_transform=None, change_base=None, x_shift=0,\n",
" line_color='red'):\n",
" df = self.devs # .sort_values(by = self.key2)\n",
"# df['binned'] = pd.qcut(df[self.key], q=4, labels=False)\n",
" X = df[self.key2].to_frame() + x_shift\n",
" if x_transform is not None and change_base is not None:\n",
" X = x_transform (X, a=change_base ) \n",
" elif x_transform is not None:\n",
" X = x_transform (X) \n",
"\n",
" y = df[self.key].to_frame()\n",
"# y = df['binned']\n",
" \n",
" X_train, X_test, y_train, y_test = split(X, y, test_size=0.2, random_state=999)\n",
"\n",
" model = LinearRegression()\n",
" model.fit(X_train, y_train)\n",
" y_pred = model.predict(X_test)\n",
"\n",
" print(\"+----------------------+\")\n",
" print(\"coefficient =\", model.coef_)\n",
" print('intercept=', model.intercept_)\n",
" rmse = root_mean_squared_error(y_test, y_pred)\n",
" print(\"rmse = \", rmse)\n",
" print(\"sample predictions:\")\n",
" print(y_pred[3:6])\n",
" print(\"+----------------------+\")\n",
" \n",
" plt.figure(self.canvas)\n",
" plt.xlim(left=0, right=40) # Adjust these values as needed\n",
" plt.plot(X_test, y_pred, color=line_color, label='Regression Line')\n",
" plt.legend(loc='lower center', bbox_to_anchor=(1.5,0)) \n",
" del y_pred, model\n",
"\n",
"\n",
" def export_image(self, filename = \"images/programmers-%s-%s.png\"):\n",
" plt.figure(self.canvas)\n",
" plt.savefig(filename % (self.language, self.country), bbox_inches='tight')\n",
"\n",
"# the higher a is, the steeper the line gets\n",
"def log_base_a(x, a=1.07):\n",
" return np.log10(x)/np.log(a)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ba81c59c-0610-4f71-96fb-9eddd7736329",
"metadata": {},
"outputs": [],
"source": [
"\n",
"\n",
"# expected python jobs\n",
"pyjobs = [\"Data scientist or machine learning specialist\",\n",
" \"Data or business analyst\",\n",
" \"Data engineer\",\n",
"# \"DevOps specialist\",\n",
"# \"Developer, QA or test\"\n",
"]\n",
"\n",
"python = Foo(so_df, \"Python\", jobs=pyjobs, n_rich_outliers=9, n_poor_outliers=2)\n",
"python.visualize(hue=\"DevType\")\n",
"# earnings vary widely after the first year\n",
"python.run_regression( x_transform=log_base_a, x_shift=1)\n",
"python.run_regression( x_transform=log_base_a, change_base=1.2, x_shift=1, line_color='magenta')\n",
"python.run_regression( x_transform=log_base_a, change_base=1.12, x_shift=1, line_color='lightgreen')\n",
"python.export_image()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0e27f76c-8f87-4c39-ac2f-5a9b2434466f",
"metadata": {},
"outputs": [],
"source": [
"# expected C jobs\n",
"cjobs = [\"Developer, embedded applications or devices\", \n",
" \"Developer, game or graphics\",\n",
" \"Hardware Engineer\" ,\n",
" # \"Project manager\", \n",
" # \"Product manager\"\n",
"]\n",
"c = Foo(so_df, \"C\", jobs=cjobs, n_rich_outliers=11)\n",
"c.visualize(n_lowest=3, hue=\"DevType\")\n",
"c.run_regression(x_transform=log_base_a, change_base=1.25)\n",
"c.export_image()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8357f841-23a0-4bfa-bf09-860bd3e014b8",
"metadata": {},
"outputs": [],
"source": [
"\n",
"jsjobs = [\"Developer, full-stack\",\n",
" \"Developer, front-end\",\n",
" \"Developer, mobile\"\n",
"]\n",
"\n",
"js = Foo(so_df, \"JavaScript\", jobs=jsjobs, n_rich_outliers=6, country=\"Ukraine\")\n",
"js.visualize(hue=\"DevType\")\n",
"js.export_image()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "11a1b9fb-db48-4749-8d77-4241a99d7bad",
"metadata": {},
"outputs": [],
"source": [
"visualize_devs( get_c_devs(so_df) , \"C\")\n",
"\n",
"for lang in [\"Cobol\", \"Prolog\", \"Ada\", \"Python\"]:\n",
" foo = get_lang_devs(so_df, lang)\n",
" visualize_devs(foo, lang)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "35b9727a-176c-4193-a1f9-a508aecd2d1c",
"metadata": {},
"metadata": {
"jupyter": {
"source_hidden": true
}
},
"outputs": [],
"source": [
"# get popularity of different programming languages\n",
@@ -89,7 +351,7 @@
" plt.grid(axis='x', linestyle='--', alpha=0.75) \n",
" plt.title(\"%s vs %s\" % (label1, label2))\n",
" if saveto is not None:\n",
" plt.savefig(saveto)\n",
" plt.savefig(saveto, bbox_inches='tight')\n",
" del df, df2\n",
"\n",
"l1 = get_langs( so_df )\n",
@@ -110,18 +372,25 @@
"cell_type": "code",
"execution_count": null,
"id": "d0bfdb92-378a-4452-91cc-4d21afd2d6cc",
"metadata": {},
"metadata": {
"jupyter": {
"source_hidden": true
}
},
"outputs": [],
"source": [
"# draw horizontal bar plot\n",
"# https://seaborn.pydata.org/examples/part_whole_bars.html\n",
"\n",
"# investigate extrinsic vs intrinsic motivation\n",
"def get_difference(dict1, dict2):\n",
"def get_difference(dict1, dict2, proportion=False):\n",
" keys = dict1.keys()\n",
" result = dict()\n",
" for key in keys:\n",
" result[key] = dict1[key] - dict2[key]\n",
" if proportion:\n",
" result[key] = round((dict1[key] - dict2[key])/dict2[key],2)\n",
" else:\n",
" result[key] = dict1[key] - dict2[key]\n",
" return result\n",
"\n",
"def visualize_diff(diff_dict, color=\"lightblue\", saveto=None):\n",
@@ -132,7 +401,6 @@
" df = pd.DataFrame(diff_sorted.items(), columns=['Languages', 'Value'])\n",
" plt.figure(figsize=(15,20)) \n",
" sb.barplot(x=KEY, y='Languages', data=df, color=color)\n",
" \n",
" DELTA = '\\u0394'\n",
" for index, value in enumerate(df[KEY]):\n",
" # chatgpt annotates my chart\n",
@@ -144,44 +412,183 @@
" # Adjust the x position for negative values\n",
" plt.text(value, index, DELTA+str(value), va='center', ha='right') \n",
" lowest = 0\n",
" offset = 0.5\n",
" offset = 0\n",
" positive_values = df[df[KEY] > 0][KEY]\n",
" if not positive_values.empty:\n",
" lowest = positive_values.min()\n",
" offset = list(positive_values).count(lowest) \n",
" if len(positive_values) < len(df):\n",
" # don't draw the line if every value is greater than 0\n",
" plt.axhline(y=df[KEY].tolist().index(lowest) + offset, color='red', linestyle='--')\n",
" # don't draw the line if every value is greater than 0_\n",
" plt.axhline(y=df[KEY].tolist().index(lowest) + (offset-0.5), \n",
" color='red', linestyle='--', zorder=-1)\n",
" if saveto is not None:\n",
" plt.savefig(saveto)\n",
" plt.savefig(saveto, bbox_inches='tight')\n",
" \n",
"motiv_diff = get_difference(l2, l1)\n",
"motiv_diff = get_difference(l2, l1, proportion=True)\n",
"# print(motiv_diff)\n",
"visualize_diff(motiv_diff, saveto=\"images/delta.png\")\n",
"motiv_diff = get_difference(l2, l1)\n",
"visualize_diff(motiv_diff, saveto=\"images/delta-b.png\")\n",
"\n",
"# no clear description of what \"admired\" is\n",
"# in the schema\n",
"# but generally people want to use the languages\n",
"# they admire\n",
"\n",
"# determine level of hype\n",
"hype = get_difference(l4, l3)\n",
"# hype = get_difference(l4, l3)\n",
"# print(hype)\n",
"visualize_diff(hype, color=\"red\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e90cf119-c50d-468a-bc87-72dac41176ce",
"metadata": {},
"outputs": [],
"source": [
"# print survey ans\n",
"employment_status = Counter(so_df[\"MainBranch\"])\n",
"print(employment_status)\n",
"\n",
"print(so_df[\"ConvertedCompYearly\"][])"
"# visualize_diff(hype, color=\"red\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f6b1a935-eeda-416f-8adf-5e854d3aa066",
"metadata": {
"jupyter": {
"source_hidden": true
}
},
"outputs": [],
"source": [
"# do people fall out of love with langs\n",
"# the more they are used professionally?\n",
"\n",
"def visualize_favor(df, key_x, key_y, MAGIC_X=0, MAGIC_Y=0, title=str(), saveto=None):\n",
" plt.figure()\n",
" OFFSET = 1 # push text away from point slightly\n",
" for i in range(merged.shape[0]):\n",
" # label points that aren't un a cluster\n",
" if merged[key_x][i] > MAGIC_X or merged[key_y][i] > MAGIC_Y:\n",
" plt.text(merged[key_x].iloc[i]+OFFSET, \n",
" merged[key_y].iloc[i]+OFFSET, \n",
" merged[\"Language\"].iloc[i], \n",
" ha=\"left\",\n",
" size='medium')\n",
"\n",
" sb.scatterplot(data=merged, x=key_x, y=key_y, hue=\"Language\")\n",
" plt.legend(loc='lower left', bbox_to_anchor=(0, -1.25), ncol=3) \n",
" plt.title(title)\n",
" if saveto is not None:\n",
" plt.savefig(saveto, bbox_inches='tight')\n",
" pass\n",
"key_x = \"Users\"\n",
"key_y = \"Potential '\\u0394'Users\"\n",
"df1 = pd.DataFrame(l1.items(), columns=['Language', key_x])\n",
"df2 = pd.DataFrame(motiv_diff.items(), columns=['Language', key_y])\n",
"# chatgpt tells me how to combine df\n",
"merged = pd.merge(df1, df2[[\"Language\", key_y]], on='Language', how='left')\n",
"visualize_favor(merged, key_x, key_y, \n",
" MAGIC_X=5000, MAGIC_Y=2000, \n",
" saveto=\"images/favor.png\")\n",
"del df1, df2, merged"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e90cf119-c50d-468a-bc87-72dac41176ce",
"metadata": {
"jupyter": {
"source_hidden": true
},
"scrolled": true
},
"outputs": [],
"source": [
"# see how much money are people making\n",
"\n",
"def get_mean_by_category(df, category, key=\"ConvertedCompYearly\"):\n",
" unique = df[category].unique()\n",
" result = dict()\n",
" for u in unique:\n",
" mean = df[df[category] == u][key].mean()\n",
" result[u] = mean\n",
" return result\n",
"\n",
"def show_me_the_money(df, saveto=None):\n",
" key_x = \"ConvertedCompYearly\"\n",
" key_y = \"DevType\"\n",
" \n",
" means = get_mean_by_category(df, key_y) \n",
" mean_df = pd.DataFrame(means.items(), columns=[key_y, key_x])\n",
"\n",
" plt.figure(figsize=(14,18)) \n",
" plt.axvline(x=1e5, color='red', linestyle='--', label=\"x = $100,000\")\n",
" plt.axvline(x=1e6, color='lightgreen', linestyle='--', label=\"x = millionaire\")\n",
" sb.barplot(x=key_x, y=key_y, data=mean_df.sort_values(by=key_x), \\\n",
" color='lavender', alpha=0.7, label=\"average compensation\")\n",
" sb.stripplot(x=key_x, y=key_y, data=df, \\\n",
" size=3, jitter=True)\n",
" if saveto is not None:\n",
" plt.savefig(saveto, bbox_inches='tight')\n",
" \n",
"# print survey ans\n",
"#employment_status = Counter(so_df[\"MainBranch\"])\n",
"#print(employment_status)\n",
"\n",
"#employment_type = Counter(so_df[\"DevType\"])\n",
"#print(employment_type)\n",
"\n",
"key = \"ConvertedCompYearly\"\n",
"# answers = so_df[:-1][key].count()\n",
"# print(answers, \"people answered re: \", key)\n",
"df_no_na = so_df.dropna(subset=[key])\n",
"indices = df_no_na[key].nlargest(15).index\n",
"\n",
"show_me_the_money( df_no_na.drop(indices), saveto=\"images/compensation-by-profession.png\" )\n",
"# could also ask myself what portion of developers \n",
"# earn less than the mean compensation\n",
"# (what titles have high standard deviations in earnings)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cdf21b1c-1316-422f-ad14-48150f80366c",
"metadata": {
"jupyter": {
"source_hidden": true
}
},
"outputs": [],
"source": [
"\n",
"# key = \"DevType\"\n",
"# prof = \"Developer, full-stack\"\n",
"\n",
"key = \"MainBranch\"\n",
"prof = \"I am a developer by profession\"\n",
"col = \"ConvertedCompYearly\"\n",
"\n",
"devs = df_no_na[df_no_na[key] == prof ] \n",
"pd.set_option('display.float_format', '{:.2f}'.format)\n",
"devs.describe()[col]\n",
"\n",
"# who the hell is making $1/yr \n",
"# devs[devs[col] == 1.0]\n",
"\n",
"# who are the millionaires\n",
"# devs[devs[col] > 1e6]\n",
"\n",
"# who make more than the mean\n",
"# devs[devs[col] > 76230.84]\n",
"\n",
"# who make more than the median\n",
"# devs[devs[col] > 63316.00]\n",
"\n",
"# the ancient ones\n",
"so_df[so_df[\"YearsCodePro\"] == 'More than 50 years']\n",
"# should drop the 18-24 year old who is either bullshitting or recalls a past life\n",
"# 55-64 years old\n",
"# 65 years or older"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b954a811-e401-48dc-9ba4-263a5f2cf5c5",
"metadata": {},
"outputs": [],
"source": []