#StackBounty: #javascript #performance #node.js #interview-questions #web-services Find currency exchange rates

Bounty: 50

Description:

Design a service to fetch exchange rates from a remote resource and
then calculate the exchange rate for each currency pair.

The remote resource contains the exchange rates of each currency in
Euro.

This is an interview assignment and I came up with a simple solution: since every rate is quoted against the Euro, the source→target rate is just the target's EUR rate divided by the source's EUR rate (with illustrative numbers: if one euro buys 1.20 USD and 0.90 GBP, then the USD→GBP rate is 0.90 / 1.20 = 0.75).

index.js

'use strict';

const joi = require('joi');

const api = require('./api');
const Exchange = require('./exchange');
const xmlParser = require('./parse-xml');

const schema = joi
  .object({
    source: joi.string().required().min(3).max(3).example('EUR'),
    target: joi.string().required().min(3).max(3).example('GBP')
  })
  .unknown()
  .required();

const defaults = {
  timeout: 1000 // 1 sec
};

const exchange = async (pair, options = {}) => {
  options = Object.assign({}, defaults, options);
  const {source, target} = joi.attempt(pair, schema);

  const {requestApi = api, parser = xmlParser} = options;

  const exchange = new Exchange(requestApi, parser, options);
  const rate = await exchange.convert({source, target});
  return {source, target, rate};
};

module.exports = exchange;

exchange.js

'use strict';

const URL = 'https://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml';

class Exchange {
  constructor(api, parser, options = {}) {
    this.api = api;
    this.options = options;
    this.parser = parser;
  }

  async convert({source, target}) {
    if (!this.xml) {
      await this.fetch();
      this.euroToAll = this.parser(this.xml);
    }
    const euroToSource = this.euroToAll[source];
    const euroToTarget = this.euroToAll[target];
    return exchange(euroToSource, euroToTarget);
  }

  async fetch() {
    const response = await this.api.fetch(URL, this.options);
    this.xml = response.body || '';
  }
}

function exchange(from, to) {
  return round(parseFloat(to) / parseFloat(from));
}

function round(result, digits = 4) {
  return Math.round(result * (10 ** digits)) / (10 ** digits);
}

module.exports = Exchange;

parse-xml.js

'use strict';

const xmldoc = require('xmldoc');
const debug = require('debug')('exchange-rate:parse');

const currencies = require('./currencies');

const parse = xml => {
  const doc = new xmldoc.XmlDocument(xml);
  const cube = doc.childNamed('Cube').childNamed('Cube');

  const rates = currencies.reduce(
    (accumulator, currency) => {
      const exchange = cube.childWithAttribute('currency', currency);
      if (exchange) {
        const {rate} = exchange.attr;
        accumulator[currency] = rate;
      } else {
        debug(`Node not found for currency: ${currency}`);
      }
      return accumulator;
    },
    {}
  );
  // Add EUR rate to make it consistent
  rates.EUR = '1.0';
  return rates;
};

module.exports = parse;

api.js

'use strict';

const got = require('got');

module.exports = {
  async fetch(url, options = {}) {
    return got(url, options);
  }
};

Questions:

  1. What if in the future we need to add different providers with different representations? How can I make it more flexible and keep the core logic decoupled? (One possible shape is sketched after this list.)
  2. I am also curious to know whether the design of the API from the client's perspective is good, or whether it can be improved.
  3. In Node.js we can define dependencies via require, but I found them difficult to mock for testing, so in a couple of places I pass dependencies via arguments instead. Is this fine?
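
For question 1, one possible shape (a sketch only, not the author's code; EcbProvider, getEuroRates and convert are made-up names, and the URL is the one already used in exchange.js) is to hide each remote source behind a small provider interface that always resolves to the same EUR-based rate map, so the core calculation never sees XML, JSON or HTTP details:

'use strict';

// Sketch: every provider resolves to the same representation, a map of
// EUR-based rates such as { USD: '1.17', GBP: '0.87', EUR: '1.0' }.
class EcbProvider {
  constructor(requestApi, parser, options = {}) {
    this.requestApi = requestApi;
    this.parser = parser;
    this.options = options;
  }

  async getEuroRates() {
    const url = 'https://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml';
    const response = await this.requestApi.fetch(url, this.options);
    return this.parser(response.body || '');
  }
}

// The core calculation depends only on the provider contract.
const convert = async (provider, {source, target}) => {
  const rates = await provider.getEuroRates();
  return parseFloat(rates[target]) / parseFloat(rates[source]);
};

module.exports = {EcbProvider, convert};

A new source then only needs its own provider class with a getEuroRates() method; the conversion code stays untouched.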


Get this bounty!!!

#StackBounty: #sql-server #performance #clustered-index When Azure recommends an index (part of natural key) with all other columns inc…

Bounty: 50

The table does have an identity key (the current clustered index), but it's barely used to query. Because the natural key is not ever-increasing, I'm afraid of insert performance, fragmentation or other problems that I don't foresee now.

The table is not wide, with just a few columns. It has about 8 million rows and is bringing our site to a halt during peak times (1000+ concurrent users). The data is not easily cacheable, because it is quite volatile and it is essential that it's up to date.

There are a lot of reads on one column of the natural key, but also quite active inserting and updating. Say 8 reads vs. 1 update vs. 1 insert.

Id (PK)         int
UserId*         int
Key1*           varchar(25)
Key2*           varchar(25)
Key3*           int
LastChanged     datetime2(7)
Value           varchar(25)
Invalid         bit

* this combination is the natural primary key

Most of the time I need to query on the following (a possible index for these patterns is sketched after the list):

  • All rows for one UserId (most queried)
  • All rows for a list of UserIds (a lot of rows)
  • All rows for a list of UserIds with Key1 = X
  • All rows for a list of UserIds with Key2 = X
  • All rows for a list of UserIds with Key1 = X and Key2 = X
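
One possible covering nonclustered index for the listed patterns (a sketch only, not tested against this workload; dbo.MyTable and the index name are placeholders since the post does not name the table) leads with UserId because it appears in every query, with Key1 and Key2 as additional keys and the remaining columns included to avoid lookups:

CREATE NONCLUSTERED INDEX IX_MyTable_UserId_Key1_Key2
ON dbo.MyTable (UserId, Key1, Key2)          -- dbo.MyTable is a placeholder name
INCLUDE (Key3, LastChanged, Value, Invalid);

Whether an index like this is preferable to changing the clustered index is exactly the trade-off the question is about.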

I know the final answer is always “profile it”, but we are under quite a time constraint here, so any guidance or experienced opinions in advance would really be appreciated.

Thanks in advance,


Get this bounty!!!

#StackBounty: #model-selection #r-squared #performance #rms Why does the rank order of models differ for R squared and RMSE?

Bounty: 50

I am comparing $R^2$ and RMSE of different models. Interestingly, the rank ordering of the models with respect to $-R^2$ and RMSE is different and I do not understand why.

Here is an example in R:

library(caret) 

set.seed(0)
d<-SLC14_1(n=1000)
tc<-trainControl(method="cv",number=10)
t1<-train(y~.,data=d,method="glmnet",trControl=tc) 
order(t1$results$RMSE)==order(-t1$results$Rsquared)

Output:

[1]  TRUE FALSE  TRUE FALSE  TRUE FALSE FALSE FALSE  TRUE

Thus, the order is different for $-R^2$ and $RMSE$.

The question is why.

Let $SS_{res}$ be the sum of squared residuals $\sum_i (y_i-f_i)^2$.

$RMSE$ is defined as $\sqrt{SS_{res}/n}$.

$R^2$ is defined as $1-SS_{res}/SS_{tot}$ where $SS_{tot}$ is $\sum_i (y_i-\overline{y})^2$.

Since $SS_{res}=n(RMSE)^2$, we can write $R^2$ as $1-n(RMSE)^2/SS_{tot}$.
Since $n$ and $SS_{tot}$ are constant and the same for all models, $-R^2$ and $RMSE$ should be strictly positively related. However, they are not, since in practice the ranking order is not identical (see the example code).
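
Making that step explicit (subscripts $A$ and $B$ denote two models evaluated on the same data, so $n$ and $SS_{tot}$ are fixed):

$$R^2 = 1 - \frac{n(RMSE)^2}{SS_{tot}} \quad\Longrightarrow\quad RMSE_A < RMSE_B \iff R^2_A > R^2_B,$$

i.e. sorting by $RMSE$ and by $-R^2$ should give exactly the same ranking.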

What is wrong with my argument?


Get this bounty!!!

#StackBounty: #python #performance #python-3.x #xml #memory-optimization Build embedded XML based on string containment

Bounty: 50

I have a list of strings that represent paths. These are unique, unsorted values that look like this:

li = ['1/', '1/2/4/', '1/23/4/', '1/2/', '1/1/3/', '1/2/3/4/', '1/1/', '1/23/', '1/2/3/']

It can also be assumed that every path has a parent, e.g. 1/2/ has parent 1/.

The goal is to organise these items in a structured XML format, where the parent-relation is clear, i.e. where deeper elements are part of their parents, mimicking a directory tree. Desired output for the list above would be:

<root>
  <include path="1/">
    <include path="1/1/">
      <include path="1/1/3/"/>
    </include>
    <include path="1/2/">
      <include path="1/2/3/">
        <include path="1/2/3/4/"/>
      </include>
      <include path="1/2/4/"/>
    </include>
    <include path="1/23/">
      <include path="1/23/4/"/>
    </include>
  </include>
</root>

The following works (run it here), but I am not sure about efficiency. The actual list can be thousands of values long. I am also curious about memory management, because I am writing all XML nodes in-memory. The eventual goal is to print the final XML to a file.

from xml.etree import ElementTree as ET
import re

li = ['1/', '1/2/4/', '1/23/4/', '1/2/', '1/1/3/', '1/2/3/4/', '1/1/', '1/23/', '1/2/3/']
li = sorted(li)
# In Python >= 3.6 dicts maintain order
# Sets don't? So use best of both worlds, dict: speed and order
di = {l: False for l in li}

root = ET.Element('root')

# First build the root nodes, remove them from dict
for p in list(di.keys()):
  if len(list(filter(None, p.split('/')))) == 1:
    del di[p]
    root.append(ET.Element('include', {'path': p}))

# Because dict (and so keys()) are ordered, we can assume that the nodes higher 
# in the include tree are created before the more in-depth ones
for p in list(di.keys()):
  parent_path = re.sub('\d+/$', '', p)
  try:
    parent = root.find(f'.//include[@path="{parent_path}"]')
    parent.append(ET.Element('include', {'path': p}))
  except Exception:
    print(f"parent not found for {p}")

print(ET.tostring(root))

This indeed returns the expected output (not pretty-printed). I am wondering if there is a better option and whether there are flaws in my approach. I’ll only be using Python >= 3.6, which is quite important for the order of the dictionary.


Update

I noticed a bug with the sorting of the list/dict: the sorting was done on the strings, which isn't what we want (e.g. 1001/ would sort before 134/). I rewrote part of the code and now use sets, assuming these are faster.

from xml.etree import ElementTree as ET
import re


def get_xml(paths):
    root = ET.Element('paths')

    root_paths = set()
    for p in paths:
        # If only one item, use as first-level node - e.g. 1/
        if len(list(filter(None, p.split('/')))) == 1:
            root_paths.add(p)
            root.append(ET.Element('include', {'path': p}))

    # Remove root_paths from set
    paths = paths.difference(root_paths)

    # We can't use regular sort because that'll sort by string
    # Instead, sort by array of ints representation. E.g. `12/1001/14/` -> [12, 1001, 14]
    sorted_paths = sorted(paths, key=lambda item: [int(n) for n in list(filter(None, item.split('/')))])
    for p in sorted_paths:
        # Find parent_path by removing last part of string
        parent_path = re.sub('\d+/$', '', p)
        try:
            parent = root.find(f'.//include[@path="{parent_path}"]')
            parent.append(ET.Element('include', {'path': p}))
        except AttributeError:
            print(f"parent {parent_path} not found for {p}", flush=True)

    return root

s = {'1/', '1/2/4/', '1/23/4/', '1/2/', '1/1/3/', '1/2/3/4/', '1/1/', '1/23/', '1/2/3/'}

xml = get_xml(s)

print(ET.tostring(xml))
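
For comparison, here is a sketch of an alternative (build_tree is a made-up name; same assumptions about the input format and Python >= 3.6) that avoids the repeated root.find() XPath scans by keeping a path -> element dict, so each parent lookup is O(1):

from xml.etree import ElementTree as ET

def build_tree(paths):
    root = ET.Element('root')
    nodes = {'': root}  # the parent of a top-level path such as '1/' is the empty prefix
    sort_key = lambda p: [int(part) for part in p.rstrip('/').split('/')]
    for p in sorted(paths, key=sort_key):  # a parent always sorts before its children
        parent_path = p[:p.rstrip('/').rfind('/') + 1]  # '1/2/3/' -> '1/2/'
        nodes[p] = ET.SubElement(nodes[parent_path], 'include', {'path': p})
    return root

s = {'1/', '1/2/4/', '1/23/4/', '1/2/', '1/1/3/', '1/2/3/4/', '1/1/', '1/23/', '1/2/3/'}
print(ET.tostring(build_tree(s)))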


Get this bounty!!!

#StackBounty: #performance #c #memory-management Tail implementation using a dynamic queue library in C

Bounty: 50

As per K & R exercise 5-13 I’ve written my own version of tail in C. It uses a newer version of a library for a dynamic queue that I also wrote and submitted here before (I’ve made a lot of changes to it since then). I’d like to get some feedback on how my code performs, both memory-wise and speed-wise, and how I might improve it in the future. I’ve compared its performance to the GNU implementation of tail and found that for small files my program uses less memory, but for larger files it uses a fair bit more (although I did find that GNU tail leaks memory – 96 bytes according to Valgrind), and I was hoping I could get some insight as to how it does this better.

tail.c

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dqueue.h>

#define MAX_LEN 256
#define MAX_LINES 32
#define DEFAULT_TAIL 10

int getlinep(char *s, int lim);

int main(int argc, char *argv[])
{
    char *line;
    char *temp;
    int tail_len = DEFAULT_TAIL;
    queue_t * queue = NULL;
    int elements;
    int len;

    if(!(queue = queue_init(sizeof(char *)))) {
        fprintf(stderr, "Error %d: Could not initialise queue!n", MEM_ERROR);
        return MEM_ERROR;
    }

    if(argc >= 2) {
        if(atoi(*(++argv))) {
            tail_len = -atoi(*argv);
            if(tail_len <= 0 || tail_len > MAX_LEN)
                tail_len = DEFAULT_TAIL;
        }
    }

    for(elements = 0; elements < tail_len; elements++) {
        if(!(line = malloc(MAX_LEN))) {
            fprintf(stderr, "Error: Memory allocation failure!n");
            return MEM_ERROR;
        }

        if(!getlinep(line, MAX_LEN)) {
            free(line);

            if(elements == 0) {
                queue_destroy(&queue);
                return EXIT_SUCCESS;
            }

            break;
        }

        queue_push(&line, queue);
    }

    if(elements == tail_len) {
        if(!(temp = malloc(MAX_LEN))) {
            fprintf(stderr, "Error: Memory allocation failure!n");
            return MEM_ERROR;
        }
        for(;;) {
            if(!(len = getlinep(temp, MAX_LEN))) {
                free(temp);
                break;
            }

            queue_pop(&line, queue);
            memcpy(line, temp, len + 1);
            queue_push(&line, queue);
        }
    }

    for(; elements > 0; elements--) {
        queue_pop(&line, queue);
        printf("%s", line);
        free(line);
    }

    queue_destroy(&queue);

    return EXIT_SUCCESS;
}

int getlinep(char *s, int lim)
{
    int i;

    for(i = 0; i < lim - 1 && (*s = getchar()) != EOF && *s != '\n'; i++, s++)
        ;

    if(*s == '\n') {
        s++;
        i++;
    }

    *s = '\0';

    return i;
}

dqueue.h

#ifndef DQUEUE_H
#define DQUEUE_H

#include <stdlib.h>                                                     /* Needed for size_t */

#define QUEUE_OK 0
#define MEM_ERROR -1                                                    /* Memory allocation error */
#define SIZE_ERROR -2                                                   /* Queue dimension error */
#define INDEX_ERROR -3                                                  /* No data at given index */

#define BLOCK_SIZE 1024

typedef struct queue_element_t
{
    void * data;                                                        /* Contains the data stored at this node */
    void * next;                                                        /* Contains the pointer to the next element, or NULL if it's the tail node */
} queue_element_t;

typedef struct
{
    queue_element_t *   head;                                           /* Pointer to the head of the queue */
    queue_element_t *   tail;                                           /* Pointer to the tail of the queue */
    size_t              size;                                           /* The number of elements in the queue */
    size_t              element_width;                                  /* The size of each element in the queue */
    size_t              tail_pos;                                       /* The byte offset of the data being pushed into the queue (i.e. in the tail block) */
    size_t              head_pos;                                       /* The byte offset of the data being popped out of the queue (i.e. in the head block) */
    int                 status;
} queue_t;

queue_t *   queue_init(size_t element_size);                            /* Initialise the queue data structure */
void        queue_pop(void * const element, queue_t * queue);           /* Pop an element from the front of the queue, deals with cleanup when the head node is empty */
int         queue_push(const void * const element, queue_t * queue);    /* Push an element to the back of the queue, creates a new block when tail node is full */
int         queue_debug(const queue_t * const queue);                   /* Print information about the queue, returns the queue status if a valid queue pointer is given  */
void        queue_destroy(queue_t * queue);                         /* Destroy the queue data structure and any associated nodes */

#endif

dqueue.c

/* 
 * Filename:    dqueue.c
 * Author:      Alexis Ferguson (highentropystring@gmail.com)
 * Date:        17/02/18
 * Licence:     GNU GPL V3
 *
 * Library for a lightweight, generic, and dynamically allocated queue
 *
 * Return/exit codes:
 *  QUEUE_OK        - No error
 *  SIZE_ERROR      - Queue size error (invalid block size or number of elements)
 *  MEM_ERROR       - Memory allocation error
 *  INDEX_ERROR     - Couldn't pop data from the queue
 *
 * All functions returning pointers will return NULL on memory allocation failure, else they will specify an error in queue->status for the user to handle
 *
 * Todo:
 *  - Add secure versions of queue_destroy() and queue_pop() to overwrite memory blocks that are no longer in use
 * 
 */

#include <dqueue.h>
#include <stdio.h>
#include <string.h>

queue_t * queue_init(size_t element_width)
{
    queue_t * queue;

    if(!(queue = malloc(sizeof(queue_t))))
        return NULL;

    if(BLOCK_SIZE % element_width != 0 || (queue->element_width = element_width) <= 0) {
        queue->status = SIZE_ERROR;
        return queue;
    }

    queue->tail_pos = 0;
    queue->head_pos = 0;
    queue->tail     = NULL;
    queue->head     = NULL;
    queue->size     = 0;
    queue->status   = QUEUE_OK;

    return queue;
}

void queue_destroy(queue_t * queue)
{
    queue_element_t * temp;

    if(queue == NULL)
        return;

    while(queue->head) {
        free(queue->head->data);
        temp = queue->head->next;
        free(queue->head);
        queue->head = temp;
    }

    queue->size             = 0;
    queue->status           = 0;
    queue->element_width    = 0;
    queue->tail_pos         = 0;
    queue->head_pos         = 0;
    queue->tail             = NULL;

    free(queue);
}

int queue_push(const void * const element, queue_t * queue)
{
    queue_element_t * new_element;

    if(queue->tail_pos == 0) {
        if(!(new_element = malloc(sizeof(queue_element_t)))) {
            queue->status = MEM_ERROR;
            return queue->status;
        }

        if(!(new_element->data = malloc(BLOCK_SIZE))) {
            free(new_element);
            queue->status = MEM_ERROR;
            return queue->status;
        }

        if(queue->head == NULL)
            queue->head = new_element;
        else
            queue->tail->next = new_element;

        queue->tail = new_element;
        queue->tail->next = NULL;
        queue->size++;
    }

    memcpy(queue->tail->data + queue->tail_pos, element, queue->element_width); 

    queue->tail_pos += queue->element_width;

    if(queue->tail_pos >= BLOCK_SIZE)
        queue->tail_pos = 0;

    return queue->status;
}

void queue_pop(void * const element, queue_t * queue)
{
    queue_element_t * temp;

    if(queue->head == NULL || ((queue->head == queue->tail) && (queue->head_pos == queue->tail_pos))) {
        if(queue->tail_pos == 0) { /* Catch an error related to resetting the tail position and incrementing a block after a block has been filled */
            queue->tail_pos = BLOCK_SIZE;
        } else {
            queue->status = INDEX_ERROR;
            return;
        }
    }

    memcpy(element, queue->head->data + queue->head_pos, queue->element_width);

    queue->head_pos += queue->element_width;

    if(queue->head_pos >= BLOCK_SIZE) {
        free(queue->head->data);
        temp = queue->head;
        queue->head = queue->head->next;
        free(temp);

        queue->head_pos = 0;
        queue->size--;
    }
}

int queue_debug(const queue_t * const queue)
{
    if(queue == NULL) {
        printf("Error: Invalid queue pointer!n");
        return MEM_ERROR;
    }

    if(queue->status == QUEUE_OK) {
        printf("Queue is %d blocks long with an element width of %d bytes with each block containing %d elementsnQueue head is at %p and the current element is %pn", (int)queue->size, (int)queue->element_width, BLOCK_SIZE / (int)queue->element_width, (void *)queue->head, (void *)queue->tail);
    } else if(queue->status == MEM_ERROR) {
        printf("Memory error in queue!n");
    } else if(queue->status == SIZE_ERROR) {
        printf("Size error in queue!n");
    } else if(queue->status == INDEX_ERROR) {
        printf("Index error in queue!n");
    }

    return queue->status;
}
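
For comparison with the queue-based version above, the usual way to keep tail’s memory bounded is a fixed ring of line buffers; a minimal sketch (not the reviewed code, no argument handling, and TAIL_LINES / LINE_LEN are arbitrary values):

#include <stdio.h>

#define TAIL_LINES 10
#define LINE_LEN 256

int main(void)
{
    static char ring[TAIL_LINES][LINE_LEN];
    long count = 0;

    /* Overwrite the oldest slot once the ring is full, so memory stays at
       TAIL_LINES * LINE_LEN bytes no matter how large the input is. */
    while (fgets(ring[count % TAIL_LINES], LINE_LEN, stdin) != NULL)
        count++;

    for (long i = count > TAIL_LINES ? count - TAIL_LINES : 0; i < count; i++)
        fputs(ring[i % TAIL_LINES], stdout);

    return 0;
}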


Get this bounty!!!

#StackBounty: #8 #performance Performance is notoriously slow

Bounty: 100

This feels like a repeat post but I can’t find an actual answer. I just installed Drupal 8.4.4 on an Azure App Service (S3 Standard 4core 7GB RAM / PHP 7.0.6) and Azure Database for MySQL (Standard 100 computes / v5.6.26.0). I’ve been told the host isn’t the issue.

Out of the box, the admin side is incredibly slow, like 4-5 seconds per click. I enabled page caching and CSS/JS aggregation. Dog slow. I worked through this and created my content types, added a couple of extensions, and enabled BigPipe (reported to help, but it didn’t).

What am I missing? How do I improve performance before I ask my content authors to start loading up content?

Status report from Drupal. (I have since fixed the reported trusted host settings error)

Performance of the page in Chrome. Looks like 2.8s waiting on the page

I created a test.php file, queried MySQL directly (the key_value table), and dumped all of that to the page, about 576 rows; it returned instantly. So the slowness appears to be Drupal-specific?

Devel bar on public page 182 DB queries

Devel bar on admin page 233 DB queries


Get this bounty!!!

#StackBounty: #c #linux #performance #epoll #event-loop Eventloop has high ksoftirqd load; nginx does not but does same system-calls. W…

Bounty: 50

I wrote some code that has an epoll event loop, accepts new connections and pretends to be an HTTP server.
The posted code is the absolute minimum … I removed everything (including all error checks) to make it as short and to the point as possible:

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <netinet/ip.h>
#include <netinet/in.h>
#include <sys/uio.h>
#include <unistd.h>


int main () {
    int servFd = socket (AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, IPPROTO_IP);
    int value = 1;
    setsockopt (servFd, SOL_SOCKET, SO_REUSEADDR, &value, sizeof (value));

    struct sockaddr_in servAddr;
    memset (&servAddr, 0, sizeof (servAddr));
    servAddr.sin_family = AF_INET;
    servAddr.sin_addr.s_addr = 0;
    servAddr.sin_port = htons (8081);
    bind (servFd, (struct sockaddr*)&servAddr, sizeof (servAddr));
    listen (servFd, 511);

    int efd = epoll_create1 (EPOLL_CLOEXEC);
    struct epoll_event epollEvt;
    epollEvt.events = EPOLLIN | EPOLLRDHUP;
    epollEvt.data.u32 = servFd;
    epoll_ctl (efd, EPOLL_CTL_ADD, servFd, &epollEvt);

    for (;;) {
        struct epoll_event pollEvent[512];
        int eventCount = epoll_wait (efd, pollEvent, 512, -1);
        for (int i = 0; i < eventCount; ++i) {
            struct epoll_event* curEvent = &pollEvent[i];
            if (curEvent->data.u32 == servFd) {
                int clientFd = accept4 (servFd, NULL, NULL, SOCK_NONBLOCK | SOCK_CLOEXEC);
                struct epoll_event epollEvt;
                epollEvt.events = EPOLLIN | EPOLLRDHUP | EPOLLET;
                epollEvt.data.u32 = clientFd;
                epoll_ctl (efd, EPOLL_CTL_ADD, clientFd, &epollEvt);
                continue;
            }

            int clientFd = curEvent->data.u32;
            char recvBuffer[2048];
            recvfrom (clientFd, recvBuffer, 2048, 0, NULL, NULL);
            char sndMsg[] = "HTTP/1.0 200 OKnServer: TestnDate: Sun, 14 May 2017 15:40:26 GMTnContent-type: text/htmlnnHello world!";
            size_t sndMsgLength = sizeof (sndMsg) - 1;
            struct iovec sndBuffer;
            sndBuffer.iov_base = sndMsg;
            sndBuffer.iov_len = sndMsgLength;
            writev (clientFd, &sndBuffer, 1);
            close (clientFd);
        }
    }
    return 0;
}

localhost:~# gcc -Wall test.c -o test

localhost:~# gcc --version
gcc (Alpine 6.4.0) 6.4.0
Copyright (C) 2017 Free Software Foundation, Inc.
This is free software; see the source for copying conditions.  There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

I did some load-testing of this code and compared it with nginx to see if I did something wrong or if there is something to improve. I expected this code to be the fastest possible implementation since every other “real” webserver has to do a lot more stuff in userspace. But still … somehow nginx beats it in requests per second when using multiple load-generator threads. (Note that I configured nginx to use just one worker to handle every request)

//ab running 1 worker from local machine 
localhost:~# ab -n 100000 -c 1 http://10.0.0.111:8081/index.html
Requests per second:    13228.07 [#/sec] (mean)  //[to nginx]
Requests per second:    15300.35 [#/sec] (mean)  //[to testcode]
//ab running 4 worker from local machine 
localhost:~# ab -n 100000 -c 4 http://10.0.0.111:8081/index.html
Requests per second:    30902.63 [#/sec] (mean)  //[to nginx]
Requests per second:    24734.76 [#/sec] (mean)  //[to testcode]

The first test has the expected result … the test code is faster since it doesn’t do anything except generate a hard-coded response. But why is nginx faster in a multi-threading setting? How can that be?
The only explanation I can come up with is that there is something different in kernel-space and that nginx uses some sockopts (like TCP_FASTOPEN or TCP_DEFER_ACCEPT) or maybe some other system calls to do its thing. That’s why I did some straces and made my code do the exact same thing as nginx does (from a kernel perspective) –> you can see the strace attached below. Still … it is faster and I don’t understand why.
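
For reference, this is roughly how those two options can be switched on for the listening socket in order to test that hypothesis (a sketch; enable_listen_opts and the values are illustrative, and error checking is omitted):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void enable_listen_opts (int servFd) {
    int defer = 1;  /* seconds the kernel may hold a connection until data arrives */
    setsockopt (servFd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &defer, sizeof (defer));

    int qlen = 16;  /* length of the TCP Fast Open queue */
    setsockopt (servFd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof (qlen));
}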

//ab running 50 worker from remote machine 
localhost:~# ab -n 100000 -c 50 http://10.0.0.111:8081/index.html
Requests per second:    27507.60 [#/sec] (mean)  //[to nginx]
Requests per second:    24208.51 [#/sec] (mean)  //[to testcode]

This test case has the exact same result, but I noticed some differences in CPU usage.

  • My test-code runs at about 60% CPU-load and ksoftirqd/0 runs at about 80%
  • nginx runs at about 99% CPU-load and ksoftirqd/0 runs at just 30%
  • ksoftirqd/0 has no noticeable CPU-load in the local-host setting in both cases

strace of nginx:

localhost:~# strace -tt -f /usr/sbin/nginx 
13:28:20.413497 execve("/usr/sbin/nginx", ["/usr/sbin/nginx"], 0x7a59e3d96490 /* 16 vars */) = 0
13:28:20.413987 arch_prctl(ARCH_SET_FS, 0x74ae1cf94b88) = 0
13:28:20.414161 set_tid_address(0x74ae1cf94bc0) = 2186
13:28:20.414350 open("/etc/ld-musl-x86_64.path", O_RDONLY|O_CLOEXEC) = -1 ENOENT (No such file or directory)
13:28:20.414519 open("/lib/libpcre.so.1", O_RDONLY|O_CLOEXEC) = -1 ENOENT (No such file or directory)
13:28:20.414679 open("/usr/local/lib/libpcre.so.1", O_RDONLY|O_CLOEXEC) = -1 ENOENT (No such file or directory)
13:28:20.414886 open("/usr/lib/libpcre.so.1", O_RDONLY|O_CLOEXEC) = 3
13:28:20.415067 fcntl(3, F_SETFD, FD_CLOEXEC) = 0
13:28:20.415230 fstat(3, {st_mode=S_IFREG|0755, st_size=370360, ...}) = 0
13:28:20.415415 read(3, "177ELF2113>1@24"..., 960) = 960
13:28:20.415599 mmap(NULL, 2469888, PROT_READ|PROT_EXEC, MAP_PRIVATE, 3, 0) = 0x74ae1caa9000
13:28:20.415809 mmap(0x74ae1cd02000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED, 3, 0x59000) = 0x74ae1cd02000
13:28:20.416020 close(3)                = 0
13:28:20.416218 open("/lib/libssl.so.44", O_RDONLY|O_CLOEXEC) = 3
13:28:20.416396 fcntl(3, F_SETFD, FD_CLOEXEC) = 0
13:28:20.416517 fstat(3, {st_mode=S_IFREG|0755, st_size=309664, ...}) = 0
13:28:20.416692 read(3, "177ELF2113>1pv1"..., 960) = 960
13:28:20.416939 mmap(NULL, 2408448, PROT_READ|PROT_EXEC, MAP_PRIVATE, 3, 0) = 0x74ae1c85d000
13:28:20.417120 mmap(0x74ae1caa1000, 32768, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED, 3, 0x44000) = 0x74ae1caa1000
13:28:20.417337 close(3)                = 0
13:28:20.417504 open("/lib/libcrypto.so.42", O_RDONLY|O_CLOEXEC) = 3
13:28:20.417644 fcntl(3, F_SETFD, FD_CLOEXEC) = 0
13:28:20.417802 fstat(3, {st_mode=S_IFREG|0755, st_size=1714280, ...}) = 0
13:28:20.418090 read(3, "177ELF2113>10046"..., 960) = 960
13:28:20.418269 mmap(NULL, 3825664, PROT_READ|PROT_EXEC, MAP_PRIVATE, 3, 0) = 0x74ae1c4b7000
13:28:20.418472 mmap(0x74ae1c836000, 159744, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED, 3, 0x17f000) = 0x74ae1c836000
13:28:20.418808 mmap(0x74ae1c859000, 16384, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x74ae1c859000
13:28:20.419067 close(3)                = 0
13:28:20.419280 open("/lib/libz.so.1", O_RDONLY|O_CLOEXEC) = 3
13:28:20.419478 fcntl(3, F_SETFD, FD_CLOEXEC) = 0
13:28:20.419716 fstat(3, {st_mode=S_IFREG|0755, st_size=91952, ...}) = 0
13:28:20.419901 read(3, "177ELF2113>1260!"..., 960) = 960
13:28:20.420065 mmap(NULL, 2191360, PROT_READ|PROT_EXEC, MAP_PRIVATE, 3, 0) = 0x74ae1c2a0000
13:28:20.420246 mmap(0x74ae1c4b5000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED, 3, 0x15000) = 0x74ae1c4b5000
13:28:20.420429 close(3)                = 0
13:28:20.420621 mprotect(0x74ae1cd02000, 4096, PROT_READ) = 0
13:28:20.420932 mprotect(0x74ae1caa1000, 16384, PROT_READ) = 0
13:28:20.421552 mprotect(0x74ae1c836000, 118784, PROT_READ) = 0
13:28:20.421794 mprotect(0x74ae1c4b5000, 4096, PROT_READ) = 0
13:28:20.422001 mprotect(0x74ae1cf91000, 4096, PROT_READ) = 0
13:28:20.422309 mprotect(0xd10421a5000, 8192, PROT_READ) = 0
13:28:20.422553 brk(NULL)               = 0xd104602de80
13:28:20.422687 brk(0xd1046032000)      = 0xd1046032000
13:28:20.423269 brk(0xd1046033000)      = 0xd1046033000
13:28:20.423621 brk(0xd1046034000)      = 0xd1046034000
13:28:20.423875 brk(0xd1046035000)      = 0xd1046035000
13:28:20.424206 brk(0xd1046036000)      = 0xd1046036000
13:28:20.424570 brk(0xd1046037000)      = 0xd1046037000
13:28:20.424861 brk(0xd1046038000)      = 0xd1046038000
13:28:20.425098 brk(0xd1046039000)      = 0xd1046039000
13:28:20.425435 brk(0xd104603a000)      = 0xd104603a000
13:28:20.425605 brk(0xd104603b000)      = 0xd104603b000
13:28:20.425826 brk(0xd104603c000)      = 0xd104603c000
13:28:20.426096 brk(0xd104603d000)      = 0xd104603d000
13:28:20.426369 open("/etc/localtime", O_RDONLY|O_NONBLOCK|O_CLOEXEC) = 3
13:28:20.426549 fstat(3, {st_mode=S_IFREG|0644, st_size=127, ...}) = 0
13:28:20.426723 mmap(NULL, 127, PROT_READ, MAP_SHARED, 3, 0) = 0x74ae1cf8c000
13:28:20.426847 close(3)                = 0
13:28:20.427023 getpid()                = 2186
13:28:20.427164 open("/var/lib/nginx/logs/error.log", O_WRONLY|O_CREAT|O_APPEND, 0644) = 3
13:28:20.427341 brk(0xd104603e000)      = 0xd104603e000
13:28:20.427503 open("/etc/ssl/openssl.cnf", O_RDONLY) = 4
13:28:20.427680 brk(0xd104603f000)      = 0xd104603f000
13:28:20.427819 readv(4, [{iov_base="", iov_len=0}, {iov_base="[ req ]n#default_bitstt= 2048n#d"..., iov_len=1024}], 2) = 745
13:28:20.428089 brk(0xd1046040000)      = 0xd1046040000
13:28:20.428243 readv(4, [{iov_base="", iov_len=0}, {iov_base="", iov_len=1024}], 2) = 0
13:28:20.428476 close(4)                = 0
13:28:20.428718 brk(0xd1046041000)      = 0xd1046041000
13:28:20.428880 brk(0xd1046042000)      = 0xd1046042000
13:28:20.429179 brk(0xd1046043000)      = 0xd1046043000
13:28:20.429319 brk(0xd1046044000)      = 0xd1046044000
13:28:20.429552 brk(0xd1046045000)      = 0xd1046045000
13:28:20.429775 brk(0xd1046046000)      = 0xd1046046000
13:28:20.429935 brk(0xd1046047000)      = 0xd1046047000
13:28:20.430220 brk(0xd1046048000)      = 0xd1046048000
13:28:20.430391 brk(0xd1046049000)      = 0xd1046049000
13:28:20.430515 brk(0xd104604b000)      = 0xd104604b000
13:28:20.430796 brk(0xd104604c000)      = 0xd104604c000
13:28:20.431042 brk(0xd104604d000)      = 0xd104604d000
13:28:20.431238 brk(0xd104604e000)      = 0xd104604e000
13:28:20.431396 brk(0xd104604f000)      = 0xd104604f000
13:28:20.431581 brk(0xd1046050000)      = 0xd1046050000
13:28:20.431820 brk(0xd1046051000)      = 0xd1046051000
13:28:20.432112 brk(0xd1046054000)      = 0xd1046054000
13:28:20.432374 brk(0xd1046055000)      = 0xd1046055000
13:28:20.432509 brk(0xd1046056000)      = 0xd1046056000
13:28:20.432666 brk(0xd1046057000)      = 0xd1046057000
13:28:20.432836 brk(0xd1046058000)      = 0xd1046058000
13:28:20.433004 brk(0xd1046059000)      = 0xd1046059000
13:28:20.433203 brk(0xd104605a000)      = 0xd104605a000
13:28:20.433400 brk(0xd104605b000)      = 0xd104605b000
13:28:20.433610 brk(0xd104605c000)      = 0xd104605c000
13:28:20.433740 brk(0xd104605d000)      = 0xd104605d000
13:28:20.433895 brk(0xd104605e000)      = 0xd104605e000
13:28:20.434020 brk(0xd104605f000)      = 0xd104605f000
13:28:20.434240 brk(0xd1046060000)      = 0xd1046060000
13:28:20.434419 brk(0xd1046061000)      = 0xd1046061000
13:28:20.434622 uname({sysname="Linux", nodename="localhost", ...}) = 0
13:28:20.434801 sched_getaffinity(0, 128, [0, 1, 2, 3, 4, 5]) = 32
13:28:20.435077 prlimit64(0, RLIMIT_NOFILE, NULL, {rlim_cur=1024, rlim_max=4*1024}) = 0
13:28:20.435260 brk(0xd1046066000)      = 0xd1046066000
13:28:20.435424 uname({sysname="Linux", nodename="localhost", ...}) = 0
13:28:20.435578 brk(0xd104606b000)      = 0xd104606b000
13:28:20.435700 open("/etc/nginx/nginx.conf", O_RDONLY) = 4
13:28:20.435912 fstat(4, {st_mode=S_IFREG|0644, st_size=2781, ...}) = 0
13:28:20.436115 pread64(4, "nnnuser nginx;npcre_jit on;n#tim"..., 2781, 0) = 2781
13:28:20.436284 geteuid()               = 0
13:28:20.436440 open("/etc/passwd", O_RDONLY|O_CLOEXEC) = 5
13:28:20.436596 fcntl(5, F_SETFD, FD_CLOEXEC) = 0
13:28:20.436725 fcntl(5, F_SETFD, FD_CLOEXEC) = 0
13:28:20.436857 readv(5, [{iov_base="", iov_len=0}, {iov_base="root:x:0:0:root:/root:/bin/ashnb"..., iov_len=1024}], 2) = 1024
13:28:20.437047 readv(5, [{iov_base="", iov_len=0}, {iov_base="sbin/nologinnntp:x:123:123:NTP:/"..., iov_len=1024}], 2) = 397
13:28:20.437235 lseek(5, -43, SEEK_CUR) = 1378
13:28:20.437353 close(5)                = 0
13:28:20.437520 open("/etc/group", O_RDONLY|O_CLOEXEC) = 5
13:28:20.437684 fcntl(5, F_SETFD, FD_CLOEXEC) = 0
13:28:20.437800 fcntl(5, F_SETFD, FD_CLOEXEC) = 0
13:28:20.437933 readv(5, [{iov_base="", iov_len=0}, {iov_base="root:x:0:rootnbin:x:1:root,bin,d"..., iov_len=1024}], 2) = 776
13:28:20.438097 close(5)                = 0
13:28:20.438240 epoll_create1(0)        = 5
13:28:20.438429 close(5)                = 0
13:28:20.438681 brk(0xd1046070000)      = 0xd1046070000
13:28:20.438842 brk(0xd1046072000)      = 0xd1046072000
13:28:20.439053 brk(0xd1046074000)      = 0xd1046074000
13:28:20.439175 brk(0xd1046076000)      = 0xd1046076000
13:28:20.439418 brk(0xd104607b000)      = 0xd104607b000
13:28:20.439655 mmap(NULL, 1052672, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x74ae1ce8b000
13:28:20.439886 brk(0xd1046080000)      = 0xd1046080000
13:28:20.440020 brk(0xd1046085000)      = 0xd1046085000
13:28:20.440225 open("/etc/nginx/mime.types", O_RDONLY) = 5
13:28:20.440380 fstat(5, {st_mode=S_IFREG|0644, st_size=3957, ...}) = 0
13:28:20.440523 pread64(5, "ntypes {n    text/html          "..., 3957, 0) = 3957
13:28:20.440725 close(5)                = 0
13:28:20.440977 brk(0xd104608a000)      = 0xd104608a000
13:28:20.441297 brk(0xd104608c000)      = 0xd104608c000
13:28:20.441453 close(4)                = 0
13:28:20.441587 mkdir("/var/tmp/nginx/client_body", 0700) = -1 EEXIST (File exists)
13:28:20.441814 stat("/var/tmp/nginx/client_body", {st_mode=S_IFDIR|0700, st_size=4096, ...}) = 0
13:28:20.442022 mkdir("/var/tmp/nginx/proxy", 0700) = -1 EEXIST (File exists)
13:28:20.442149 stat("/var/tmp/nginx/proxy", {st_mode=S_IFDIR|0700, st_size=4096, ...}) = 0
13:28:20.442257 mkdir("/var/tmp/nginx/fastcgi", 0700) = -1 EEXIST (File exists)
13:28:20.442407 stat("/var/tmp/nginx/fastcgi", {st_mode=S_IFDIR|0700, st_size=4096, ...}) = 0
13:28:20.442568 mkdir("/var/tmp/nginx/uwsgi", 0700) = -1 EEXIST (File exists)
13:28:20.442732 stat("/var/tmp/nginx/uwsgi", {st_mode=S_IFDIR|0700, st_size=4096, ...}) = 0
13:28:20.442945 mkdir("/var/tmp/nginx/scgi", 0700) = -1 EEXIST (File exists)
13:28:20.443051 stat("/var/tmp/nginx/scgi", {st_mode=S_IFDIR|0700, st_size=4096, ...}) = 0
13:28:20.443229 open("/var/log/nginx/access.log", O_WRONLY|O_CREAT|O_APPEND, 0644) = 4
13:28:20.443417 fcntl(4, F_SETFD, FD_CLOEXEC) = 0
13:28:20.443586 open("/var/log/nginx/error.log", O_WRONLY|O_CREAT|O_APPEND, 0644) = 5
13:28:20.443750 fcntl(5, F_SETFD, FD_CLOEXEC) = 0
13:28:20.443889 open("/var/lib/nginx/logs/error.log", O_WRONLY|O_CREAT|O_APPEND, 0644) = 6
13:28:20.444040 fcntl(6, F_SETFD, FD_CLOEXEC) = 0
13:28:20.444197 mmap(NULL, 2097152, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0) = 0x74ae1c0a0000
13:28:20.444382 socket(AF_INET, SOCK_STREAM, IPPROTO_IP) = 7
13:28:20.444515 setsockopt(7, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
13:28:20.444680 ioctl(7, FIONBIO, [1])  = 0
13:28:20.444808 bind(7, {sa_family=AF_INET, sin_port=htons(8081), sin_addr=inet_addr("0.0.0.0")}, 16) = 0
13:28:20.445015 listen(7, 511)          = 0
13:28:20.445140 listen(7, 511)          = 0
13:28:20.445326 mmap(NULL, 65536, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x74ae1ce7b000
13:28:20.445493 prlimit64(0, RLIMIT_NOFILE, NULL, {rlim_cur=1024, rlim_max=4*1024}) = 0
13:28:20.445671 mmap(NULL, 1280, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0) = 0x74ae1ce7a000
13:28:20.445817 rt_sigprocmask(SIG_UNBLOCK, [RT_1 RT_2], NULL, 8) = 0
13:28:20.445977 rt_sigaction(SIGHUP, {sa_handler=0xd1041f1f3fc, sa_mask=[], sa_flags=SA_RESTORER, sa_restorer=0x74ae1cd4a6cf}, NULL, 8) = 0
13:28:20.446097 rt_sigaction(SIGUSR1, {sa_handler=0xd1041f1f3fc, sa_mask=[], sa_flags=SA_RESTORER, sa_restorer=0x74ae1cd4a6cf}, NULL, 8) = 0
13:28:20.446247 rt_sigaction(SIGWINCH, {sa_handler=0xd1041f1f3fc, sa_mask=[], sa_flags=SA_RESTORER, sa_restorer=0x74ae1cd4a6cf}, NULL, 8) = 0
13:28:20.446438 rt_sigaction(SIGTERM, {sa_handler=0xd1041f1f3fc, sa_mask=[], sa_flags=SA_RESTORER, sa_restorer=0x74ae1cd4a6cf}, NULL, 8) = 0
13:28:20.446635 rt_sigaction(SIGQUIT, {sa_handler=0xd1041f1f3fc, sa_mask=[], sa_flags=SA_RESTORER, sa_restorer=0x74ae1cd4a6cf}, NULL, 8) = 0
13:28:20.446886 rt_sigaction(SIGUSR2, {sa_handler=0xd1041f1f3fc, sa_mask=[], sa_flags=SA_RESTORER, sa_restorer=0x74ae1cd4a6cf}, NULL, 8) = 0
13:28:20.447093 rt_sigaction(SIGALRM, {sa_handler=0xd1041f1f3fc, sa_mask=[], sa_flags=SA_RESTORER, sa_restorer=0x74ae1cd4a6cf}, NULL, 8) = 0
13:28:20.447236 rt_sigaction(SIGINT, {sa_handler=0xd1041f1f3fc, sa_mask=[], sa_flags=SA_RESTORER, sa_restorer=0x74ae1cd4a6cf}, NULL, 8) = 0
13:28:20.447446 rt_sigaction(SIGIO, {sa_handler=0xd1041f1f3fc, sa_mask=[], sa_flags=SA_RESTORER, sa_restorer=0x74ae1cd4a6cf}, NULL, 8) = 0
13:28:20.447767 rt_sigaction(SIGCHLD, {sa_handler=0xd1041f1f3fc, sa_mask=[], sa_flags=SA_RESTORER, sa_restorer=0x74ae1cd4a6cf}, NULL, 8) = 0
13:28:20.447888 rt_sigaction(SIGSYS, {sa_handler=SIG_IGN, sa_mask=[], sa_flags=SA_RESTORER, sa_restorer=0x74ae1cd4a6cf}, NULL, 8) = 0
13:28:20.448094 rt_sigaction(SIGPIPE, {sa_handler=SIG_IGN, sa_mask=[], sa_flags=SA_RESTORER, sa_restorer=0x74ae1cd4a6cf}, NULL, 8) = 0
13:28:20.448253 rt_sigprocmask(SIG_BLOCK, ~[], [], 8) = 0
13:28:20.448396 fork(strace: Process 2187 attached
)                  = 2187
[pid  2187] 13:28:20.448594 gettid( <unfinished ...>
[pid  2186] 13:28:20.448643 rt_sigprocmask(SIG_SETMASK, [],  <unfinished ...>
[pid  2187] 13:28:20.448671 <... gettid resumed> ) = 2187
[pid  2186] 13:28:20.448700 <... rt_sigprocmask resumed> NULL, 8) = 0
[pid  2187] 13:28:20.448765 rt_sigprocmask(SIG_SETMASK, [],  <unfinished ...>
[pid  2186] 13:28:20.448792 exit_group(0 <unfinished ...>
[pid  2187] 13:28:20.448812 <... rt_sigprocmask resumed> NULL, 8) = 0
[pid  2186] 13:28:20.448836 <... exit_group resumed>) = ?
[pid  2187] 13:28:20.448854 getpid()    = 2187
[pid  2187] 13:28:20.448951 setsid( <unfinished ...>
[pid  2186] 13:28:20.449046 +++ exited with 0 +++
13:28:20.449055 <... setsid resumed> )  = 2187
13:28:20.449107 umask(000)              = 022
13:28:20.449212 open("/dev/null", O_RDWR) = 8
13:28:20.449309 dup2(8, 0)              = 0
13:28:20.449455 dup2(8, 1)              = 1
13:28:20.449573 close(8)                = 0
13:28:20.449692 open("/run/nginx/nginx.pid", O_RDWR|O_CREAT|O_TRUNC, 0644) = 8
13:28:20.449848 pwrite64(8, "2187n", 5, 0) = 5
13:28:20.449978 close(8)                = 0
13:28:20.450111 dup2(6, 2)              = 2
13:28:20.450218 close(3)                = 0
13:28:20.450376 rt_sigprocmask(SIG_BLOCK, [HUP INT QUIT USR1 USR2 ALRM TERM CHLD WINCH IO], NULL, 8) = 0
13:28:20.450499 socketpair(AF_UNIX, SOCK_STREAM, 0, [3, 8]) = 0
13:28:20.450603 ioctl(3, FIONBIO, [1])  = 0
13:28:20.450696 ioctl(8, FIONBIO, [1])  = 0
13:28:20.450830 ioctl(3, FIOASYNC, [1]) = 0
13:28:20.450964 fcntl(3, F_SETOWN, 2187) = 0
13:28:20.451079 fcntl(3, F_SETFD, FD_CLOEXEC) = 0
13:28:20.451148 fcntl(8, F_SETFD, FD_CLOEXEC) = 0
13:28:20.451244 rt_sigprocmask(SIG_BLOCK, ~[], [HUP INT QUIT USR1 USR2 ALRM TERM CHLD WINCH IO], 8) = 0
13:28:20.451379 fork(strace: Process 2188 attached
 <unfinished ...>
[pid  2188] 13:28:20.451596 gettid( <unfinished ...>
[pid  2187] 13:28:20.451615 <... fork resumed> ) = 2188
[pid  2187] 13:28:20.451727 rt_sigprocmask(SIG_SETMASK, [HUP INT QUIT USR1 USR2 ALRM TERM CHLD WINCH IO],  <unfinished ...>
[pid  2188] 13:28:20.451754 <... gettid resumed> ) = 2188
[pid  2187] 13:28:20.451774 <... rt_sigprocmask resumed> NULL, 8) = 0
[pid  2188] 13:28:20.451942 rt_sigprocmask(SIG_SETMASK, [HUP INT QUIT USR1 USR2 ALRM TERM CHLD WINCH IO],  <unfinished ...>
[pid  2187] 13:28:20.451969 rt_sigsuspend([], 8 <unfinished ...>
[pid  2188] 13:28:20.451985 <... rt_sigprocmask resumed> NULL, 8) = 0
[pid  2188] 13:28:20.452053 getpid()    = 2188
[pid  2188] 13:28:20.452330 rt_sigprocmask(SIG_BLOCK, ~[RTMIN RT_1 RT_2], [HUP INT QUIT USR1 USR2 ALRM TERM CHLD WINCH IO], 8) = 0
[pid  2188] 13:28:20.452621 rt_sigprocmask(SIG_BLOCK, ~[], NULL, 8) = 0
[pid  2188] 13:28:20.452893 prlimit64(0, RLIMIT_NOFILE, {rlim_cur=8*1024, rlim_max=8*1024}, NULL) = 0
[pid  2188] 13:28:20.453075 futex(0x74ae1cf95064, FUTEX_WAKE_PRIVATE, 2147483647) = 0
[pid  2188] 13:28:20.453279 rt_sigprocmask(SIG_SETMASK, [HUP INT QUIT USR1 USR2 ALRM TERM CHLD WINCH IO], NULL, 8) = 0
[pid  2188] 13:28:20.453487 geteuid()   = 0
[pid  2188] 13:28:20.453667 rt_sigprocmask(SIG_BLOCK, ~[RTMIN RT_1 RT_2], [HUP INT QUIT USR1 USR2 ALRM TERM CHLD WINCH IO], 8) = 0
[pid  2188] 13:28:20.453861 rt_sigprocmask(SIG_BLOCK, ~[], NULL, 8) = 0
[pid  2188] 13:28:20.454091 setgid(103) = 0
[pid  2188] 13:28:20.454335 futex(0x74ae1cf95064, FUTEX_WAKE_PRIVATE, 2147483647) = 0
[pid  2188] 13:28:20.454583 rt_sigprocmask(SIG_SETMASK, [HUP INT QUIT USR1 USR2 ALRM TERM CHLD WINCH IO], NULL, 8) = 0
[pid  2188] 13:28:20.454822 socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC, 0) = 9
[pid  2188] 13:28:20.455183 connect(9, {sa_family=AF_UNIX, sun_path="/var/run/nscd/socket"}, 24) = -1 ENOENT (No such file or directory)
[pid  2188] 13:28:20.455537 close(9)    = 0
[pid  2188] 13:28:20.455800 open("/etc/group", O_RDONLY|O_CLOEXEC) = 9
[pid  2188] 13:28:20.456030 fcntl(9, F_SETFD, FD_CLOEXEC) = 0
[pid  2188] 13:28:20.456331 fcntl(9, F_SETFD, FD_CLOEXEC) = 0
[pid  2188] 13:28:20.456544 readv(9, [{iov_base="", iov_len=0}, {iov_base="root:x:0:rootnbin:x:1:root,bin,d"..., iov_len=1024}], 2) = 776
[pid  2188] 13:28:20.456799 readv(9, [{iov_base="", iov_len=0}, {iov_base="", iov_len=1024}], 2) = 0
[pid  2188] 13:28:20.456956 close(9)    = 0
[pid  2188] 13:28:20.457134 setgroups(3, [103, 82, 103]) = 0
[pid  2188] 13:28:20.457365 rt_sigprocmask(SIG_BLOCK, ~[RTMIN RT_1 RT_2], [HUP INT QUIT USR1 USR2 ALRM TERM CHLD WINCH IO], 8) = 0
[pid  2188] 13:28:20.457534 rt_sigprocmask(SIG_BLOCK, ~[], NULL, 8) = 0
[pid  2188] 13:28:20.457818 setuid(102) = 0
[pid  2188] 13:28:20.457990 futex(0x74ae1cf95064, FUTEX_WAKE_PRIVATE, 2147483647) = 0
[pid  2188] 13:28:20.458159 rt_sigprocmask(SIG_SETMASK, [HUP INT QUIT USR1 USR2 ALRM TERM CHLD WINCH IO], NULL, 8) = 0
[pid  2188] 13:28:20.458378 prctl(PR_SET_DUMPABLE, SUID_DUMP_USER) = 0
[pid  2188] 13:28:20.458598 chdir("/var/www") = 0
[pid  2188] 13:28:20.458868 rt_sigprocmask(SIG_SETMASK, [], NULL, 8) = 0
[pid  2188] 13:28:20.459703 epoll_create1(0) = 9
[pid  2188] 13:28:20.459994 eventfd2(0, 0) = 10
[pid  2188] 13:28:20.460340 epoll_ctl(9, EPOLL_CTL_ADD, 10, {EPOLLIN|EPOLLET, {u32=1109208384, u64=14363479846208}}) = 0
[pid  2188] 13:28:20.460600 eventfd2(0, 0) = 11
[pid  2188] 13:28:20.460878 ioctl(11, FIONBIO, [1]) = 0
[pid  2188] 13:28:20.461043 io_setup(32, [0x74ae1ce79000]) = 0
[pid  2188] 13:28:20.461389 epoll_ctl(9, EPOLL_CTL_ADD, 11, {EPOLLIN|EPOLLET, {u32=1109208032, u64=14363479845856}}) = 0
[pid  2188] 13:28:20.461729 socketpair(AF_UNIX, SOCK_STREAM, 0, [12, 13]) = 0
[pid  2188] 13:28:20.462043 epoll_ctl(9, EPOLL_CTL_ADD, 12, {EPOLLIN|EPOLLRDHUP|EPOLLET, {u32=1109208032, u64=14363479845856}}) = 0
[pid  2188] 13:28:20.462255 close(13)   = 0
[pid  2188] 13:28:20.462608 epoll_pwait(9, [{EPOLLIN|EPOLLHUP|EPOLLRDHUP, {u32=1109208032, u64=14363479845856}}], 1, 5000, NULL, 8) = 1
[pid  2188] 13:28:20.462969 close(12)   = 0
[pid  2188] 13:28:20.463325 mmap(NULL, 987136, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x74ae1bfaf000
[pid  2188] 13:28:20.463517 mmap(NULL, 397312, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x74ae1ce18000
[pid  2188] 13:28:20.464039 mmap(NULL, 397312, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x74ae1cdb7000
[pid  2188] 13:28:20.466039 epoll_ctl(9, EPOLL_CTL_ADD, 7, {EPOLLIN|EPOLLRDHUP, {u32=469430304, u64=128291142561824}}) = 0
[pid  2188] 13:28:20.466432 close(3)    = 0
[pid  2188] 13:28:20.466763 epoll_ctl(9, EPOLL_CTL_ADD, 8, {EPOLLIN|EPOLLRDHUP, {u32=469430544, u64=128291142562064}}) = 0
//Eventloop starts here
[pid  2188] 13:28:20.467046 epoll_pwait(9, [{EPOLLIN, {u32=469430304, u64=128291142561824}}], 512, -1, NULL, 8) = 1
[pid  2188] 13:28:34.390021 accept4(7, {sa_family=AF_INET, sin_port=htons(54280), sin_addr=inet_addr("10.0.0.15")}, [112->16], SOCK_NONBLOCK) = 3
[pid  2188] 13:28:34.390110 epoll_ctl(9, EPOLL_CTL_ADD, 3, {EPOLLIN|EPOLLRDHUP|EPOLLET, {u32=469430784, u64=128291142562304}}) = 0
[pid  2188] 13:28:34.390188 epoll_pwait(9, [{EPOLLIN, {u32=469430784, u64=128291142562304}}], 512, 30000, NULL, 8) = 1
[pid  2188] 13:28:34.390245 recvfrom(3, "GET /index.html HTTP/1.0rnHost: "..., 2048, 0, NULL, NULL) = 93
[pid  2188] 13:28:34.390462 writev(3, [{iov_base="HTTP/1.1 200 OKrnServer: nginxrn"..., iov_len=134}], 1) = 134
[pid  2188] 13:28:34.390602 close(3)    = 0

strace of the test code:

localhost:/~# strace -tt -f ./test 
13:31:19.964887 execve("./test", ["./test"], 0x721039661e10 /* 16 vars */) = 0
13:31:20.086769 arch_prctl(ARCH_SET_FS, 0x70311bc79b88) = 0
13:31:20.087599 set_tid_address(0x70311bc79bc0) = 2199
13:31:20.088375 mprotect(0x70311bc76000, 4096, PROT_READ) = 0
13:31:20.088717 mprotect(0x268c786b000, 4096, PROT_READ) = 0
13:31:20.088964 socket(AF_INET, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, IPPROTO_IP) = 3
13:31:20.089232 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
13:31:20.089402 bind(3, {sa_family=AF_INET, sin_port=htons(8081), sin_addr=inet_addr("0.0.0.0")}, 16) = 0
13:31:20.089579 listen(3, 511)          = 0
13:31:20.089797 epoll_create1(EPOLL_CLOEXEC) = 4
13:31:20.090018 epoll_ctl(4, EPOLL_CTL_ADD, 3, {EPOLLIN|EPOLLRDHUP, {u32=3, u64=3}}) = 0
13:31:20.090235 epoll_pwait(4, [{EPOLLIN, {u32=3, u64=3}}], 512, -1, NULL, 8) = 1
13:31:24.078593 accept4(3, NULL, NULL, SOCK_CLOEXEC|SOCK_NONBLOCK) = 5
13:31:24.078847 epoll_ctl(4, EPOLL_CTL_ADD, 5, {EPOLLIN|EPOLLRDHUP|EPOLLET, {u32=5, u64=5}}) = 0
13:31:24.079024 epoll_pwait(4, [{EPOLLIN, {u32=5, u64=5}}], 512, -1, NULL, 8) = 1
13:31:24.079197 recvfrom(5, "GET /index.html HTTP/1.0rnHost: "..., 2048, 0, NULL, NULL) = 93
13:31:24.079407 writev(5, [{iov_base="HTTP/1.0 200 OKnServer: TestnDat"..., iov_len=102}], 1) = 102
13:31:24.079604 close(5)                = 0

Edit:
I did some more tracing … 400000 requests from a remote host … still no clue why this happens:

localhost:/~# strace -c -f /usr/sbin/nginx 
% time     seconds  usecs/call     calls    errors syscall
------ ----------- ----------- --------- --------- ----------------
 47.11    0.040309           0    400000           writev
 44.55    0.038115           0    400021           close
  3.11    0.002658           0    400002           accept4
  1.80    0.001538           0    400002           recvfrom
  1.74    0.001486           0    400007           epoll_ctl
  1.69    0.001450           0    400008           epoll_pwait

localhost:/~# strace -c -f ./test 
% time     seconds  usecs/call     calls    errors syscall
------ ----------- ----------- --------- --------- ----------------
 47.90    0.042760           0    400002           writev
 44.27    0.039518           0    400002           close
  3.13    0.002793           0    400002           accept4
  1.80    0.001610           0    400002           recvfrom
  1.57    0.001400           0    400005           epoll_pwait
  1.33    0.001183           0    400003           epoll_ctl


Get this bounty!!!

#StackBounty: #python #performance #neural-network Deep Neural Network in Python

Bounty: 100

I have written a neural network in Python and focused on adaptability and performance. I want to use it to dive deeper into that field. I am far from being an expert in neural networks and the same goes for Python. I do not want to use Tensorflow since I really want to understand how a neural network works.

My questions are:

  • How can I increase the performance? At the moment it takes days to train the network.

The code runs on a single core. But since the iterations of the loop over the batch run independently of each other, they can be parallelized.

  • How can I parallelize the loop over the batches?

I found some tutorials on parallel loops in Python but I could not apply them to my problem.
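
A minimal, self-contained sketch of one way to do that (not the author's network: a toy two-layer net with made-up sizes; grad_chunk and the hyper-parameters are illustrative): split one mini-batch into chunks, let each worker process compute its chunk's gradients via multiprocessing.Pool, and sum the results before the update. Each chunk is also processed with matrix operations instead of a per-image loop, which by itself is usually the bigger speed-up:

from multiprocessing import Pool
import numpy as np

def sigma(x):
    return 1.0 / (1.0 + np.exp(-x))

def grad_chunk(args):
    # Returns (dW2, dW1, db2, db1) summed over one chunk of the batch.
    W1, b1, W2, b2, X, Y = args              # X: (n, d) inputs, Y: (n, k) one-hot labels
    Z1 = X @ W1.T + b1.T                     # (n, h) pre-activations
    A1 = sigma(Z1)
    Z2 = A1 @ W2.T + b2.T                    # (n, k)
    A2 = sigma(Z2)
    d2 = (A2 - Y) * A2 * (1.0 - A2)          # output-layer delta
    d1 = (d2 @ W2) * A1 * (1.0 - A1)         # hidden-layer delta
    return d2.T @ A1, d1.T @ X, d2.sum(0)[:, None], d1.sum(0)[:, None]

if __name__ == '__main__':
    rng = np.random.default_rng(0)
    d, h, k, n, eta = 20, 16, 10, 128, 0.1   # toy sizes
    W1, b1 = rng.standard_normal((h, d)), np.zeros((h, 1))
    W2, b2 = rng.standard_normal((k, h)), np.zeros((k, 1))
    X, Y = rng.random((n, d)), np.eye(k)[rng.integers(0, k, n)]

    chunks = [(W1, b1, W2, b2, X[idx], Y[idx]) for idx in np.array_split(np.arange(n), 4)]
    with Pool(4) as pool:
        parts = pool.map(grad_chunk, chunks)

    dW2 = sum(p[0] for p in parts) / n
    dW1 = sum(p[1] for p in parts) / n
    db2 = sum(p[2] for p in parts) / n
    db1 = sum(p[3] for p in parts) / n
    W2 -= eta * dW2
    b2 -= eta * db2
    W1 -= eta * dW1
    b1 -= eta * db1
    print(dW1.shape, dW2.shape)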

Here is my tested code with some pseudo training data:

from numpy import random, zeros, array, dot
from scipy.special import expit
import time 

def sigma(x):
    return expit(x)

def sigma_prime(x):
    u = expit(x)
    return  u-u*u 

def SGD(I, L, batch_size, eta):

    images = len(L)

    # Pre-activation
    z = [zeros((layer_size[l],1)) for l in range(1,nn_size)]

    # Activations
    a = [zeros((layer_size[l],1)) for l in range(nn_size)]

    # Ground truth      
    y = zeros((images, layer_size[-1]))
    for i in range(images):
        y[i,L[i]] = 1.0

    while (1):

        t0 = time.time()

        # Create random batch
        batch = random.randint(0,images,batch_size)

        dW = [zeros((layer_size[l+1], layer_size[l])) for l in range(nn_size-1)]
        db = [zeros((layer_size[l],1)) for l in range(1, nn_size)]

        for i in batch:        
            # Feedforward
            a[0] = array([I[i]]).T
            for l in range(nn_size-1):
                z[l] = dot(W[l], a[l]) + b[l]
                a[l+1] = sigma(z[l])

            # Backpropagation
            delta = (a[nn_size-1]-array([y[i]]).T) * sigma_prime(z[nn_size-2])
            dW[nn_size-2] += dot(delta, a[nn_size-2].T)
            db[nn_size-2] += delta
            for l in reversed(range(nn_size-2)):
                delta = dot(W[l+1].T, delta) * sigma_prime(z[l])
                dW[l] += dot(delta, a[l].T)
                db[l] += delta

        # Update Weights and Biases
        for l in range(nn_size-1):
            W[l] += - eta * dW[l] / batch_size
            b[l] += - eta * db[l] / batch_size

        print(time.time() - t0)

input_size = 1000
output_size = 10

layer_size = [input_size, 30**2, 30**2, 30**2, output_size]

nn_size = len(layer_size)
layer_size = layer_size

# Weights
W = [random.randn(layer_size[l+1],layer_size[l]) for l in range(nn_size-1)]

# Bias
b = [random.randn(layer_size[l],1) for l in range(1,nn_size)]

# Some random training data with label
size_training_data = 1000
I = random.rand(size_training_data, input_size)
L = random.randint(0,10, input_size)

batch_size = 100
eta = 0.1
SGD(I, L, batch_size, eta)


Get this bounty!!!

#StackBounty: #sql-server #performance #index #index-tuning #deadlock SQL deadlock on nonclustered key caused by two INSERTs and a CHEC…

Bounty: 50

Been struggling with deadlocking on a table during INSERTs. It’s a multi-tenant database and Read Committed Snapshot Isolation (RCSI) is enabled.

Deadlock graph

There is a CHECK constraint, enforced upon INSERT, that implements logic around double bookings; it executes a scalar-valued function and checks for a result of 0. The constraint looks up the same table with a READCOMMITTEDLOCK hint to check for violations of the logic where the ID (PK/clustered index) doesn’t equal the ID of the newly inserted row.

The constraint does an INDEX SEEK on the index causing the deadlock: idx_report_foobar.
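
The check the constraint performs has roughly this shape (a hypothetical reconstruction pieced together from the statement captured in the deadlock XML below; the constraint name and the exact definitions are not from the database):

CREATE FUNCTION dbo.CheckForDoubleBookings
    (@id int, @userID int, @eventID int, @bookingStart datetime2(7), @bookingEnd datetime2(7))
RETURNS int
AS
BEGIN
    IF EXISTS (SELECT *
               FROM   dbo.bookings a WITH (READCOMMITTEDLOCK)
               WHERE  a.id <> @id
                      AND a.userID = @userID
                      AND @bookingStart < a.bookingEnd
                      AND a.bookingStart < @bookingEnd
                      AND a.eventID = @eventID)
        RETURN 1;
    RETURN 0;
END;
GO

-- The constraint rejects an INSERT/UPDATE whenever the function reports a clash.
ALTER TABLE dbo.bookings WITH CHECK
ADD CONSTRAINT CK_bookings_NoDoubleBooking
CHECK (dbo.CheckForDoubleBookings(id, userID, eventID, bookingStart, bookingEnd) = 0);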

Any assistance would be greatly appreciated.

Here is the XML (which has been adjusted to remove some of the logic and the names of table fields in the database):

<deadlock>
 <victim-list>
  <victimProcess id="process91591c108" />
 </victim-list>
 <process-list>
  <process id="process91591c108" taskpriority="0" logused="1328" waitresource="KEY: 9:72057594095861760 (c2e966d5eb6a)" waittime="3046" ownerId="2628292921" transactionname="user_transaction" lasttranstarted="2018-03-09T14:24:13.820" XDES="0x708a80d80" lockMode="S" schedulerid="10" kpid="8964" status="suspended" spid="119" sbid="2" ecid="0" priority="0" trancount="2" lastbatchstarted="2018-03-09T14:24:13.823" lastbatchcompleted="2018-03-09T14:24:13.820" lastattention="1900-01-01T00:00:00.820" clientapp=".Net SqlClient Data Provider" hostname="SERVERNAMEHERE" hostpid="33672" loginname="DOMAINUSERHERE" isolationlevel="read committed (2)" xactid="2628292921" currentdb="9" lockTimeout="4294967295" clientoption1="671088672" clientoption2="128056">
   <executionStack>
    <frame procname="mydb.dbo.CheckForDoubleBookings" line="12" stmtstart="920" stmtend="3200" sqlhandle="0x0300090018ef9b72531bea009ea8000000000000000000000000000000000000000000000000000000000000">
IF EXISTS (SELECT * 
                 FROM   dbo.bookings a WITH (READCOMMITTEDLOCK)
                 WHERE  a.id &lt;&gt; @id 
                        AND a.userID = @userID 
                        AND @bookingStart &lt; a.bookingEnd 
                        AND a.bookingStart &lt; @bookingEnd
                        AND a.eventID = @eventID
    </frame>
    <frame procname="adhoc" line="1" stmtstart="288" stmtend="922" sqlhandle="0x020000005ed9af11c02db2af69df1d5fb6d1adb0e4812afb0000000000000000000000000000000000000000">
unknown    </frame>
    <frame procname="unknown" line="1" sqlhandle="0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000">
unknown    </frame>
   </executionStack>
   <inputbuf>
(@0 datetime2(7),@1 datetime2(7),@2 int,@3 int,@4 int,@5 int,@6 int,@7 nvarchar(4000),@8 datetime2(7),@9 nvarchar(50),@10 int,@11 nvarchar(255))INSERT [dbo].[bookings]([bookingStart], [bookingEnd], [userID], [eventID], [TypeId], [Notes], [Timestamp], [AddedById])
VALUES (@0, @1, @2, @3, @4, @5, @6, @7, @8, NULL, @9, @10, @11, NULL, NULL)
SELECT [Id]
FROM [dbo].[bookings]
WHERE @@ROWCOUNT &gt; 0 AND [Id] = scope_identity()   </inputbuf>
  </process>
  <process id="processca27768c8" taskpriority="0" logused="1328" waitresource="KEY: 9:72057594095861760 (3ba50d420e66)" waittime="3048" ownerId="2628280537" transactionname="user_transaction" lasttranstarted="2018-03-09T14:24:04.063" XDES="0xa555403b0" lockMode="S" schedulerid="6" kpid="12776" status="suspended" spid="124" sbid="2" ecid="0" priority="0" trancount="2" lastbatchstarted="2018-03-09T14:24:04.070" lastbatchcompleted="2018-03-09T14:24:04.063" lastattention="1900-01-01T00:00:00.063" clientapp=".Net SqlClient Data Provider" hostname="SERVERNAMEHERE" hostpid="33672" loginname="DOMAINUSERHERE" isolationlevel="read committed (2)" xactid="2628280537" currentdb="9" lockTimeout="4294967295" clientoption1="671088672" clientoption2="128056">
   <executionStack>
    <frame procname="mydb.dbo.CheckForDoubleBookings" line="12" stmtstart="920" stmtend="3200" sqlhandle="0x0300090018ef9b72531bea009ea8000000000000000000000000000000000000000000000000000000000000">
IF EXISTS (SELECT * 
                 FROM   dbo.bookings a WITH (READCOMMITTEDLOCK)
                 WHERE  a.id &lt;&gt; @id 
                        AND a.userID = @userID 
                        AND @bookingStart &lt; a.bookingEnd 
                        AND a.bookingStart &lt; @bookingEnd
                        AND a.eventID = @eventID
    </frame>
    <frame procname="adhoc" line="1" stmtstart="288" stmtend="922" sqlhandle="0x020000005ed9af11c02db2af69df1d5fb6d1adb0e4812afb0000000000000000000000000000000000000000">
unknown    </frame>
    <frame procname="unknown" line="1" sqlhandle="0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000">
unknown    </frame>
   </executionStack>
   <inputbuf>
(@0 datetime2(7),@1 datetime2(7),@2 int,@3 int,@4 int,@5 int,@6 int,@7 nvarchar(4000),@8 datetime2(7),@9 nvarchar(50),@10 int,@11 nvarchar(255))INSERT [dbo].[bookings]([bookingStart], [bookingEnd], [userID], [eventID], [TypeId], [Notes], [Timestamp], [AddedById])
VALUES (@0, @1, @2, @3, @4, @5, @6, @7, @8, NULL, @9, @10, @11, NULL, NULL)
SELECT [Id]
FROM [dbo].[bookings]
WHERE @@ROWCOUNT &gt; 0 AND [Id] = scope_identity()   </inputbuf>
  </process>
 </process-list>
 <resource-list>
  <keylock hobtid="72057594095861760" dbid="9" objectname="mydb.dbo.bookings" indexname="idx_report_foobar" id="locke83fdbe80" mode="X" associatedObjectId="72057594095861760">
   <owner-list>
    <owner id="processca27768c8" mode="X" />
   </owner-list>
   <waiter-list>
    <waiter id="process91591c108" mode="S" requestType="wait" />
   </waiter-list>
  </keylock>
  <keylock hobtid="72057594095861760" dbid="9" objectname="mydb.dbo.bookings" indexname="idx_report_foobar" id="lock7fdb48480" mode="X" associatedObjectId="72057594095861760">
   <owner-list>
    <owner id="process91591c108" mode="X" />
   </owner-list>
   <waiter-list>
    <waiter id="processca27768c8" mode="S" requestType="wait" />
   </waiter-list>
  </keylock>
 </resource-list>
</deadlock>

The index:

CREATE NONCLUSTERED INDEX [idx_report_foobar] ON [dbo].[bookings]
(
    [eventID] ASC
)
INCLUDE (   [bookingStart],
    [bookingEnd],
    [userID]) WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, SORT_IN_TEMPDB = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON, FILLFACTOR = 80)
GO


Get this bounty!!!