0001-qa-verify-the-benefits-of-mempool-cacheline-optimiza.patch

Loïc Dachary, 03/19/2021 07:45 AM

qa/standalone/c2c/c2c.sh
#!/usr/bin/env bash

source $CEPH_ROOT/qa/standalone/ceph-helpers.sh

# http://people.redhat.com/jmario/scratch/run_c2c_ceph.sh
function run_perf_c2c() {
    # First get some background system info
    uname -a > uname.out
    lscpu > lscpu.out
    cat /proc/cmdline > cmdline.out
    timeout -s INT 10 vmstat -w 1 > vmstat.out

    nodecnt=`lscpu|grep "NUMA node(" |awk '{print $3}'`
    for ((i=0; i<$nodecnt; i++))
    do
       cat /sys/devices/system/node/node${i}/meminfo > meminfo.$i.out
    done
    more `find /proc -name status` > proc_parent_child_status.out
    more /proc/*/numa_maps > numa_maps.out

    #
    # Get separate kernel and user perf-c2c stats
    #
    perf c2c record -a --ldlat=70 --all-user -o perf_c2c_a_all_user.data sleep 5
    perf c2c report --stdio -i perf_c2c_a_all_user.data > perf_c2c_a_all_user.out 2>&1
    perf c2c report --full-symbols --stdio -i perf_c2c_a_all_user.data > perf_c2c_full-sym_a_all_user.out 2>&1

    perf c2c record -g -a --ldlat=70 --all-user -o perf_c2c_g_a_all_user.data sleep 5
    perf c2c report -g --stdio -i perf_c2c_g_a_all_user.data > perf_c2c_g_a_all_user.out 2>&1

    perf c2c record -a --ldlat=70 --all-kernel -o perf_c2c_a_all_kernel.data sleep 4
    perf c2c report --stdio -i perf_c2c_a_all_kernel.data > perf_c2c_a_all_kernel.out 2>&1

    perf c2c record -g --ldlat=70 -a --all-kernel -o perf_c2c_g_a_all_kernel.data sleep 4

    perf c2c report -g --stdio -i perf_c2c_g_a_all_kernel.data > perf_c2c_g_a_all_kernel.out 2>&1

    #
    # Get combined kernel and user perf-c2c stats
    #
    perf c2c record -a --ldlat=70 -o perf_c2c_a_both.data sleep 4
    perf c2c report --stdio -i perf_c2c_a_both.data > perf_c2c_a_both.out 2>&1

    perf c2c record -g --ldlat=70 -a -o perf_c2c_g_a_both.data sleep 4
    perf c2c report -g --stdio -i perf_c2c_g_a_both.data > perf_c2c_g_a_both.out 2>&1

    #
    # Get all-user physical addr stats, in case multiple threads or processes are
    # accessing shared memory with different vaddrs.
    #
    perf c2c record --phys-data -a --ldlat=70 --all-user -o perf_c2c_a_all_user_phys_data.data sleep 5
    perf c2c report --stdio -i perf_c2c_a_all_user_phys_data.data > perf_c2c_a_all_user_phys_data.out 2>&1
}

# Run the shard-contention workload in the background, profile the whole
# system while it runs, then stop it.
ceph_test_c2c &
run_perf_c2c
kill $!
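
Once the script has run, the generated *.out reports can be checked for the mempool shard counters to see whether the cacheline padding actually removed the load-HITM contention. A minimal post-processing sketch, assuming the report file names produced above (the exact section headings printed by perf c2c vary by perf version):

# Look for mempool shard symbols among the contended cache lines.
grep -n "mempool" perf_c2c_full-sym_a_all_user.out | head

# Quick look at the hottest shared cache lines in the combined report.
grep -n -A 20 "Shared Data Cache Line Table" perf_c2c_a_both.out
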
qa/suites/rados/standalone/workloads/c2c.yaml
roles:
- - mon.a
  - mgr.x
  - osd.0
  - client.0
tasks:
- install:
- workunit:
    basedir: qa/standalone
    clients:
      all:
        - c2c
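
This yaml is picked up when the rados/standalone suite is scheduled. A hypothetical scheduling command, where the branch and machine type are placeholders and the exact flags depend on the local teuthology installation:

teuthology-suite --suite rados/standalone \
    --filter c2c \
    --ceph <branch-with-this-patch> \
    --machine-type smithi
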
src/test/CMakeLists.txt
  ceph_test_stress_watch
  DESTINATION ${CMAKE_INSTALL_BINDIR})

add_executable(ceph_test_c2c
  test_c2c.cc
  )
target_link_libraries(ceph_test_c2c
  ceph-common
  pthread
  ${EXTRALIBS}
  ${CMAKE_DL_LIBS}
  )

if(WITH_FUSE)
  add_executable(ceph_test_cfuse_cache_invalidate
    test_cfuse_cache_invalidate.cc
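
With this target added, the binary can be built and run by hand before going through teuthology. A minimal sketch, assuming an already configured cmake build tree under build/ (the directory layout is an assumption):

cd build
cmake --build . --target ceph_test_c2c
./bin/ceph_test_c2c
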
src/test/test_c2c.cc
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <atomic>
#include <thread>
#include <vector>

#include "include/mempool.h"

int main(int argc, char **argv)
{
  // Spawn many short-lived threads, each of which picks a mempool shard and
  // bumps its counter once, so that perf c2c can observe how the shard
  // counters' cache lines are shared across CPUs.
  const size_t samples = mempool::num_shards * 100;
  std::atomic_int shards[mempool::num_shards] = {0};
  std::vector<std::thread> workers;
  for (size_t i = 0; i < samples; i++) {
    workers.push_back(
      std::thread([&](){
          size_t shard = mempool::pool_t::pick_a_shard_int();
          shards[shard]++;
        }));
  }
  for (auto& t : workers) {
    t.join();
  }
  workers.clear();
  return 0;
}
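
For a quick manual check outside the standalone framework, the same measurement the script automates can be done in one shot. A condensed sketch, assuming the binary was built as above and a perf build with c2c support is available on the host:

# Generate shard traffic in the background, sample it, then inspect the report.
./bin/ceph_test_c2c &
perf c2c record -a --ldlat=70 --all-user -o manual_c2c.data sleep 5
kill $! 2>/dev/null || true
perf c2c report --stdio -i manual_c2c.data | less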